v0.10.1rc1
tests/ut/attention/test_attention_mask.py (new file, 133 lines)
@@ -0,0 +1,133 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from tests.ut.base import TestBase
from vllm_ascend.attention.attention_mask import AttentionMaskBuilder


class TestAttentionMaskBuilder(TestBase):

    def test_init_attention_mask_builder(self):
        # generate attention_mask_builder with float16
        attention_mask_builder = AttentionMaskBuilder(max_seq_len=1024,
                                                      dtype=torch.float16)
        self.assertEqual(attention_mask_builder._seq_len_cached, 1024)
        self.assertEqual(attention_mask_builder.attn_mask_cache.dtype,
                         torch.float16)
        self.assertEqual(attention_mask_builder.attn_mask_cache.shape,
                         (1024, 1024))
        self.assertEqual(attention_mask_builder.attn_mask_cache[0][-1],
                         torch.tensor(float("-inf"), dtype=torch.float16))

        # generate attention_mask_builder with bfloat16
        attention_mask_builder = AttentionMaskBuilder(max_seq_len=2048,
                                                      dtype=torch.bfloat16)
        self.assertEqual(attention_mask_builder._seq_len_cached, 2048)
        self.assertEqual(attention_mask_builder.attn_mask_cache.dtype,
                         torch.bfloat16)
        self.assertEqual(attention_mask_builder.attn_mask_cache.shape,
                         (2048, 2048))
        self.assertEqual(attention_mask_builder.attn_mask_cache[0][-1],
                         torch.tensor(1, dtype=torch.bfloat16))
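
    # Note (inferred from the assertions above, not from the implementation):
    # the float16 cache stores -inf directly in masked positions, while the
    # bfloat16 cache stores a placeholder 1 that is only scaled to a large
    # negative value when a mask is handed out.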

    def test_get_mask_scale_factor(self):
        # supported data types
        self.assertEqual(
            AttentionMaskBuilder.get_mask_scale_factor(torch.float16), 1)
        self.assertEqual(
            AttentionMaskBuilder.get_mask_scale_factor(torch.bfloat16), -10000)
        # get_mask_scale_factor only supports torch.float16 and
        # torch.bfloat16; any other dtype raises ValueError
        with self.assertRaises(ValueError):
            AttentionMaskBuilder.get_mask_scale_factor(torch.int8)
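
    # Sketch of the contract exercised above (assumed from the values, not
    # authoritative): float16 masks already hold -inf, so their scale factor
    # is the identity (1); bfloat16 masks hold the placeholder 1, so scaling
    # by -10000 stands in for -inf.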

    def test_get_attn_mask(self):
        # if the requested length is not larger than the cached max_seq_len,
        # the attn_mask_cache is not updated
        attention_mask_builder = AttentionMaskBuilder(max_seq_len=1024,
                                                      dtype=torch.float16)
        attn_mask = attention_mask_builder.get_attn_mask(
            max_seq_len=512, dtype=torch.float16, device=torch.device("cpu"))
        self.assertEqual(attn_mask.shape, (512, 512))
        self.assertEqual(attn_mask[0][-1],
                         torch.tensor(float("-inf"), dtype=torch.float16))
        self.assertEqual(attention_mask_builder._seq_len_cached, 1024)
        self.assertEqual(attention_mask_builder.attn_mask_cache.shape,
                         (1024, 1024))
        self.assertEqual(attention_mask_builder.attn_mask_cache[0][-1],
                         torch.tensor(float("-inf"), dtype=torch.float16))

        # if the requested length is larger than the cached max_seq_len,
        # the attn_mask_cache is rebuilt
        attn_mask = attention_mask_builder.get_attn_mask(
            max_seq_len=2048, dtype=torch.float16, device=torch.device("cpu"))
        self.assertEqual(attn_mask.shape, (2048, 2048))
        self.assertEqual(attn_mask[0][-1],
                         torch.tensor(float("-inf"), dtype=torch.float16))
        self.assertEqual(attention_mask_builder._seq_len_cached, 2048)
        self.assertEqual(attention_mask_builder.attn_mask_cache.shape,
                         (2048, 2048))
        self.assertEqual(attention_mask_builder.attn_mask_cache[0][-1],
                         torch.tensor(float("-inf"), dtype=torch.float16))
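
    # The cache grows monotonically: a request below the cached size slices
    # the existing (1024, 1024) cache, while a larger request rebuilds the
    # cache at the new size and keeps it for later calls.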

    def test_get_splitfuse_attn_mask(self):
        attention_mask_builder = AttentionMaskBuilder(max_seq_len=1024,
                                                      dtype=torch.float16)
        attn_mask = attention_mask_builder.get_splitfuse_attn_mask(
            seq_lens=torch.tensor([10, 20, 100]),
            position=torch.tensor([7, 8, 9, 18, 19, 99]),
            dtype=torch.float16,
            device=torch.device("cpu"),
        )
        self.assertEqual(attn_mask.shape, (6, 100))
        self.assertEqual(attention_mask_builder._seq_len_cached, 1024)

        attn_mask = attention_mask_builder.get_splitfuse_attn_mask(
            seq_lens=torch.tensor([10, 3000, 2000]),
            position=torch.tensor([7, 8, 9, 2999, 1999]),
            dtype=torch.float16,
            device=torch.device("cpu"),
        )
        self.assertEqual(attn_mask.shape, (5, 3000))
        self.assertEqual(attention_mask_builder._seq_len_cached, 3000)

        # get_splitfuse_attn_mask only supports torch.float16 and
        # torch.bfloat16; any other dtype raises ValueError
        with self.assertRaises(ValueError):
            attn_mask = attention_mask_builder.get_splitfuse_attn_mask(
                seq_lens=torch.tensor([10, 20, 100]),
                position=torch.tensor([7, 8, 9, 18, 19, 99]),
                dtype=torch.int8,
                device=torch.device("cpu"),
            )
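
    # The split-fuse mask is rectangular: one row per scheduled query
    # position and one column per key position up to the longest sequence,
    # hence the (6, 100) and (5, 3000) shapes asserted above.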

    def test_mask_value_cleanliness(self):
        attention_mask_builder = AttentionMaskBuilder(max_seq_len=6,
                                                      dtype=torch.bfloat16)
        self.assertEqual(attention_mask_builder.attn_mask_cache[-2][-1],
                         torch.tensor(1, dtype=torch.bfloat16))

        attn_mask = attention_mask_builder.get_splitfuse_attn_mask(
            seq_lens=torch.tensor([6]),
            position=torch.tensor([3, 4, 5]),
            dtype=torch.bfloat16,
            device=torch.device("cpu"),
        )
        self.assertEqual(
            attn_mask[-2][-1],
            torch.tensor(-10000, dtype=torch.bfloat16,
                         device=attn_mask.device))
        self.assertEqual(attention_mask_builder.attn_mask_cache[-2][-1],
                         torch.tensor(1, dtype=torch.bfloat16))
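
        # The builder appears to scale a copy of the cached mask: the
        # returned bfloat16 mask holds -10000 while the cache keeps its
        # placeholder 1, so later calls start from a clean cache.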
tests/ut/attention/test_attention_v1.py (new file, 578 lines)
@@ -0,0 +1,578 @@
from unittest.mock import MagicMock, patch

import torch

from tests.ut.base import TestBase
from vllm_ascend.attention.attention_v1 import (AscendAttentionBackend,
                                                AscendAttentionBackendImpl,
                                                AscendAttentionMetadataBuilder,
                                                AscendAttentionState,
                                                AscendMetadata,
                                                CommonAttentionState)
from vllm_ascend.attention.utils import AscendCommonAttentionMetadata


class TestAscendAttentionBackend(TestBase):

    def test_get_name(self):
        self.assertEqual(AscendAttentionBackend.get_name(), "ASCEND")

    def test_get_impl_cls(self):
        self.assertEqual(AscendAttentionBackend.get_impl_cls(),
                         AscendAttentionBackendImpl)

    def test_get_metadata_cls(self):
        self.assertEqual(AscendAttentionBackend.get_metadata_cls(),
                         AscendMetadata)

    def test_get_state_cls(self):
        self.assertEqual(AscendAttentionBackend.get_state_cls(),
                         CommonAttentionState)

    def test_get_builder_cls(self):
        self.assertEqual(AscendAttentionBackend.get_builder_cls(),
                         AscendAttentionMetadataBuilder)

    @patch('vllm_ascend.attention.attention_v1.is_310p')
    def test_get_kv_cache_shape_310p(self, mock_is_310p):
        mock_is_310p.return_value = True
        result = AscendAttentionBackend.get_kv_cache_shape(10, 20, 30, 40)
        self.assertEqual(result, (2, 10, 30 * 40 // 16, 20, 16))

    @patch('vllm_ascend.attention.attention_v1.is_310p', return_value=False)
    def test_get_kv_cache_shape_not_310p(self, mock_is_310p):
        result = AscendAttentionBackend.get_kv_cache_shape(10, 20, 30, 40)
        self.assertEqual(result, (2, 10, 20, 30, 40))

    def test_get_bsh_kv_cache_shape(self):
        result = AscendAttentionBackend.get_bsh_kv_cache_shape(10, 20, 30, 40)
        self.assertEqual(result, (2, 10, 20, 30 * 40))
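
    # As implied by the assertions above, the 310P KV cache uses the NZ
    # layout: (num_blocks, block_size, num_kv_heads, head_size) becomes
    # (2, num_blocks, num_kv_heads * head_size // 16, block_size, 16),
    # i.e. the head dimension is tiled into 16-element fragments.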

    def test_swap_blocks(self):
        src_kv_cache = [torch.zeros((10, 20)), torch.zeros((10, 20))]
        dst_kv_cache = [torch.zeros((10, 20)), torch.zeros((10, 20))]
        src_to_dst = torch.tensor([[0, 1], [2, 3]])
        AscendAttentionBackend.swap_blocks(src_kv_cache, dst_kv_cache,
                                           src_to_dst)
        self.assertTrue(torch.all(dst_kv_cache[0][1] == src_kv_cache[0][0]))
        self.assertTrue(torch.all(dst_kv_cache[1][3] == src_kv_cache[1][2]))

    def test_copy_blocks(self):
        kv_caches = [torch.zeros((10, 20)), torch.zeros((10, 20))]
        src_to_dists = torch.tensor([[0, 1], [2, 3]])
        AscendAttentionBackend.copy_blocks(kv_caches, src_to_dists)
        self.assertTrue(torch.all(kv_caches[0][1] == kv_caches[0][0]))
        self.assertTrue(torch.all(kv_caches[1][3] == kv_caches[1][2]))
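
    # Both helpers take a (num_pairs, 2) tensor of (src_block, dst_block)
    # indices: swap_blocks copies blocks across two caches, while
    # copy_blocks duplicates blocks within each cache in the list.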


class TestAscendAttentionMetadataBuilder(TestBase):

    def setUp(self):
        self.mock_vllm_config = MagicMock()
        self.mock_vllm_config.model_config.max_model_len = 640
        self.mock_vllm_config.cache_config.block_size = 64
        self.mock_device = 'cpu:0'
        self.builder = AscendAttentionMetadataBuilder(self.mock_vllm_config,
                                                      self.mock_device)

    def test_reorder_batch(self):
        mock_input_batch = MagicMock()
        mock_scheduler_output = MagicMock()

        result = self.builder.reorder_batch(mock_input_batch,
                                            mock_scheduler_output)

        self.assertFalse(result)
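
    # reorder_batch returning False means this builder leaves the
    # scheduler's request order untouched; it has no decode-first
    # reordering requirement (unlike the MLA builder tested further below).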

    @patch('vllm_ascend.attention.attention_v1.AscendMetadata')
    @patch('torch_npu.npu_format_cast')
    @patch('vllm_ascend.utils.nd_to_nz_2d')
    @patch('vllm_ascend.attention.attention_v1.is_310p', return_value=True)
    def test_build_prefill_no_cache(self, mock_is_310p, mock_nd_to_nz_2d,
                                    mock_npu_format_cast,
                                    mock_ascend_metadata):
        common_attn_metadata = AscendCommonAttentionMetadata(
            query_start_loc=torch.tensor([0, 3, 7]),
            query_start_loc_cpu=torch.tensor([0, 3, 7]),
            seq_lens_cpu=torch.tensor([5, 6]),
            num_reqs=2,
            num_actual_tokens=10,
            max_query_len=5,
            decode_token_per_req=torch.tensor([1, 1]),
            block_table_tensor=torch.zeros((10, 10)),
            slot_mapping_cpu=torch.tensor(range(20)),
            actual_seq_lengths_q=torch.tensor([0, 1]),
            positions=torch.tensor([10, 10]),
            attn_mask=torch.ones((10, 10)),
            spec_attn_mask=None,
            attn_state=AscendAttentionState.PrefillNoCache)

        mock_nz_tensor = MagicMock()
        mock_model = MagicMock()
        mock_nd_to_nz_2d.return_value = mock_nz_tensor
        mock_npu_format_cast.return_value = mock_nz_tensor

        self.builder.build(common_attn_metadata, mock_model)

    @patch('vllm_ascend.attention.attention_v1.AscendMetadata')
    @patch('torch_npu.npu_format_cast')
    @patch('vllm_ascend.utils.nd_to_nz_spec')
    @patch('vllm_ascend.attention.attention_v1.is_310p', return_value=True)
    @patch('vllm_ascend.attention.attention_v1.AscendAttentionState')
    def test_build_chunked_prefill(self, mock_ascend_attention_state,
                                   mock_is_310p, mock_nd_to_nz_spec,
                                   mock_npu_format_cast, mock_ascend_metadata):
        common_attn_metadata = AscendCommonAttentionMetadata(
            query_start_loc=torch.tensor([0, 2, 5, 9]),
            query_start_loc_cpu=torch.tensor([0, 2, 5, 9]),
            seq_lens_cpu=torch.tensor([4, 5, 6]),
            num_reqs=3,
            num_actual_tokens=15,
            max_query_len=6,
            decode_token_per_req=torch.tensor([1, 1, 1]),
            block_table_tensor=torch.zeros((10, 10)),
            slot_mapping_cpu=torch.tensor(range(20)),
            actual_seq_lengths_q=torch.tensor([0, 1, 2]),
            positions=torch.tensor([10, 10]),
            attn_mask=torch.ones((15, 15)),
            spec_attn_mask=None,
            attn_state=AscendAttentionState.ChunkedPrefill)

        mock_ascend_attention_state = MagicMock()
        mock_ascend_attention_state.PrefillNoCache = 0

        mock_nz_tensor = MagicMock()
        mock_model = MagicMock()
        mock_nd_to_nz_spec.return_value = mock_nz_tensor
        mock_npu_format_cast.return_value = mock_nz_tensor

        self.builder.build(common_attn_metadata, mock_model)

    @patch('vllm_ascend.attention.attention_v1.AscendMetadata')
    @patch('vllm_ascend.attention.attention_v1.is_310p', return_value=False)
    def test_build_non_310p(self, mock_is_310p, mock_ascend_metadata):
        common_attn_metadata = AscendCommonAttentionMetadata(
            query_start_loc=torch.tensor([0, 2, 5, 9]),
            query_start_loc_cpu=torch.tensor([0, 2, 5, 9]),
            seq_lens_cpu=torch.tensor([4, 5, 6]),
            num_reqs=3,
            num_actual_tokens=15,
            max_query_len=6,
            decode_token_per_req=torch.tensor([1, 1, 1]),
            block_table_tensor=torch.zeros((10, 10)),
            slot_mapping_cpu=torch.tensor(range(20)),
            actual_seq_lengths_q=torch.tensor([0, 1, 2]),
            positions=torch.tensor([10, 10]),
            attn_mask=torch.ones((15, 15)),
            spec_attn_mask=None,
            attn_state=AscendAttentionState.ChunkedPrefill)
        mock_model = MagicMock()

        self.builder.build(common_attn_metadata, mock_model)
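
    # The two 310P tests above patch nd_to_nz_2d / nd_to_nz_spec together
    # with npu_format_cast, which suggests the builder converts the
    # attention mask to the NZ format on that platform; the non-310P build
    # needs no such conversion.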


class TestAscendAttentionBackendImpl(TestBase):

    def setUp(self):
        self.layer = MagicMock()
        self.layer.layer_name = "test_layer"
        self.layer._k_scale_float = 1.0
        self.layer._v_scale_float = 1.0

        self.attention_type = MagicMock()
        self.attention_type.DECODER = "decoder"
        self.attention_type.ENCODER = "encoder"

        self.attn_metadata = MagicMock()
        self.attn_metadata.return_value = "1"

        self.layer_no_quant = MagicMock(
            spec=['layer_name', '_k_scale_float', '_v_scale_float'])
        self.layer_no_quant.layer_name = "test_layer"
        self.layer_no_quant._k_scale_float = 1.0
        self.layer_no_quant._v_scale_float = 1.0

        self.impl = AscendAttentionBackendImpl(
            num_heads=8,
            head_size=64,
            scale=1.0,
            num_kv_heads=8,
            alibi_slopes=None,
            sliding_window=None,
            kv_cache_dtype="float16",
            logits_soft_cap=None,
            attn_type=self.attention_type.DECODER,
            kv_sharing_target_layer_name=None)

        self.impl_192 = AscendAttentionBackendImpl(
            num_heads=8,
            head_size=192,
            scale=1.0,
            num_kv_heads=8,
            alibi_slopes=None,
            sliding_window=None,
            kv_cache_dtype="float16",
            logits_soft_cap=None,
            attn_type=self.attention_type.DECODER,
            kv_sharing_target_layer_name=None)

        self.impl_error = AscendAttentionBackendImpl(
            num_heads=8,
            head_size=192,
            scale=1.0,
            num_kv_heads=8,
            alibi_slopes=None,
            sliding_window=None,
            kv_cache_dtype="float16",
            logits_soft_cap=None,
            attn_type=None,
            kv_sharing_target_layer_name=None)

        self.impl_swa = AscendAttentionBackendImpl(
            num_heads=8,
            head_size=64,
            scale=1.0,
            num_kv_heads=8,
            alibi_slopes=None,
            sliding_window=1024,
            kv_cache_dtype="float16",
            logits_soft_cap=None,
            attn_type=self.attention_type.DECODER,
            kv_sharing_target_layer_name=None)
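
    # Four impl variants are exercised below: the default decoder impl,
    # impl_192 (head_size 192, which the head-size test routes through the
    # vanilla chunked-prefill fallback), impl_error (attn_type None,
    # expected to raise NotImplementedError), and impl_swa (sliding window
    # of 1024).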

    @patch('torch.ops.vllm.unified_ascend_attention_with_output')
    def test_forward_trace_flag_true(self, mock_unified_attention):
        """Test forward pass when trace_flag is True"""
        query = torch.randn(10, 8 * 64)
        key = torch.randn(10, 8 * 64)
        value = torch.randn(10, 8 * 64)
        kv_cache = torch.empty(2, 0, 0, 8, 64)
        metadata = self.attn_metadata
        layer = self.layer

        output = self.impl.forward(layer,
                                   query,
                                   key,
                                   value,
                                   kv_cache,
                                   metadata,
                                   trace_flag=True)

        mock_unified_attention.assert_called_once()
        assert output.shape == (10, 8 * 64)

    @patch('torch_npu._npu_paged_attention_splitfuse')
    def test_forward_with_quant_method(self, mock_paged_attention):
        """Test forward pass when layer has quant_method"""
        query = torch.randn(10, 8 * 64)
        key = torch.randn(10, 8 * 64)
        value = torch.randn(10, 8 * 64)
        k_cache = torch.ones(1, 10, 8, 64, dtype=torch.int8)
        v_cache = torch.ones(1, 10, 8, 64, dtype=torch.int8)
        kv_cache = [k_cache, v_cache]
        ret_value = torch.ones(1, 1, 10, 8, 64, dtype=torch.int8)

        metadata = MagicMock()
        metadata.num_actual_tokens = torch.randn(10, 8 * 64)
        metadata.block_tables = torch.randn(10, 8 * 64)
        metadata.seq_lens = torch.randn(10, 8 * 64)
        metadata.attn_mask = torch.randn(10, 8 * 64)
        metadata.query_lens = torch.randn(10, 8 * 64)
        layer = self.layer
        layer.quant_method = MagicMock()
        layer.quant_method.apply.return_value = ret_value

        output = self.impl.forward(layer,
                                   query,
                                   key,
                                   value,
                                   kv_cache,
                                   metadata,
                                   trace_flag=False)

        layer.quant_method.apply.assert_called_once()
        assert output.shape == (10, 8 * 64)

    def test_forward_no_attn_metadata(self):
        """Test forward pass when attn_metadata is None"""
        query = torch.randn(10, 8 * 64)
        key = torch.randn(10, 8 * 64)
        value = torch.randn(10, 8 * 64)
        kv_cache = torch.empty(2, 0, 0, 8, 64)
        layer = self.layer_no_quant

        output = self.impl.forward(layer,
                                   query,
                                   key,
                                   value,
                                   kv_cache,
                                   None,
                                   trace_flag=False)

        assert output.shape == (10, 8 * 64)

    @patch('torch_npu._npu_reshape_and_cache')
    @patch('torch_npu._npu_flash_attention')
    def test_forward_prefill_no_cache(self, mock_flash_attention,
                                      mock_reshape_cache):
        """Test forward pass in PrefillNoCache state"""
        query = torch.randn(10, 8 * 64)
        key = torch.randn(10, 8 * 64)
        value = torch.randn(10, 8 * 64)
        kv_cache = torch.empty(2, 5, 128, 8, 64)
        metadata = self.attn_metadata
        metadata.attn_state = AscendAttentionState.PrefillNoCache
        metadata.attn_mask = torch.randn(1, 1, 10, 10)
        metadata.seq_lens = torch.tensor([10])
        metadata.num_actual_tokens = 10
        metadata.slot_mapping = torch.zeros(10, dtype=torch.long)
        layer = self.layer_no_quant

        output = self.impl.forward(layer,
                                   query,
                                   key,
                                   value,
                                   kv_cache,
                                   metadata,
                                   trace_flag=False)

        mock_reshape_cache.assert_called_once()
        mock_flash_attention.assert_called_once()
        assert output.shape == (10, 8 * 64)

    @patch('torch_npu._npu_reshape_and_cache')
    @patch('torch_npu._npu_flash_attention')
    def test_forward_prefill_no_cache_swa(self, mock_flash_attention,
                                          mock_reshape_cache):
        """Test forward pass in PrefillNoCache state with a sliding window"""
        query = torch.randn(10, 8 * 64)
        key = torch.randn(10, 8 * 64)
        value = torch.randn(10, 8 * 64)
        kv_cache = torch.empty(2, 5, 128, 8, 64)
        metadata = self.attn_metadata
        metadata.attn_state = AscendAttentionState.PrefillNoCache
        metadata.attn_mask = torch.randn(1, 1, 10, 10)
        metadata.seq_lens = torch.tensor([10])
        metadata.num_actual_tokens = 10
        metadata.slot_mapping = torch.zeros(10, dtype=torch.long)
        layer = self.layer_no_quant

        output = self.impl_swa.forward(layer,
                                       query,
                                       key,
                                       value,
                                       kv_cache,
                                       metadata,
                                       trace_flag=False)

        mock_reshape_cache.assert_called_once()
        mock_flash_attention.assert_called_once()
        assert output.shape == (10, 8 * 64)

    @patch('torch_npu._npu_reshape_and_cache')
    @patch('torch_npu._npu_flash_attention_qlens')
    def test_forward_prefill_cache_hit(self, mock_flash_attention_qlens,
                                       mock_npu_reshape_and_cache):
        """Test forward pass in PrefillCacheHit state"""
        query = torch.randn(10, 8 * 64)
        key = torch.randn(10, 8 * 64)
        value = torch.randn(10, 8 * 64)
        kv_cache = torch.empty(2, 5, 128, 8, 64)
        metadata = self.attn_metadata
        metadata.attn_state = AscendAttentionState.PrefillCacheHit
        metadata.attn_mask = torch.randn(1, 1, 10, 10)
        metadata.query_lens = torch.tensor([10])
        metadata.seq_lens = torch.tensor([10])
        metadata.block_tables = torch.zeros(1, 5, dtype=torch.long)
        metadata.num_actual_tokens = 10
        metadata.slot_mapping = torch.zeros(10, dtype=torch.long)
        layer = self.layer_no_quant

        output = self.impl.forward(layer,
                                   query,
                                   key,
                                   value,
                                   kv_cache,
                                   metadata,
                                   trace_flag=False)

        mock_flash_attention_qlens.assert_called_once()
        assert output.shape == (10, 8 * 64)

    @patch('torch_npu._npu_reshape_and_cache')
    @patch('torch_npu._npu_paged_attention')
    def test_forward_decode_only(self, mock_paged_attention,
                                 mock_npu_reshape_and_cache):
        """Test forward pass in DecodeOnly state"""
        query = torch.randn(10, 8 * 64)
        key = torch.randn(10, 8 * 64)
        value = torch.randn(10, 8 * 64)
        kv_cache = torch.empty(2, 5, 128, 8, 64)
        metadata = self.attn_metadata
        metadata.attn_state = AscendAttentionState.DecodeOnly
        metadata.seq_lens = torch.tensor([10])
        metadata.block_tables = torch.zeros(1, 5, dtype=torch.long)
        metadata.num_actual_tokens = 10
        metadata.slot_mapping = torch.zeros(10, dtype=torch.long)
        layer = self.layer_no_quant

        output = self.impl.forward(layer,
                                   query,
                                   key,
                                   value,
                                   kv_cache,
                                   metadata,
                                   trace_flag=False)

        mock_paged_attention.assert_called_once()
        assert output.shape == (10, 8 * 64)

    @patch('torch_npu._npu_reshape_and_cache')
    @patch('torch_npu.npu_fused_infer_attention_score')
    def test_forward_decode_only_swa(self, mock_fused_infer_attention_score,
                                     mock_npu_reshape_and_cache):
        """Test forward pass in DecodeOnly state with a sliding window"""
        query = torch.randn(10, 8 * 64)
        key = torch.randn(10, 8 * 64)
        value = torch.randn(10, 8 * 64)
        kv_cache = torch.empty(2, 5, 128, 8, 64)
        metadata = self.attn_metadata
        metadata.attn_state = AscendAttentionState.DecodeOnly
        metadata.seq_lens = torch.tensor([10] * 10)
        metadata.block_tables = torch.zeros(1, 5, dtype=torch.long)
        metadata.num_actual_tokens = 100
        metadata.slot_mapping = torch.zeros(10, dtype=torch.long)
        layer = self.layer_no_quant
        mock_fused_infer_attention_score.return_value = (torch.ones(10, 8,
                                                                    64), 1)
        output = self.impl_swa.forward(layer,
                                       query,
                                       key,
                                       value,
                                       kv_cache,
                                       metadata,
                                       trace_flag=False)

        mock_fused_infer_attention_score.assert_called_once()
        assert output.shape == (10, 8 * 64)
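
    # The mocked npu_fused_infer_attention_score returns a 2-tuple; the
    # sliding-window decode path relies only on the first element (the
    # attention output), which ends up reshaped to (10, 512).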

    @patch('vllm_ascend.attention.attention_v1.is_310p', return_value=False)
    @patch('torch_npu._npu_reshape_and_cache')
    @patch('vllm_ascend.attention.attention_v1.vanilla_chunked_prefill')
    def test_forward_head_size_192(self, mock_vanilla_prefill,
                                   mock_npu_reshape_and_cache, mock_is_310p):
        """Test forward pass when head_size is 192"""
        query = torch.randn(10, 8 * 192)
        key = torch.randn(10, 8 * 192)
        value = torch.randn(10, 8 * 192)
        kv_cache = torch.empty(2, 5, 128, 8, 192)
        metadata = self.attn_metadata
        metadata.attn_mask = torch.randn(1, 1, 10, 10)
        metadata.query_lens = torch.tensor([10])
        metadata.seq_lens = torch.tensor([10])
        metadata.block_tables = torch.zeros(1, 5, dtype=torch.long)
        metadata.num_actual_tokens = 10
        metadata.slot_mapping = torch.zeros(10, dtype=torch.long)
        layer = self.layer_no_quant
        mock_vanilla_prefill.return_value = MagicMock()

        output = self.impl_192.forward(layer,
                                       query,
                                       key,
                                       value,
                                       kv_cache,
                                       metadata,
                                       trace_flag=False)

        mock_vanilla_prefill.assert_called_once()
        assert output.shape == (10, 8 * 192)
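
    # Head sizes unsupported by the fused NPU kernels (such as 192) appear
    # to fall back to the pure-PyTorch vanilla_chunked_prefill path, hence
    # the assertion that the fallback was called exactly once.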

    @patch('torch_npu._npu_reshape_and_cache')
    @patch('torch_npu._npu_paged_attention_splitfuse')
    def test_forward_normal_v1_situation(self, mock_paged_attention,
                                         mock_npu_reshape_and_cache):
        """Test forward pass in normal V1 situation"""
        query = torch.randn(10, 8 * 64)
        key = torch.randn(10, 8 * 64)
        value = torch.randn(10, 8 * 64)
        kv_cache = torch.empty(2, 5, 128, 8, 64)
        metadata = self.attn_metadata
        metadata.attn_mask = torch.randn(1, 1, 10, 10)
        metadata.query_lens = torch.tensor([10])
        metadata.seq_lens = torch.tensor([10])
        metadata.block_tables = torch.zeros(1, 5, dtype=torch.long)
        metadata.num_actual_tokens = 10
        metadata.slot_mapping = torch.zeros(10, dtype=torch.long)
        layer = self.layer_no_quant

        output = self.impl.forward(layer,
                                   query,
                                   key,
                                   value,
                                   kv_cache,
                                   metadata,
                                   trace_flag=False)

        mock_paged_attention.assert_called_once()
        assert output.shape == (10, 8 * 64)

    @patch('torch_npu.npu_format_cast')
    @patch('torch_npu._npu_reshape_and_cache')
    @patch('torch_npu._npu_paged_attention_splitfuse')
    @patch('vllm_ascend.attention.attention_v1.is_310p', return_value=True)
    def test_forward_310p_device(self, mock_is_310p, mock_paged_attention,
                                 mock_npu_reshape_and_cache,
                                 mock_npu_format_cast):
        """Test forward pass on a 310P device"""
        query = torch.randn(10, 8 * 64)
        key = torch.randn(10, 8 * 64)
        value = torch.randn(10, 8 * 64)
        kv_cache = torch.empty(2, 5, 128, 8, 64)
        metadata = self.attn_metadata
        metadata.attn_mask = torch.randn(1, 1, 10, 10)
        metadata.query_lens = torch.tensor([10])
        metadata.seq_lens = torch.tensor([10])
        metadata.block_tables = torch.zeros(1, 5, dtype=torch.long)
        metadata.num_actual_tokens = 10
        metadata.slot_mapping = torch.zeros(10, dtype=torch.long)
        layer = self.layer_no_quant

        mock_npu_format_cast.return_value = metadata.attn_mask
        output = self.impl.forward(layer,
                                   query,
                                   key,
                                   value,
                                   kv_cache,
                                   metadata,
                                   trace_flag=False)

        mock_paged_attention.assert_called_once()
        assert output.shape == (10, 8 * 64)

    @patch('torch_npu._npu_reshape_and_cache')
    def test_forward_raise_error(self, mock_npu_reshape_and_cache):
        query = torch.randn(10, 8 * 64)
        key = torch.randn(10, 8 * 64)
        value = torch.randn(10, 8 * 64)
        kv_cache = torch.empty(2, 5, 128, 8, 64)
        metadata = self.attn_metadata
        metadata.attn_mask = torch.randn(1, 1, 10, 10)
        metadata.query_lens = torch.tensor([10])
        metadata.seq_lens = torch.tensor([10])
        metadata.block_tables = torch.zeros(1, 5, dtype=torch.long)
        metadata.num_actual_tokens = 10
        metadata.slot_mapping = torch.zeros(10, dtype=torch.long)
        layer = self.layer_no_quant

        with self.assertRaises(NotImplementedError):
            self.impl_error.forward(layer,
                                    query,
                                    key,
                                    value,
                                    kv_cache,
                                    metadata,
                                    trace_flag=False)
tests/ut/attention/test_mla_v1.py (new file, 631 lines)
@@ -0,0 +1,631 @@
from unittest.mock import MagicMock, patch

import torch
from vllm.distributed.parallel_state import GroupCoordinator
from vllm.model_executor.layers.linear import LinearBase

from tests.ut.base import TestBase
from vllm_ascend.attention.attention_v1 import AscendAttentionState
from vllm_ascend.attention.mla_v1 import (AscendMLABackend,
                                          AscendMLADecodeMetadata,
                                          AscendMLAImpl, AscendMLAMetadata,
                                          AscendMLAMetadataBuilder,
                                          AscendMLAPrefillMetadata)


class TestAscendMLABackend(TestBase):

    def test_get_name(self):
        self.assertEqual(AscendMLABackend.get_name(), "ASCEND_MLA")

    def test_get_metadata_cls(self):
        self.assertEqual(AscendMLABackend.get_metadata_cls(),
                         AscendMLAMetadata)

    def test_get_builder_cls(self):
        self.assertEqual(AscendMLABackend.get_builder_cls(),
                         AscendMLAMetadataBuilder)

    def test_get_kv_cache_shape(self):
        result = AscendMLABackend.get_kv_cache_shape(2, 4, 8, 128)
        self.assertEqual(result, (2, 4, 8, 128))

    def test_get_impl_cls(self):
        result = AscendMLABackend.get_impl_cls()
        self.assertEqual(result, AscendMLAImpl)


class TestAscendMLAPrefillMetadata(TestBase):

    def test_ascend_mla_prefill_metadata_default(self):
        attn_mask = torch.tensor([[1, 0], [1, 1]], dtype=torch.bool)
        query_lens = [1, 2]
        seq_lens = [2, 2]
        context_lens = torch.tensor([1, 2])
        input_positions = torch.tensor([0, 1, 0, 1])
        query_start_loc = torch.tensor([0, 1, 3])
        block_table = torch.tensor([[0, 1], [2, 3]])
        max_query_len = 2
        max_seq_lens = 2

        metadata = AscendMLAPrefillMetadata(attn_mask=attn_mask,
                                            query_lens=query_lens,
                                            seq_lens=seq_lens,
                                            context_lens=context_lens,
                                            input_positions=input_positions,
                                            query_start_loc=query_start_loc,
                                            block_table=block_table,
                                            max_query_len=max_query_len,
                                            max_seq_lens=max_seq_lens)
        self.assertIs(metadata.attn_mask, attn_mask)
        self.assertEqual(metadata.query_lens, query_lens)
        self.assertEqual(metadata.seq_lens, seq_lens)
        self.assertIs(metadata.context_lens, context_lens)
        self.assertIs(metadata.input_positions, input_positions)
        self.assertIs(metadata.query_start_loc, query_start_loc)
        self.assertIs(metadata.block_table, block_table)
        self.assertEqual(metadata.max_query_len, max_query_len)
        self.assertEqual(metadata.max_seq_lens, max_seq_lens)
        self.assertIsNone(metadata.chunked_context)

    def test_ascend_mla_prefill_metadata_with_chunked_context(self):
        cu_seq_lens = torch.tensor([0, 2, 4])
        starts = torch.tensor([0, 2])
        seq_tot = [2, 2]
        max_seq_lens = [2, 2]
        workspace = torch.randn(2, 4)
        chunk_seq_lens = torch.tensor([2, 2])

        chunked_context = AscendMLAPrefillMetadata.ChunkedContextMetadata(
            cu_seq_lens=cu_seq_lens,
            starts=starts,
            seq_tot=seq_tot,
            max_seq_lens=max_seq_lens,
            workspace=workspace,
            chunk_seq_lens=chunk_seq_lens)

        metadata = AscendMLAPrefillMetadata(
            attn_mask=torch.tensor([[1, 0], [1, 1]], dtype=torch.bool),
            query_lens=[1, 2],
            seq_lens=[2, 2],
            context_lens=torch.tensor([1, 2]),
            input_positions=torch.tensor([0, 1, 0, 1]),
            query_start_loc=torch.tensor([0, 1, 3]),
            block_table=torch.tensor([[0, 1], [2, 3]]),
            max_query_len=2,
            max_seq_lens=2,
            chunked_context=chunked_context)

        self.assertIsNotNone(metadata.chunked_context)
        self.assertIs(metadata.chunked_context.cu_seq_lens, cu_seq_lens)
        self.assertIs(metadata.chunked_context.starts, starts)
        self.assertEqual(metadata.chunked_context.seq_tot, seq_tot)
        self.assertEqual(metadata.chunked_context.max_seq_lens, max_seq_lens)
        self.assertIs(metadata.chunked_context.workspace, workspace)
        self.assertIs(metadata.chunked_context.chunk_seq_lens, chunk_seq_lens)


class TestAscendMLADecodeMetadata(TestBase):

    def test_ascend_mla_decode_metadata_default(self):
        input_positions = torch.tensor([[1, 2, 3, 4], [1, 2, 3, 4]])
        block_table = torch.tensor([[0, 3, 2, 1], [0, 2, 1, 3]])
        seq_lens = torch.tensor([[2], [3]])
        max_seq_lens = 4
        seq_lens_list = [2, 3]
        attn_mask = None

        metadata = AscendMLADecodeMetadata(input_positions, block_table,
                                           seq_lens, max_seq_lens,
                                           seq_lens_list, attn_mask)

        self.assertIs(metadata.input_positions, input_positions)
        self.assertIs(metadata.block_table, block_table)
        self.assertIs(metadata.seq_lens, seq_lens)
        self.assertEqual(metadata.max_seq_lens, max_seq_lens)
        self.assertEqual(metadata.seq_lens_list, seq_lens_list)
        self.assertIsNone(metadata.attn_mask)


class TestAscendMLAMetadata(TestBase):

    def test_ascend_mla_metadata_default(self):
        num_actual_tokens = 100
        slot_mapping = torch.randn(100, 4, 1024)
        query_start_loc = torch.tensor([1, 2, 3, 4])
        seq_lens = [30, 50]
        block_tables = torch.randint(0, 100, (100, 4))

        num_decodes = 4
        num_decode_tokens = 8
        num_prefills = 8

        num_input_tokens = 2

        query_lens = None
        head_dim = None
        attn_mask = None
        attn_state = AscendAttentionState.ChunkedPrefill

        decode = None
        prefill = None

        metadata = AscendMLAMetadata(num_actual_tokens, slot_mapping,
                                     query_start_loc, seq_lens, block_tables,
                                     num_decodes, num_decode_tokens,
                                     num_prefills, num_input_tokens,
                                     query_lens, head_dim, attn_mask,
                                     attn_state, decode, prefill)

        self.assertEqual(metadata.num_actual_tokens, num_actual_tokens)
        self.assertIs(metadata.slot_mapping, slot_mapping)
        self.assertIs(metadata.query_start_loc, query_start_loc)
        self.assertEqual(metadata.seq_lens, seq_lens)
        self.assertIs(metadata.block_tables, block_tables)
        self.assertEqual(metadata.num_decodes, num_decodes)
        self.assertEqual(metadata.num_decode_tokens, num_decode_tokens)
        self.assertEqual(metadata.num_prefills, num_prefills)
        self.assertEqual(metadata.num_input_tokens, num_input_tokens)
        self.assertEqual(metadata.query_lens, query_lens)
        self.assertEqual(metadata.head_dim, head_dim)
        self.assertEqual(metadata.attn_mask, attn_mask)
        self.assertEqual(metadata.attn_state, attn_state)
        self.assertEqual(metadata.decode, decode)
        self.assertEqual(metadata.prefill, prefill)


class TestAscendMLAMetadataBuilder(TestBase):

    def test_ascend_mla_metadata_builder_default(self):
        mock_vllm_config = MagicMock()
        mock_vllm_config.model_config.max_model_len = 1024
        mock_vllm_config.model_config.get_head_size.return_value = 64
        mock_vllm_config.model_config.dtype = torch.float16
        mock_vllm_config.cache_config.block_size = 16
        mock_vllm_config.scheduler_config.max_num_seqs = 4
        mock_vllm_config.scheduler_config.chunked_prefill_enabled = False
        mock_device = 'cpu'

        ascend_config = MagicMock()
        with patch("vllm_ascend.attention.mla_v1.get_ascend_config",
                   return_value=ascend_config):
            builder = AscendMLAMetadataBuilder(mock_vllm_config, mock_device)

            self.assertEqual(builder.block_size,
                             mock_vllm_config.cache_config.block_size)
            self.assertEqual(
                builder.chunked_prefill_enabled,
                mock_vllm_config.scheduler_config.chunked_prefill_enabled)

    def test_reorder_batch(self):
        ascend_config = MagicMock()

        mock_vllm_config = MagicMock()
        mock_vllm_config.model_config.max_model_len = 1024
        mock_vllm_config.cache_config.block_size = 16
        mock_vllm_config.scheduler_config.max_num_seqs = 4
        mock_vllm_config.scheduler_config.chunked_prefill_enabled = False
        mock_device = 'cpu'

        with patch("vllm_ascend.attention.mla_v1.get_ascend_config",
                   return_value=ascend_config):
            builder = AscendMLAMetadataBuilder(mock_vllm_config, mock_device)
            builder.decode_threshold = 1

            input_batch = MagicMock()
            input_batch.req_ids = [0, 1, 2, 3]

            scheduler_output = MagicMock()
            scheduler_output.num_scheduled_tokens = {0: 1, 1: 3, 2: 1, 3: 2}
            scheduler_output.scheduled_spec_decode_tokens = {
                0: [],
                1: [1],
                2: [],
                3: []
            }

            input_batch.swap_states = MagicMock()

            modified = builder.reorder_batch(input_batch, scheduler_output)

            self.assertTrue(modified)
            input_batch.swap_states.assert_called_once_with(1, 2)
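
    # With decode_threshold=1, requests 0 and 2 (one scheduled token each)
    # count as decodes while 1 and 3 are prefills; a single swap of indices
    # 1 and 2 groups the decodes at the front, so reorder_batch reports the
    # batch as modified.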


class TestAscendMLAImpl(TestBase):

    @patch('vllm.distributed.parallel_state._TP',
           new_callable=lambda: MagicMock(spec=GroupCoordinator))
    @patch("vllm.distributed.get_tensor_model_parallel_world_size",
           return_value=2)
    @patch("vllm_ascend.attention.mla_v1.get_current_vllm_config")
    @patch("vllm_ascend.attention.mla_v1.get_ascend_config")
    def setUp(self, ascend_config, get_current_vllm_config, mock_get_tp_size,
              mock_tp):
        mock_tp.world_size = 2
        vllm_config = MagicMock()
        speculative_config = MagicMock()
        model_config = MagicMock()
        speculative_config.num_speculative_tokens = 4
        vllm_config.speculative_config = speculative_config
        model_config.dtype = torch.float16
        vllm_config.model_config = model_config
        get_current_vllm_config.return_value = vllm_config

        num_heads = 256
        head_size = 1024
        scale = 0.1
        num_kv_heads = 8
        kv_cache_dtype = "auto"

        kv_a_layernorm = MagicMock()
        kv_a_layernorm.weight = torch.randn(96)
        kv_a_layernorm.variance_epsilon = 1e-6
        kwargs = {
            "q_lora_rank": 64,
            "kv_lora_rank": 32,
            "qk_nope_head_dim": 64,
            "qk_rope_head_dim": 32,
            "qk_head_dim": 96,
            "v_head_dim": 128,
            "rotary_emb": MagicMock(),
            "q_proj": MagicMock(),
            "kv_b_proj": MagicMock(),
            "o_proj": MagicMock(),
            "kv_a_proj_with_mqa": MagicMock(),
            "kv_a_layernorm": kv_a_layernorm,
        }

        self.impl = AscendMLAImpl(num_heads=num_heads,
                                  head_size=head_size,
                                  scale=scale,
                                  num_kv_heads=num_kv_heads,
                                  alibi_slopes=None,
                                  sliding_window=None,
                                  kv_cache_dtype=kv_cache_dtype,
                                  blocksparse_params=None,
                                  logits_soft_cap=None,
                                  attn_type=None,
                                  kv_sharing_target_layer_name=None,
                                  **kwargs)

    def test_init(self):
        self.assertEqual(self.impl.num_heads, 256)
        self.assertEqual(self.impl.head_size, 1024)
        self.assertEqual(self.impl.scale, 0.1)
        self.assertEqual(self.impl.num_kv_heads, 8)
        self.assertEqual(self.impl.kv_cache_dtype, "auto")
        self.assertEqual(self.impl.q_lora_rank, 64)
        self.assertEqual(self.impl.kv_lora_rank, 32)
        self.assertEqual(self.impl.qk_nope_head_dim, 64)
        self.assertEqual(self.impl.qk_rope_head_dim, 32)
        self.assertEqual(self.impl.qk_head_dim, 96)
        self.assertEqual(self.impl.v_head_dim, 128)
        self.assertIsNotNone(self.impl.rotary_emb)
        self.assertIsNotNone(self.impl.q_proj)
        self.assertIsNotNone(self.impl.kv_b_proj)
        self.assertIsNotNone(self.impl.o_proj)
        self.assertIsNotNone(self.impl.kv_a_proj_with_mqa)
        self.assertIsNotNone(self.impl.kv_a_layernorm)
        self.assertEqual(self.impl.num_queries_per_kv, 32)
        self.assertEqual(self.impl.tp_size, 2)
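
    # num_queries_per_kv = num_heads / num_kv_heads = 256 / 8 = 32, and
    # qk_head_dim = qk_nope_head_dim + qk_rope_head_dim = 64 + 32 = 96,
    # matching the constructor kwargs in setUp.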

    def test_v_up_proj(self):
        batch_size = 4
        x = torch.randn(batch_size, self.impl.num_heads,
                        self.impl.kv_lora_rank)

        if not hasattr(self.impl, 'W_UV') or self.impl.W_UV is None:
            self.impl.W_UV = torch.randn(self.impl.num_heads,
                                         self.impl.kv_lora_rank,
                                         self.impl.v_head_dim)
        result = self.impl._v_up_proj(x)

        self.assertEqual(result.shape[0], batch_size)
        self.assertEqual(result.shape[1],
                         self.impl.num_heads * self.impl.v_head_dim)

    def test_q_proj_and_k_up_proj(self):
        batch_size = 4
        x = torch.randn(batch_size, self.impl.num_heads, self.impl.qk_head_dim)
        q_proj_output = torch.randn(batch_size, self.impl.num_heads,
                                    self.impl.qk_head_dim)
        self.impl.q_proj.return_value = (q_proj_output, )
        if not hasattr(self.impl, 'W_UK_T') or self.impl.W_UK_T is None:
            self.impl.W_UK_T = torch.randn(self.impl.num_heads,
                                           self.impl.qk_nope_head_dim,
                                           self.impl.kv_lora_rank)
        result = self.impl._q_proj_and_k_up_proj(x)
        ql_nope, q_pe = result
        self.assertEqual(ql_nope.shape[0], batch_size)
        self.assertEqual(ql_nope.shape[1], self.impl.num_heads)
        self.assertEqual(ql_nope.shape[2], self.impl.kv_lora_rank)
        self.assertEqual(q_pe.shape[0], batch_size)
        self.assertEqual(q_pe.shape[1], self.impl.num_heads)
        self.assertEqual(q_pe.shape[2], self.impl.qk_rope_head_dim)

    def test_process_weights_after_loading(self):
        layer = MagicMock(spec=LinearBase)
        layer.input_size_per_partition = 10
        quant_method = MagicMock()
        apply = MagicMock()
        quant_method.apply = apply
        layer.quant_method = quant_method
        shape_0 = self.impl.num_heads * (self.impl.qk_nope_head_dim +
                                         self.impl.v_head_dim)
        shape_1 = self.impl.kv_lora_rank
        layer.weight = torch.randn(shape_0, shape_1)
        self.impl.kv_b_proj = layer
        apply.return_value = layer.weight.T
        self.impl.process_weights_after_loading(torch.bfloat16)

        self.assertEqual(self.impl.W_UK_T.shape[0], self.impl.num_heads)
        self.assertEqual(self.impl.W_UK_T.shape[1], self.impl.qk_nope_head_dim)
        self.assertEqual(self.impl.W_UK_T.shape[2], self.impl.kv_lora_rank)

        self.assertEqual(self.impl.W_UV.shape[0], self.impl.num_heads)
        self.assertEqual(self.impl.W_UV.shape[1], self.impl.kv_lora_rank)
        self.assertEqual(self.impl.W_UV.shape[2], self.impl.v_head_dim)
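
    # As pinned down by the shape assertions above, the combined kv_b_proj
    # weight of shape (num_heads * (qk_nope_head_dim + v_head_dim),
    # kv_lora_rank) is split per head into the k up-projection (stored
    # transposed as W_UK_T) and the v up-projection W_UV.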

    def test_compute_prefill_context_none(self):
        batch_size = 4
        kv_cache = torch.randn(10, 1, 1, 192)
        query = torch.randn(batch_size, self.impl.num_heads,
                            self.impl.qk_head_dim)
        metadata = MagicMock()
        metadata.prefill = None
        prefix_out = torch.randn(2, 16, 128)
        prefix_lse = torch.randn(2, 16, 8)
        q_pe = query[..., self.impl.qk_nope_head_dim:]
        q_nope = query[..., :self.impl.qk_nope_head_dim]

        out, lse = self.impl._compute_prefill_context(q_nope, q_pe, kv_cache,
                                                      32, metadata, prefix_out,
                                                      prefix_lse)

        self.assertTrue(torch.equal(prefix_out, out))
        self.assertTrue(torch.equal(prefix_lse, lse))

    @patch("torch_npu.atb.npu_paged_cache_load")
    @patch("torch_npu.atb.npu_ring_mla")
    def test_compute_prefill_context(self, mock_ring, mock_load):
        S, N, D, VD = 2, self.impl.num_heads, self.impl.qk_head_dim, self.impl.v_head_dim
        _, AND = self.impl.qk_rope_head_dim, self.impl.qk_nope_head_dim
        latent_kv_dim = self.impl.kv_lora_rank
        num_blocks, block_size = 100, 20
        query = torch.randn(S, N, D)
        q_nope = query[..., :self.impl.qk_nope_head_dim]
        q_pe = query[..., self.impl.qk_nope_head_dim:]
        kv_cache_0 = torch.randn(num_blocks, block_size, N, latent_kv_dim)
        kv_cache_1 = torch.randn(num_blocks, block_size, N, D)
        kv_cache = [kv_cache_0, kv_cache_1]
        prefix_out = torch.randn(S, N, 128)
        prefix_lse = torch.randn(S, N)

        self.impl.kv_b_proj.return_value = (torch.randn(8, N, VD + AND), )

        chunk_ctx = MagicMock()
        chunk_ctx.seq_tot = [8]
        chunk_ctx.chunk_seq_lens = [torch.tensor([8])]
        chunk_ctx.starts = [torch.tensor([0])]

        prefill_meta = MagicMock()
        prefill_meta.chunked_context = chunk_ctx
        prefill_meta.query_lens = [8]
        prefill_meta.block_table = torch.randint(0, 100, (S, 4))

        meta = MagicMock()
        meta.prefill = prefill_meta
        self.impl.prefill_mask = torch.triu(
            torch.ones(512, 512, device=q_nope.device, dtype=q_nope.dtype), 1)

        out, lse = self.impl._compute_prefill_context(q_nope, q_pe, kv_cache,
                                                      32, meta, prefix_out,
                                                      prefix_lse)

        mock_load.assert_called_once()
        mock_ring.assert_called_once()

        self.assertEqual(out.shape, prefix_out.shape)
        self.assertEqual(lse.shape, prefix_lse.shape)

    @patch("vllm_ascend.attention.mla_v1.AscendMLAImpl._v_up_proj")
    @patch("torch_npu.npu_fused_infer_attention_score")
    def test_forward_decode_without_graph(self,
                                          mock_npu_fused_infer_attention_score,
                                          mock_up_proj):
        num_tokens = 100
        block_size = 4
        q_nope = torch.randn(num_tokens, self.impl.num_heads,
                             self.impl.qk_nope_head_dim)
        q_pe = torch.randn(num_tokens, self.impl.num_heads,
                           self.impl.qk_rope_head_dim)
        k_nope = torch.randn(num_tokens, self.impl.num_heads,
                             self.impl.qk_nope_head_dim)
        k_pe = torch.randn(num_tokens, self.impl.num_heads,
                           self.impl.qk_rope_head_dim)
        metadata = MagicMock()
        metadata.decode = MagicMock()
        metadata.decode.block_table = MagicMock()
        metadata.decode.seq_lens = 10
        mock_npu_fused_infer_attention_score.return_value = [
            torch.randn(num_tokens, self.impl.num_heads,
                        self.impl.kv_lora_rank), None
        ]
        mock_up_proj.return_value = torch.randn(num_tokens,
                                                self.impl.num_heads,
                                                self.impl.v_head_dim)
        result = self.impl._forward_decode(q_nope, q_pe, k_nope, k_pe,
                                           block_size, metadata)
        self.assertEqual(result.shape[0], num_tokens)
        self.assertEqual(result.shape[1], self.impl.num_heads)
        self.assertEqual(result.shape[2], self.impl.v_head_dim)
        mock_up_proj.assert_called_once()
        mock_npu_fused_infer_attention_score.assert_called_once()

    @patch("vllm_ascend.attention.mla_v1.npu_prefetch")
    def test_mla_preprocess(self, mock_npu_prefetch):
        mock_npu_prefetch.return_value = MagicMock()
        batch_size = 4
        seq_len = 8
        hidden_size = 1024
        hidden_states = torch.randn(batch_size * seq_len, hidden_size)

        kv_cache = MagicMock()

        attn_metadata = MagicMock()
        attn_metadata.num_decodes = 2
        attn_metadata.num_prefills = 2
        attn_metadata.num_decode_tokens = 2
        attn_metadata.num_actual_tokens = 4
        num_prefill_tokens = 2
        attn_metadata.slot_mapping = torch.arange(4)
        attn_metadata.decode.cos = torch.randn(2, 64)
        attn_metadata.decode.sin = torch.randn(2, 64)
        attn_metadata.prefill.cos = torch.randn(2, 64)
        attn_metadata.prefill.sin = torch.randn(2, 64)

        self.impl.q_a_proj = MagicMock()
        self.impl.q_a_layernorm = MagicMock()
        self.impl.q_a_layernorm.return_value = torch.randn(
            attn_metadata.num_actual_tokens, self.impl.num_heads,
            self.impl.qk_rope_head_dim)
        self.impl.kv_a_proj_with_mqa = MagicMock()
        self.impl.kv_a_proj_with_mqa.return_value = [
            torch.randn(num_prefill_tokens, self.impl.num_heads,
                        self.impl.qk_nope_head_dim + self.impl.kv_lora_rank)
        ]
        self.impl.q_proj = MagicMock()
        self.impl.q_proj.return_value = [
            torch.randn(num_prefill_tokens, self.impl.num_heads,
                        self.impl.qk_head_dim)
        ]
        self.impl.kv_b_proj = MagicMock()
        self.impl.kv_b_proj.return_value = [
            torch.randn(num_prefill_tokens, self.impl.num_heads,
                        self.impl.v_head_dim + self.impl.qk_nope_head_dim)
        ]
        self.impl.rope_single = MagicMock(side_effect=lambda x, cos, sin: x)
        self.impl.exec_kv_decode = MagicMock()
        self.impl.exec_kv_decode.return_value = [MagicMock(), MagicMock()]
        self.impl.exec_kv_prefill = MagicMock()
        self.impl.exec_kv_prefill.return_value = [
            torch.randn(num_prefill_tokens, self.impl.num_heads,
                        self.impl.qk_rope_head_dim),
            torch.randn(num_prefill_tokens, self.impl.num_heads,
                        self.impl.kv_lora_rank)
        ]
        self.impl._q_proj_and_k_up_proj = MagicMock()
        self.impl._q_proj_and_k_up_proj.return_value = [
            MagicMock(), MagicMock()
        ]
        self.impl.num_kv_heads = self.impl.num_heads

        decode_res, prefill_res = self.impl._mla_preprocess(
            hidden_states, kv_cache, attn_metadata, need_gather_q_kv=False)

        self.assertIsNotNone(decode_res)
        self.assertIsNotNone(prefill_res)

    @patch("torch_npu.npu_kv_rmsnorm_rope_cache")
    def test_exec_kv_prefill(self, mock_kv_rmsnorm_rope_cache):
        B = 2
        N = self.impl.num_kv_heads
        D = self.impl.kv_lora_rank + self.impl.qk_rope_head_dim
        kv_no_split = torch.randn(B, N, D)
        self.impl.enable_kv_nz = None
        self.impl.kv_a_layernorm.weight = MagicMock()
        self.impl.kv_a_layernorm.variance_epsilon = MagicMock()
        cos = MagicMock()
        sin = MagicMock()
        slots = MagicMock()
        kv_cache = [MagicMock(), MagicMock()]

        mock_kv_rmsnorm_rope_cache.return_value = [
            None, None,
            torch.randn(B, N, 1, self.impl.qk_rope_head_dim),
            torch.randn(B, N, 1, self.impl.kv_lora_rank)
        ]

        k_pe, k_nope = self.impl.exec_kv_prefill(kv_no_split, cos, sin,
                                                 kv_cache, slots)

        self.assertEqual(k_pe.shape[-1], self.impl.qk_rope_head_dim)
        self.assertEqual(k_nope.shape[-1], self.impl.kv_lora_rank)

    @patch("torch_npu.npu_kv_rmsnorm_rope_cache")
    def test_exec_kv_decode(self, mock_kv_rmsnorm_rope_cache):
        B = 2
        N = self.impl.num_kv_heads
        D = self.impl.kv_lora_rank + self.impl.qk_rope_head_dim
        kv_no_split = torch.randn(B, N, D)
        self.impl.enable_kv_nz = None
        self.impl.kv_a_layernorm.weight = MagicMock()
        self.impl.kv_a_layernorm.variance_epsilon = MagicMock()
        cos = MagicMock()
        sin = MagicMock()
        slots = MagicMock()
        kv_cache = [MagicMock(), MagicMock()]

        mock_kv_rmsnorm_rope_cache.return_value = [
            torch.randn(B, N, 1, self.impl.qk_rope_head_dim),
            torch.randn(B, N, 1, self.impl.kv_lora_rank), None, None
        ]

        k_pe, k_nope = self.impl.exec_kv_decode(kv_no_split, cos, sin,
                                                kv_cache, slots)

        self.assertEqual(k_pe.shape[-1], self.impl.qk_rope_head_dim)
        self.assertEqual(k_nope.shape[-1], self.impl.kv_lora_rank)
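
    # Note the mirrored mock return values: the prefill path consumes the
    # last two outputs of npu_kv_rmsnorm_rope_cache while the decode path
    # consumes the first two (inferred from how the two tests stub the op).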

    @patch("torch.npu.stream")
    @patch("vllm_ascend.attention.mla_v1.get_multistream_comm_context")
    @patch("torch_npu.npu_fused_infer_attention_score")
    def test_forward_decode(self, mock_npu_fused_infer_attention_score,
                            mock_get_multistream_comm_context,
                            mock_npu_stream):
        B = 2
        N = self.impl.num_kv_heads
        BS = 100
        HD = self.impl.v_head_dim
        self.impl.kv_lora_rank = 256
        self.impl.spec_token_num = 1
        self.impl._v_up_proj = MagicMock()
        self.impl._v_up_proj.return_value = torch.randn(B, N, HD)
        q_nope = torch.randn(B, N, self.impl.qk_nope_head_dim)
        q_pe = torch.randn(B, N, self.impl.qk_rope_head_dim)
        k_nope = torch.randn(BS, N, self.impl.kv_lora_rank)
        k_pe = torch.randn(BS, N, self.impl.qk_rope_head_dim)
        attn_metadata = MagicMock()
        attn_metadata.attn_state = AscendAttentionState.SpecDecoding
        attn_metadata.decode = MagicMock()
        attn_metadata.decode.actual_seq_lengths_q = MagicMock()
        attn_metadata.decode.seq_lens_list = MagicMock()
        self.impl.enable_kv_nz = True

        mock_npu_fused_infer_attention_score.return_value = [
            torch.randn(B, N, self.impl.kv_lora_rank), None
        ]
        mock_get_multistream_comm_context.return_value = None

        result = self.impl._forward_decode(q_nope, q_pe, k_nope, k_pe, BS,
                                           attn_metadata)

        self.assertEqual(result.shape[0], B)
        self.assertEqual(result.shape[1], N)
        self.assertEqual(result.shape[2], HD)

        self.impl.enable_kv_nz = False
        attn_metadata.attn_state = None
        mock_return_value = MagicMock()
        mock_get_multistream_comm_context.return_value = mock_return_value
        mock_return_value.before_comm_event = MagicMock()
        mock_return_value.comm_stream = MagicMock()
        mock_npu_stream.return_value = MagicMock()

        result = self.impl._forward_decode(q_nope, q_pe, k_nope, k_pe, BS,
                                           attn_metadata)

        self.assertEqual(result.shape[0], B)
        self.assertEqual(result.shape[1], N)
        self.assertEqual(result.shape[2], HD)