[BugFix] [310p] Fix attention accuracy issue (#6803)

### What this PR does / why we need it?
This pull request fixes an attention accuracy issue on 310P by having `AttentionMaskBuilder310` take the model's maximum sequence length as a constructor argument. The attention mask is now sized from the model configuration (`max_model_len`) rather than a fixed internal value, so it is generated at the correct length for the model being served, which the attention mechanism needs to produce correct outputs.
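A minimal sketch of the shape of the fix, for orientation only: the two-argument constructor mirrors the updated unit test in this commit, but the mask construction below is a simplified assumption, not the actual vllm-ascend implementation.

```python
import torch


class AttentionMaskBuilder310:
    """Sketch: size the causal mask from the model's max length instead of a fixed default."""

    def __init__(self, device: torch.device, max_seq_len: int):
        self.max_seq_len = max_seq_len
        # -inf above the diagonal, 0 elsewhere (standard causal mask).
        mask = torch.full((max_seq_len, max_seq_len), float("-inf"), dtype=torch.float16)
        self.attn_mask = torch.triu(mask, diagonal=1).to(device)


# The caller would pass the value from the engine's model config, e.g.
# builder = AttentionMaskBuilder310(device, vllm_config.model_config.max_model_len)
```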
It also updates `fused_moe` to match the main branch.
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
Qwen3 dense and MoE model e2e tests
- vLLM version: v0.15.0
- vLLM main: 83b47f67b1

---------

Signed-off-by: pu-zhe <zpuaa@outlook.com>
Commit e76b69b9ef (parent 9f8b84e5fc)
Author: pu-zhe (committed by GitHub)
Date: 2026-02-26 14:30:39 +08:00
8 changed files with 76 additions and 43 deletions


@@ -24,23 +24,26 @@ def test_qwen3_dense_tp2_fp16():
     ]
     max_tokens = 5
     with VllmRunner(
-        "Qwen/Qwen3-8B",
-        tensor_parallel_size=2,
-        enforce_eager=True,
-        dtype="float16"
+        "Qwen/Qwen3-8B",
+        tensor_parallel_size=2,
+        enforce_eager=True,
+        dtype="float16",
+        max_model_len=16384,
     ) as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)


 def test_qwen3_dense_tp4_w8a8():
     example_prompts = [
         "Hello, my name is",
     ]
     max_tokens = 5
     with VllmRunner(
-        "vllm-ascend/Qwen3-32B-W8A8",
-        tensor_parallel_size=4,
-        enforce_eager=True,
-        dtype="float16",
-        quantization="ascend"
+        "vllm-ascend/Qwen3-32B-W8A8",
+        tensor_parallel_size=4,
+        enforce_eager=True,
+        dtype="float16",
+        quantization="ascend",
+        max_model_len=16384,
     ) as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)


@@ -24,37 +24,42 @@ def test_qwen3_moe_tp4_fp16():
     ]
     max_tokens = 5
     with VllmRunner(
-        "Qwen/Qwen3-30B-A3B",
-        tensor_parallel_size=4,
-        enforce_eager=True,
-        dtype="float16"
+        "Qwen/Qwen3-30B-A3B",
+        tensor_parallel_size=4,
+        enforce_eager=True,
+        dtype="float16",
+        max_model_len=16384,
     ) as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)


 def test_qwen3_moe_ep4_fp16():
     example_prompts = [
         "Hello, my name is",
     ]
     max_tokens = 5
     with VllmRunner(
-        "Qwen/Qwen3-30B-A3B",
-        tensor_parallel_size=4,
-        enforce_eager=True,
-        dtype="float16",
-        enable_expert_parallel=True
+        "Qwen/Qwen3-30B-A3B",
+        tensor_parallel_size=4,
+        enforce_eager=True,
+        dtype="float16",
+        enable_expert_parallel=True,
+        max_model_len=16384,
     ) as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)


 def test_qwen3_moe_tp2_w8a8():
     example_prompts = [
         "Hello, my name is",
     ]
     max_tokens = 5
     with VllmRunner(
-        "vllm-ascend/Qwen3-30B-A3B-W8A8",
-        tensor_parallel_size=2,
-        enforce_eager=True,
-        dtype="float16",
-        quantization="ascend"
+        "vllm-ascend/Qwen3-30B-A3B-W8A8",
+        tensor_parallel_size=2,
+        enforce_eager=True,
+        dtype="float16",
+        quantization="ascend",
+        max_model_len=16384,
     ) as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)


@@ -24,23 +24,26 @@ def test_qwen3_dense_tp1_fp16():
     ]
     max_tokens = 5
     with VllmRunner(
-        "Qwen/Qwen3-8B",
-        tensor_parallel_size=1,
-        enforce_eager=True,
-        dtype="float16"
+        "Qwen/Qwen3-8B",
+        tensor_parallel_size=1,
+        enforce_eager=True,
+        dtype="float16",
+        max_model_len=16384,
     ) as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)


 def test_qwen3_dense_tp1_w8a8():
     example_prompts = [
         "Hello, my name is",
     ]
     max_tokens = 5
     with VllmRunner(
-        "vllm-ascend/Qwen3-8B-W8A8",
-        tensor_parallel_size=1,
-        enforce_eager=True,
-        dtype="float16",
-        quantization="ascend"
+        "vllm-ascend/Qwen3-8B-W8A8",
+        tensor_parallel_size=1,
+        enforce_eager=True,
+        dtype="float16",
+        quantization="ascend",
+        max_model_len=16384,
     ) as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)
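All three e2e test files above make the same change: `VllmRunner` (the test harness wrapper around the vLLM engine) is now given an explicit `max_model_len=16384`, presumably to pin the context length the mask is built for now that it follows the model config. A rough standalone equivalent using the public vLLM offline-inference API is sketched below; the harness internals are not part of this diff.

```python
from vllm import LLM, SamplingParams

# Roughly what test_qwen3_dense_tp2_fp16 exercises: greedy decoding of 5 new
# tokens with a capped context length (public vLLM API, not VllmRunner).
llm = LLM(
    model="Qwen/Qwen3-8B",
    tensor_parallel_size=2,
    enforce_eager=True,
    dtype="float16",
    max_model_len=16384,
)
outputs = llm.generate(
    ["Hello, my name is"],
    SamplingParams(temperature=0.0, max_tokens=5),  # temperature=0.0 -> greedy
)
print(outputs[0].outputs[0].text)
```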


@@ -23,7 +23,8 @@ from vllm_ascend._310p.attention.attention_mask import AttentionMaskBuilder310

 class TestAttentionMaskBuilder310(TestBase):

     def setUp(self):
-        self.attention_mask_builder = AttentionMaskBuilder310(torch.device("cpu"))
+        self.max_seqlen = 4096
+        self.attention_mask_builder = AttentionMaskBuilder310(torch.device("cpu"), self.max_seqlen)

     def test_get_attention_mask_310_for_pooling_model(self):
         model_config = MagicMock()
@@ -36,7 +37,7 @@ class TestAttentionMaskBuilder310(TestBase):
         mock_format_cast.side_effect = lambda x, y: x
         model_config = MagicMock()
         attn_mask = self.attention_mask_builder.get_attention_mask(model_config)
-        self.assertEqual(attn_mask.shape, (1, 128, 2048, 16))
+        self.assertEqual(attn_mask.shape, (1, self.max_seqlen // 16, self.max_seqlen, 16))
         self.assertEqual(attn_mask[0][-1][0][-1], torch.tensor(float("-inf"), dtype=torch.float16))

     @patch("torch_npu.npu_format_cast")
@@ -47,7 +48,7 @@ class TestAttentionMaskBuilder310(TestBase):
         sliding_window = 128
         swa_mask = self.attention_mask_builder.get_swa_mask(torch.float16, sliding_window)
-        self.assertEqual(swa_mask.shape, (1, 128, 2048, 16))
+        self.assertEqual(swa_mask.shape, (1, self.max_seqlen // 16, self.max_seqlen, 16))
         self.assertEqual(swa_mask[0][-1][0][-1], torch.tensor(float("-inf"), dtype=torch.float16))
         self.assertEqual(swa_mask[0][0][-1][0], torch.tensor(float("-inf"), dtype=torch.float16))
@@ -58,4 +59,4 @@ class TestAttentionMaskBuilder310(TestBase):
         attn_metadata.query_start_loc = torch.tensor([0, 1, 5])
         attn_metadata.seq_lens = torch.tensor([7, 4])
         attn_mask = self.attention_mask_builder.get_splitfuse_mask(attn_metadata, torch.device("cpu"))
-        self.assertEqual(attn_mask.shape, (1, 128, 16, 16))
+        self.assertEqual(attn_mask.shape, (1, self.max_seqlen // 16, 16, 16))
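For readers checking the shape assertions: the builder exposes the `(max_seqlen, max_seqlen)` mask in a 16-wide blocked layout, so with `self.max_seqlen = 4096` the expected shape becomes `(1, 256, 4096, 16)` where the old hard-coded default gave `(1, 128, 2048, 16)`. The plain-PyTorch sketch below reproduces only the shape; the exact blocking performed on the NPU side is an assumption here.

```python
import torch

max_seqlen = 4096  # matches self.max_seqlen in the updated test

# Causal mask: -inf above the diagonal, 0 elsewhere.
mask = torch.triu(
    torch.full((max_seqlen, max_seqlen), float("-inf"), dtype=torch.float16),
    diagonal=1,
)

# Split the last axis into 16-wide blocks and move the block index forward.
blocked = mask.reshape(max_seqlen, max_seqlen // 16, 16).permute(1, 0, 2).unsqueeze(0)
assert blocked.shape == (1, max_seqlen // 16, max_seqlen, 16)  # (1, 256, 4096, 16)
```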