[BugFix] [310p] Fix attention accuracy issue (#6803)

### What this PR does / why we need it?
This pull request fixes an attention accuracy issue by making the
AttentionMaskBuilder310 honor the model's maximum length. Attention mask
generation is now parameterized by the model configuration instead of a
fixed internal value, so masks are built correctly for the full
configured sequence length, which is essential for the attention
mechanism to behave correctly.
It also updates fused_moe to match the main branch.
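
For context, a minimal sketch of the shape of the fix (the constructor signature, the cache layout, and the usage line are illustrative assumptions, not the actual vllm-ascend implementation): the mask builder is sized from the model's configured maximum length rather than a fixed constant.

```python
# Illustrative sketch only; names and defaults here are assumptions,
# not the real vllm-ascend code.
import torch

class AttentionMaskBuilder310:
    def __init__(self, max_model_len: int, dtype: torch.dtype = torch.float16):
        # Before the fix, a fixed internal size was used here, so prompts
        # longer than that constant could receive a truncated causal mask.
        self.max_model_len = max_model_len
        # Additive causal mask: -inf above the diagonal, 0 elsewhere.
        self.attn_mask_cache = torch.triu(
            torch.full((max_model_len, max_model_len), float("-inf"), dtype=dtype),
            diagonal=1,
        )

    def get_attn_mask(self, seq_len: int) -> torch.Tensor:
        # Slice the cached mask down to the current sequence length.
        return self.attn_mask_cache[:seq_len, :seq_len]

# Usage: size the mask from the engine config instead of a constant,
# e.g. AttentionMaskBuilder310(max_model_len=vllm_config.model_config.max_model_len)
```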
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
Qwen3 dense model & MoE model e2e tests
- vLLM version: v0.15.0
- vLLM main: 83b47f67b1

---------

Signed-off-by: pu-zhe <zpuaa@outlook.com>
Author: pu-zhe
Date: 2026-02-26 14:30:39 +08:00
Committed by: GitHub
Commit: e76b69b9ef (parent: 9f8b84e5fc)
8 changed files with 76 additions and 43 deletions


@@ -27,6 +27,7 @@
 from vllm_ascend.ascend_forward_context import MoECommType
 from vllm_ascend.ops.fused_moe.experts_selector import zero_experts_compute
 from vllm_ascend.ops.fused_moe.moe_comm_method import FusedExpertsResult, _MoECommMethods
 from vllm_ascend.quantization.methods.base import QuantType
+from vllm_ascend.utils import vllm_version_is
 from .experts_selector import select_experts
 from .moe_comm_method import AllGatherCommImpl310
@@ -153,6 +154,26 @@ class AscendFusedMoE310(FusedMoE):
         _MoECommMethods[MoECommType.ALLGATHER] = AllGatherCommImpl310(self.moe_config)
+        if not vllm_version_is("0.15.0"):
+            self.runner = self._init_runner()
+
+    if not vllm_version_is("0.15.0"):
+
+        def _init_runner(self):
+            from vllm_ascend.ops.fused_moe.fused_moe import AscendMoERunner
+            return AscendMoERunner(
+                layer=self,
+                moe_config=self.moe_config,
+                router=self.router,
+                routed_input_transform=self._routed_input_transform,
+                gate=self.gate,
+                shared_experts=self.shared_experts,
+                quant_method=self.quant_method,
+                reduce_results=self.reduce_results,
+                enable_dbo=self.vllm_config.parallel_config.enable_dbo,
+            )
+
     def init_experts_map(self, moe_config):
         """
         Initialize expert mapping for MoE (Mixture of Experts) model.
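
A note on the pattern in the hunk above: `vllm_version_is` is checked both inside `__init__` and at class-body level. Because a Python class body is ordinary executable code, a method can be defined conditionally, so `_init_runner` only exists when the installed vLLM is not 0.15.0. A minimal self-contained sketch (the `Example` class and the stubbed version check are hypothetical, for illustration only):

```python
INSTALLED_VLLM = "0.16.0"  # stand-in for the version discovered at import time

def vllm_version_is(version: str) -> bool:
    # Simplified stand-in for vllm_ascend.utils.vllm_version_is.
    return INSTALLED_VLLM == version

class Example:
    def __init__(self) -> None:
        if not vllm_version_is("0.15.0"):
            # Newer vLLM exposes the runner path, so construct it eagerly.
            self.runner = self._init_runner()

    # A class body is executable code, so this `if` decides at class
    # creation time whether `_init_runner` is defined at all.
    if not vllm_version_is("0.15.0"):
        def _init_runner(self):
            return "runner"  # placeholder for AscendMoERunner(...)

print(hasattr(Example(), "runner"))  # True whenever vLLM is not 0.15.0
```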