Revert "[Perf] Add FIA interface in FA case" (#3553)

Reverts vllm-project/vllm-ascend#3321
This revert addresses an output dimension mismatch and an accuracy issue introduced by the original change.
- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

Signed-off-by: ZYang6263 <zy626375@gmail.com>
This commit is contained in:
ZYang6263
2025-10-20 19:56:10 +08:00
committed by GitHub
parent 34c2996ab8
commit b9e2896eb1
2 changed files with 12 additions and 50 deletions

View File

@@ -898,12 +898,9 @@ class NPUModelRunner(LoRAModelRunnerMixin):
# Prefill without cache situation.
elif attn_state == AscendAttentionState.PrefillNoCache:
if torch.version.cann.startswith("8.3"):
return self.attn_mask_builder.get_splitfuse_attn_mask()
else:
max_seq_len = max(seq_lens, default=0)
return self.attn_mask_builder.get_attn_mask(
max_seq_len, self.dtype, self.device)
max_seq_len = max(seq_lens.max().item(), 0)
return self.attn_mask_builder.get_attn_mask(
max_seq_len, self.dtype, self.device)
# Prefill with cache hit.
elif attn_state == AscendAttentionState.PrefillCacheHit:
return self.attn_mask_builder.get_attn_mask(