[Bugfix] Fix model run _npu_flash_attention hang issue (#4410)
Fix a hang in _npu_flash_attention when models run through _forward_prefill_no_cache; the hang was caused by the attention mask being built with the wrong dtype.
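For context, here is a minimal sketch of the two mask forms involved (illustrative only, not the vllm-ascend implementation; shapes and values follow the updated unit test below). Judging from the removed special case in the patch, the no-cache prefill path that feeds _npu_flash_attention wants an additive mask in the model dtype (float("-inf") above the diagonal), while the chunked-prefill/splitfuse path uses a boolean mask; before this patch a 2048-token prefill without cache could receive the boolean form instead.

```python
import torch

# Illustrative sketch only (not the vllm-ascend code): the two mask layouts.
max_seq_len = 2048

# Boolean mask: True marks positions that must be masked out.
bool_mask = torch.triu(
    torch.ones(max_seq_len, max_seq_len, dtype=torch.bool), diagonal=1)

# Additive mask in the model dtype: float("-inf") above the diagonal,
# zeros elsewhere -- the form the fixed get_attn_mask() is expected to
# return (see the updated test expectations below).
float_mask = torch.zeros(max_seq_len, max_seq_len, dtype=torch.float16)
float_mask.masked_fill_(bool_mask, float("-inf"))

assert bool_mask[0][-1].item() is True
assert float_mask[0][-1] == torch.tensor(float("-inf"), dtype=torch.float16)
```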
### How was this patch tested?
Tested on Qwen2.5-VL and Qwen2.5-Omni.
- vLLM version: v0.11.0
- vLLM main:
2918c1b49c
Signed-off-by: Ting FU <futing10@huawei.com>
@@ -74,10 +74,11 @@ class TestAttentionMaskBuilder(TestBase):
         attn_mask = attention_mask_builder.get_attn_mask(
             max_seq_len=2048, dtype=torch.float16, device=torch.device("cpu"))
         self.assertEqual(attn_mask.shape, (2048, 2048))
-        self.assertEqual(attn_mask[0][-1], torch.tensor(True))
-        self.assertEqual(attention_mask_builder._seq_len_cached, 1024)
+        self.assertEqual(attn_mask[0][-1],
+                         torch.tensor(float("-inf"), dtype=torch.float16))
+        self.assertEqual(attention_mask_builder._seq_len_cached, 2048)
         self.assertEqual(attention_mask_builder.attn_mask_cache.shape,
-                         (1024, 1024))
+                         (2048, 2048))
         self.assertEqual(attention_mask_builder.attn_mask_cache[0][-1],
                          torch.tensor(float("-inf"), dtype=torch.float16))
@@ -67,8 +67,6 @@ class AttentionMaskBuilder:
     def get_attn_mask(self, max_seq_len: int, dtype: torch.dtype,
                       device: torch.device):
-        if max_seq_len == 2048:
-            return self.chunked_prefill_attn_mask.to(torch.bool)
         self._update_attn_cache(max_seq_len, dtype)
         return self.attn_mask_cache[:max_seq_len, :max_seq_len].contiguous(
         ).to(device, non_blocking=True)
@@ -991,8 +991,8 @@ class NPUModelRunner(LoRAModelRunnerMixin):
                 max_seq_len, self.dtype, self.device)
         # Prefill with cache hit.
         elif attn_state == AscendAttentionState.PrefillCacheHit:
-            return self.attn_mask_builder.get_attn_mask(
-                2048, self.dtype, self.device)
+            return self.attn_mask_builder.get_splitfuse_attn_mask().to(
+                torch.bool)
         # Decode-only situation.
         else:
             return None
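Taken together, the expected post-patch behavior can be sketched as below. This is a hedged illustration, not part of the patch: the import path and the AttentionMaskBuilder construction are assumptions, the no-argument get_splitfuse_attn_mask() call is taken from the hunk above, and the assertions mirror the updated unit test.

```python
import torch

# Import path and constructor signature are assumed for illustration only.
from vllm_ascend.attention.attention_mask import AttentionMaskBuilder

builder = AttentionMaskBuilder(max_seq_len=1024, dtype=torch.float16)

# Prefill without KV cache: the mask now always comes back through the
# dtype-aware cache path (the 2048 special case is gone), so
# _npu_flash_attention no longer receives a bool mask.
mask = builder.get_attn_mask(
    max_seq_len=2048, dtype=torch.float16, device=torch.device("cpu"))
assert mask.shape == (2048, 2048)
assert mask[0][-1] == torch.tensor(float("-inf"), dtype=torch.float16)

# Prefill with cache hit: the model runner now asks for the splitfuse mask
# explicitly and casts it to bool itself.
splitfuse_mask = builder.get_splitfuse_attn_mask().to(torch.bool)
```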