[Attention] add gpt-oss support (#5901)
### What this PR does / why we need it?
Please refer to https://github.com/vllm-project/vllm-ascend/pull/4467 for the historical conversation. This PR incorporates the review comments from that earlier PR and, because the attention_v1 component has since been refactored, adapts the changes to the newly revised code.
### Does this PR introduce _any_ user-facing change?
1. Modified the Attention code to support the sliding-window attention (SWA) and attention-sink features required by gpt-oss (see the first sketch below this list).
2. Modified the MoE code to add support for bias and the swigluoai activation (see the second sketch below this list).
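
For reviewers unfamiliar with the two attention features, the following minimal PyTorch sketch illustrates what a per-head learnable sink and a sliding window do to the attention computation. It is an illustration of the mechanism only, not the fused `torch_npu` kernel this PR calls; the helper name and tensor shapes are made up for the example, while `sinks`, `sliding_window`, and `scale` mirror the attributes used in the diff below.

```python
# Minimal sketch of attention with a learnable sink and sliding-window masking.
# Illustration only; not the torch_npu fused kernel used in this PR.
import torch


def sdpa_with_sink_and_swa(
    q: torch.Tensor,          # [num_heads, q_len, head_dim]
    k: torch.Tensor,          # [num_heads, kv_len, head_dim]
    v: torch.Tensor,          # [num_heads, kv_len, head_dim]
    sinks: torch.Tensor,      # [num_heads], one learnable sink logit per head
    sliding_window: int | None,
    scale: float,
) -> torch.Tensor:
    q_len, kv_len = q.shape[1], k.shape[1]
    logits = torch.einsum("hqd,hkd->hqk", q, k) * scale

    # Causal mask, optionally tightened to the last `sliding_window` positions (SWA).
    q_pos = torch.arange(kv_len - q_len, kv_len).unsqueeze(1)  # absolute query positions
    k_pos = torch.arange(kv_len).unsqueeze(0)
    visible = k_pos <= q_pos
    if sliding_window is not None:
        visible &= k_pos > q_pos - sliding_window
    logits = logits.masked_fill(~visible, float("-inf"))

    # The sink behaves like an extra "virtual" key per head: it joins the softmax
    # normalization but contributes no value, so it only absorbs probability mass.
    sink_logit = sinks.view(-1, 1, 1).expand(-1, q_len, 1)
    probs = torch.softmax(torch.cat([logits, sink_logit], dim=-1), dim=-1)
    return torch.einsum("hqk,hkd->hqd", probs[..., :kv_len], v)
```

In the diff, the same effect is obtained by passing `learnable_sink=self.sinks` together with `pre_tokens=self.sliding_window` (and `sparse_mode=4` with the SWA mask) to `npu_fused_infer_attention_score_v2`.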
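Likewise, a minimal sketch of the swigluoai activation added on the MoE side, written against the publicly documented gpt-oss reference formulation. The interleaved gate/up split and the constants `alpha = 1.702`, `limit = 7.0` are assumptions taken from that reference, not read from this PR's MoE kernels, which may organize the projections differently.

```python
# Minimal sketch of the "swigluoai" (clamped SwiGLU) activation used by gpt-oss.
# Layout and constants follow the public gpt-oss reference; the PR's fused MoE
# implementation may differ.
import torch


def swiglu_oai(gate_up: torch.Tensor, alpha: float = 1.702, limit: float = 7.0) -> torch.Tensor:
    # Assumes gate/up projections are interleaved along the last dimension.
    gate, up = gate_up[..., ::2], gate_up[..., 1::2]
    gate = gate.clamp(max=limit)               # clamp only the upper side of the gate branch
    up = up.clamp(min=-limit, max=limit)       # clamp the linear branch symmetrically
    glu = gate * torch.sigmoid(gate * alpha)   # SiLU-style gating with an alpha temperature
    return (up + 1) * glu                      # the "+1" shift is part of the OAI variant
```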
### How was this patch tested?
Please refer to https://github.com/vllm-project/vllm-ascend/pull/4467 for the performance tests; on top of those, AIME2024 accuracy tests have been newly added.

- vLLM version: v0.13.0
- vLLM main: bde38c11df
---------
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Signed-off-by: mikequan0425 <mikequan0425@foxmail.com>
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Signed-off-by: shenchuxiaofugui <1311027364@qq.com>
Signed-off-by: jiangyunfan1 <jiangyunfan1@h-partners.com>
Signed-off-by: pu-zhe <zpuaa@outlook.com>
Signed-off-by: liziyu <liziyu16@huawei.com>
Signed-off-by: wangxiaoteng <wangxiaoteng@huawei.com>
Signed-off-by: luomin2005 <luomin2005@huawei.com>
Signed-off-by: whx-sjtu <2952154980@qq.com>
Signed-off-by: SlightwindSec <slightwindsec@gmail.com>
Signed-off-by: wxsIcey <1790571317@qq.com>
Signed-off-by: MrZ20 <2609716663@qq.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: leon_tao <taoyao2@huawei.com>
Co-authored-by: nurxat <738457498@qq.com>
Co-authored-by: hfadzxy <starmoon_zhang@163.com>
Co-authored-by: mikequan <199741451@qq.com>
Co-authored-by: LI SHENGYONG <49200266+shenchuxiaofugui@users.noreply.github.com>
Co-authored-by: jiangyunfan1 <jiangyunfan1@h-partners.com>
Co-authored-by: pu-zhe <zpuaa@outlook.com>
Co-authored-by: luomin2005 <luomin2005@huawei.com>
Co-authored-by: liziyu <56102866+liziyu179@users.noreply.github.com>
Co-authored-by: wangxiaoteng <wangxiaoteng@huawei.com>
Co-authored-by: whx <56632993+whx-sjtu@users.noreply.github.com>
Co-authored-by: Cao Yi <slightwindsec@gmail.com>
Co-authored-by: Icey <1790571317@qq.com>
Co-authored-by: SILONG ZENG <2609716663@qq.com>
@@ -350,6 +350,7 @@ class AscendAttentionBackendImpl(AttentionImpl):
         logits_soft_cap: float | None,
         attn_type: str,
         kv_sharing_target_layer_name: str | None,
+        sinks: torch.Tensor = None,
         **kwargs,
     ) -> None:
         self.vllm_config = get_current_vllm_config()
@@ -372,6 +373,7 @@ class AscendAttentionBackendImpl(AttentionImpl):
         self.is_kv_producer = (
             self.vllm_config.kv_transfer_config is not None and self.vllm_config.kv_transfer_config.is_kv_producer
         )
+        self.sinks = sinks
 
     @staticmethod
     def update_graph_params(
@@ -766,6 +768,7 @@ class AscendAttentionBackendImpl(AttentionImpl):
             attn_metadata.attn_state == AscendAttentionState.DecodeOnly
             and self.sliding_window is not None
             and attn_metadata.seq_lens.shape[0] == query.size(0)
+            and self.sinks is None
         ):
             return self._forward_fia_slidingwindow(query, attn_metadata, output)
         key, value, block_size, block_table, actual_seq_lengths_kv = self._get_fia_params(key, value, attn_metadata)
@@ -778,23 +781,52 @@ class AscendAttentionBackendImpl(AttentionImpl):
         key = key[:num_tokens]
         value = value[:num_tokens]
         # Get workspace from cache or calculate it if not present.
-        attn_output, _ = torch_npu.npu_fused_infer_attention_score(
-            query=query,
-            key=key,
-            value=value,
-            atten_mask=attn_metadata.attn_mask,
-            block_table=block_table,
-            input_layout="TND",
-            block_size=block_size,
-            actual_seq_lengths=attn_metadata.actual_seq_lengths_q,
-            actual_seq_lengths_kv=actual_seq_lengths_kv,
-            num_key_value_heads=self.num_kv_heads,
-            num_heads=self.num_heads,
-            scale=self.scale,
-            sparse_mode=3,
-        )
+        if self.sinks is not None:
+            actual_seq_qlen = attn_metadata.actual_seq_lengths_q
+            if attn_metadata.attn_state == AscendAttentionState.DecodeOnly:
+                actual_seq_qlen = torch.tensor([1] * len(attn_metadata.seq_lens_list), dtype=torch.int32).cumsum(dim=0)
+            if self.sliding_window is not None:
+                atten_mask = attn_metadata.swa_mask
+                sparse_mode = 4
+            else:
+                atten_mask = attn_metadata.attn_mask
+                sparse_mode = 3
+            attn_output, _ = torch_npu.npu_fused_infer_attention_score_v2(
+                query,
+                key,
+                value,
+                num_query_heads=self.num_heads,
+                num_key_value_heads=self.num_kv_heads,
+                input_layout="TND",
+                pre_tokens=self.sliding_window if self.sliding_window is not None else SWA_INT_MAX,
+                next_tokens=0,
+                atten_mask=atten_mask,
+                sparse_mode=sparse_mode,
+                softmax_scale=self.scale,
+                block_table=block_table,
+                block_size=block_size,
+                actual_seq_qlen=actual_seq_qlen,
+                actual_seq_kvlen=actual_seq_lengths_kv,
+                learnable_sink=self.sinks,
+            )
+        else:
+            attn_output, _ = torch_npu.npu_fused_infer_attention_score(
+                query=query,
+                key=key,
+                value=value,
+                atten_mask=attn_metadata.attn_mask,
+                block_table=block_table,
+                input_layout="TND",
+                block_size=block_size,
+                actual_seq_lengths=attn_metadata.actual_seq_lengths_q,
+                actual_seq_lengths_kv=actual_seq_lengths_kv,
+                num_key_value_heads=self.num_kv_heads,
+                num_heads=self.num_heads,
+                scale=self.scale,
+                sparse_mode=3,
+            )
 
         attn_output = attn_output.view(num_tokens, self.num_heads, self.head_size)
         output[:num_tokens] = attn_output[:num_tokens]
         return output