[Ops][Misc] Refactor and optimize CausalConv1d for Ascend (#7495)

### What this PR does / why we need it?
During the prefill phase of Qwen3-Next and Qwen3.5, the
`torch.ops._C_ascend.causal_conv1d_fn` operator is a significant
performance bottleneck. To address this, we replace it with the
optimized custom operator `torch.ops._C_ascend.npu_causal_conv1d_custom`.
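For context, the operator computes a depthwise causal 1-D convolution over the token dimension of the mixed QKV stream. Below is a minimal pure-PyTorch sketch of the single-sequence semantics, as our own illustration of the standard Mamba-style definition; the Ascend kernel additionally handles varlen batching and cached conv states, which this sketch omits:

```python
import torch
import torch.nn.functional as F

def causal_conv1d_ref(x, weight, bias=None, activation=True):
    # x: (dim, seqlen); weight: (dim, width) -- one filter per channel.
    dim, width = weight.shape
    x = F.pad(x, (width - 1, 0))  # left-pad only, so position t sees x[:, :t+1]
    out = F.conv1d(x.unsqueeze(0), weight.unsqueeze(1), bias=bias,
                   groups=dim).squeeze(0)  # depthwise: groups == channels
    return F.silu(out) if activation else out

# e.g. 4 channels, kernel width 4, sequence of 16 tokens
y = causal_conv1d_ref(torch.randn(4, 16), torch.randn(4, 4))
assert y.shape == (4, 16)
```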

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
1. Accuracy test (gsm8k):
```
[2026-03-20 16:44:22,961] [ais_bench] [INFO] Start launch task state board ...
+-----------------------------+-----------+------------+-------------+----------+-------------------------------------------+---------------------+
| Task Name                   |   Process | Progress   | Time Cost   | Status   | Log Path                                  | Extend Parameters   |
+=============================+===========+============+=============+==========+===========================================+=====================+
| vllm-api-general-chat/gsm8k |   2918978 | NA         | 0:00:01     | finish   | logs/eval/vllm-api-general-chat/gsm8k.out | None                |
+-----------------------------+-----------+------------+-------------+----------+-------------------------------------------+---------------------+
[2026-03-20 16:44:34,284] [ais_bench] [INFO] Evaluation tasks completed.
[2026-03-20 16:44:34,287] [ais_bench] [INFO] Summarizing evaluation results...
dataset    version    metric    mode      vllm-api-general-chat
---------  ---------  --------  ------  -----------------------
gsm8k      271d0b     accuracy  gen                       96.21
```
2. Modified unit test:
```
pytest -sv /home/c30006096/vllm-ascend/tests/e2e/nightly/single_node/ops/singlecard_ops/triton/test_causal_conv1d.py::test_ascend_causal_conv1d
```

- vLLM version: v0.17.0
- vLLM main: 8b6325758c

Signed-off-by: wenba0 <3054239545@qq.com>
Signed-off-by: jiaojiao <56385650+wenba0@users.noreply.github.com>
Commit 1de805ce0a (parent e942b62d74), authored by jiaojiao, committed by GitHub, 2026-03-24 00:07:12 +08:00.
16 changed files with 907 additions and 554 deletions.

In `vllm_ascend/patch/worker/patch_qwen3_5.py` (the module the second file imports the helper from):

```diff
@@ -33,6 +33,13 @@ from vllm_ascend.ops.triton.fused_gdn_gating import fused_gdn_gating_patch
 from vllm_ascend.utils import enable_sp, vllm_version_is
 
 
+def to_int64_tuple(t):
+    t = t.to(torch.int64)
+    if t.dim() == 0:
+        return (t.item(),)
+    return tuple(t.tolist())
+
+
 class AscendQwen3_5GatedDeltaNet(Qwen3_5GatedDeltaNet):
     def _forward_core(
         self,
```
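`to_int64_tuple` moves a device tensor to the host as a plain tuple of Python ints, special-casing 0-dim tensors, for which `tuple(t.tolist())` would raise because `tolist()` returns a bare scalar. A quick illustration (our own example, not from the PR):

```python
import torch

# Standalone copy of the helper added above.
def to_int64_tuple(t):
    t = t.to(torch.int64)
    if t.dim() == 0:
        return (t.item(),)  # tuple(t.tolist()) would raise TypeError here
    return tuple(t.tolist())

assert to_int64_tuple(torch.tensor([0, 5, 8], dtype=torch.int32)) == (0, 5, 8)
assert to_int64_tuple(torch.tensor([False, True])) == (0, 1)  # bools -> 0/1
assert to_int64_tuple(torch.tensor(7)) == (7,)                # 0-dim scalar
```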
```diff
@@ -110,16 +117,19 @@ class AscendQwen3_5GatedDeltaNet(Qwen3_5GatedDeltaNet):
         if attn_metadata.num_prefills > 0:
             if mixed_qkv_non_spec is not None:
                 conv_weights_T = conv_weights.transpose(0, 1)
-                mixed_qkv_non_spec = torch.ops._C_ascend.causal_conv1d_fn(
+                activation_num = 1 if self.activation else 0
+                mixed_qkv_non_spec = torch.ops._C_ascend.npu_causal_conv1d_custom(
                     mixed_qkv_non_spec,
                     conv_weights_T,
-                    self.conv1d.bias,
-                    activation=self.activation,
                     conv_state=self_kv_cache[0],
-                    has_initial_state=has_initial_state,
-                    non_spec_state_indices_tensor=non_spec_state_indices_tensor,
-                    non_spec_query_start_loc=non_spec_query_start_loc,
+                    bias_opt=self.conv1d.bias,
+                    query_start_loc_opt=to_int64_tuple(non_spec_query_start_loc),
+                    cache_indices_opt=to_int64_tuple(non_spec_state_indices_tensor),
+                    initial_state_mode_opt=to_int64_tuple(has_initial_state),
+                    num_accepted_tokens_opt=[],
+                    activation_mode=activation_num,
+                    pad_slot_id=PAD_SLOT_ID,
+                    run_mode=0,
                 )
         elif attn_metadata.num_decodes > 0:
             mixed_qkv_non_spec = causal_conv1d_update(
```
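Note that `npu_causal_conv1d_custom` takes the query start offsets, cache slot indices, and initial-state flags as host-side int tuples rather than device tensors, which is why the call site wraps them in `to_int64_tuple` (each conversion implies a device-to-host copy). To make the values concrete, a hypothetical two-sequence prefill batch (illustrative values of our own, reusing the helper this PR adds):

```python
import torch
from vllm_ascend.patch.worker.patch_qwen3_5 import to_int64_tuple

# Two prefill sequences of lengths 5 and 3 (hypothetical values; the names
# mirror the call site above).
non_spec_query_start_loc = torch.tensor([0, 5, 8])    # cumulative token offsets
non_spec_state_indices_tensor = torch.tensor([2, 7])  # conv-state cache slots
has_initial_state = torch.tensor([False, True])       # resume from cached state?

print(to_int64_tuple(non_spec_query_start_loc))       # (0, 5, 8)
print(to_int64_tuple(non_spec_state_indices_tensor))  # (2, 7)
print(to_int64_tuple(has_initial_state))              # (0, 1)
```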

In the Qwen3-Next counterpart of the same patch (file path not shown in the extract):

```diff
@@ -32,6 +32,7 @@ from vllm.v1.attention.backends.utils import PAD_SLOT_ID
 from vllm_ascend.attention.utils import maybe_save_kv_layer_to_connector
 from vllm_ascend.ops.triton.fla.fused_qkvzba_split_reshape import fused_qkvzba_split_reshape_cat
 from vllm_ascend.ops.triton.fused_gdn_gating import fused_gdn_gating_patch
+from vllm_ascend.patch.worker.patch_qwen3_5 import to_int64_tuple
 from vllm_ascend.utils import enable_sp, vllm_version_is
 
@@ -167,16 +168,19 @@ class AscendQwen3Next_GatedDeltaNet(Qwen3NextGatedDeltaNet):
         if attn_metadata.num_prefills > 0:
             if mixed_qkv_non_spec is not None:
                 conv_weights_T = conv_weights.transpose(0, 1)
-                mixed_qkv_non_spec = torch.ops._C_ascend.causal_conv1d_fn(
+                activation_num = 1 if self.activation else 0
+                mixed_qkv_non_spec = torch.ops._C_ascend.npu_causal_conv1d_custom(
                     mixed_qkv_non_spec,
                     conv_weights_T,
-                    self.conv1d.bias,
-                    activation=self.activation,
                     conv_state=self_kv_cache[0],
-                    has_initial_state=has_initial_state,
-                    non_spec_state_indices_tensor=non_spec_state_indices_tensor,
-                    non_spec_query_start_loc=non_spec_query_start_loc,
+                    bias_opt=self.conv1d.bias,
+                    query_start_loc_opt=to_int64_tuple(non_spec_query_start_loc),
+                    cache_indices_opt=to_int64_tuple(non_spec_state_indices_tensor),
+                    initial_state_mode_opt=to_int64_tuple(has_initial_state),
+                    num_accepted_tokens_opt=[],
+                    activation_mode=activation_num,
+                    pad_slot_id=PAD_SLOT_ID,
+                    run_mode=0,
                 )
         elif attn_metadata.num_decodes > 0:
             mixed_qkv_non_spec = causal_conv1d_update(
```