[Lint] Style: Convert vllm-ascend/ to ruff format (Batch #12) (#6177)

### What this PR does / why we need it?
This PR is Batch #12 of the repo-wide migration of `vllm_ascend/` to `ruff format`. It mechanically reformats the Triton operator files listed below; a sketch of the typical transformations follows the table.

**Scope of Changes**:
| File Path |
| :--- |
| `vllm_ascend/ops/triton/activation/swiglu_quant.py` |
| `vllm_ascend/ops/triton/batch_invariant/matmul.py` |
| `vllm_ascend/ops/triton/batch_invariant/mean.py` |
| `vllm_ascend/ops/triton/batch_invariant/rmsnorm.py` |
| `vllm_ascend/ops/triton/fla/chunk.py` |
| `vllm_ascend/ops/triton/fla/chunk_delta_h.py` |
| `vllm_ascend/ops/triton/fla/chunk_o.py` |
| `vllm_ascend/ops/triton/fla/chunk_scaled_dot_kkt.py` |
| `vllm_ascend/ops/triton/fla/cumsum.py` |
| `vllm_ascend/ops/triton/fla/fused_qkvzba_split_reshape.py` |
| `vllm_ascend/ops/triton/fla/l2norm.py` |
| `vllm_ascend/ops/triton/fla/layernorm_guard.py` |
| `vllm_ascend/ops/triton/fla/sigmoid_gating.py` |
| `vllm_ascend/ops/triton/fla/solve_tril.py` |
| `vllm_ascend/ops/triton/fla/utils.py` |
| `vllm_ascend/ops/triton/fla/wy_fast.py` |
| `vllm_ascend/ops/triton/fused_gdn_gating.py` |
| `vllm_ascend/ops/triton/layernorm_gated.py` |
| `vllm_ascend/ops/triton/linearnorm/split_qkv_rmsnorm_rope.py` |
| `vllm_ascend/ops/triton/mamba/causal_conv1d.py` |
| `vllm_ascend/ops/triton/reject_sample.py` |
| `vllm_ascend/ops/triton/rope.py` |
| `vllm_ascend/ops/triton/spec_decode/utils.py` |
| `vllm_ascend/ops/triton/triton_utils.py` |
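
For a sense of what this batch changes, here is a minimal before/after sketch of the transformations visible in the diff excerpt below: quote normalization, one-parameter-per-line signatures ending in a trailing comma, and legacy `typing.Optional`/`typing.Tuple` replaced by PEP 604 unions and PEP 585 builtin generics. The function and its parameters are hypothetical, not taken from any of the files above.

```python
from typing import Optional, Tuple


# Before: yapf-era layout with single quotes and a manually wrapped
# signature (hypothetical function, for illustration only).
def fused_op_old(x, scale, mode='varlen', cu_seqlens: Optional[list] = None,
                 block: int = 64) -> Tuple[int, int]:
    return block, block


# After ruff: double quotes, an exploded signature with a magic
# trailing comma, and modern annotations that need no typing import.
def fused_op_new(
    x,
    scale,
    mode="varlen",
    cu_seqlens: list | None = None,
    block: int = 64,
) -> tuple[int, int]:
    return block, block
```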

### Does this PR introduce _any_ user-facing change?

No. This is a formatting-only change; it does not alter runtime behavior.

### How was this patch tested?

- vLLM version: v0.14.0
- vLLM main:
d68209402d
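
For a formatting-only batch the relevant local check is the formatter's check mode. A minimal sketch, assuming ruff is installed and configured by the repository's `pyproject.toml` (the file list and invocation are illustrative, not quoted from CI):

```python
import subprocess

# Files from the scope table above (abbreviated).
files = [
    "vllm_ascend/ops/triton/fla/wy_fast.py",
    "vllm_ascend/ops/triton/rope.py",
    # ...remaining files from the table
]

# `ruff format --check` exits non-zero if any file would be
# reformatted, so check=True raises CalledProcessError on style drift.
subprocess.run(["ruff", "format", "--check", *files], check=True)
```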

Signed-off-by: MrZ20 <2609716663@qq.com>
commit 78af0c30a3 (parent 193acc2c19)
25 changed files with 760 additions and 996 deletions

Diff excerpt for `vllm_ascend/ops/triton/fla/wy_fast.py`:

@@ -9,7 +9,6 @@
 # ruff: noqa: E501
 # mypy: ignore-errors
-from typing import Optional, Tuple
 import torch
 from vllm.triton_utils import tl, triton
@@ -17,23 +16,39 @@ from vllm.triton_utils import tl, triton
 from .utils import prepare_chunk_indices
 
 
-@triton.heuristics({'IS_VARLEN': lambda args: args['cu_seqlens'] is not None})
-@triton.jit(do_not_specialize=['T'])
-def recompute_w_u_fwd_kernel(k, v, beta, w, u, A, g, cu_seqlens, chunk_indices,
-                             T, H: tl.constexpr, Hg: tl.constexpr,
-                             K: tl.constexpr, V: tl.constexpr,
-                             BT: tl.constexpr, BK: tl.constexpr,
-                             BV: tl.constexpr, IS_VARLEN: tl.constexpr):
+@triton.heuristics({"IS_VARLEN": lambda args: args["cu_seqlens"] is not None})
+@triton.jit(do_not_specialize=["T"])
+def recompute_w_u_fwd_kernel(
+    k,
+    v,
+    beta,
+    w,
+    u,
+    A,
+    g,
+    cu_seqlens,
+    chunk_indices,
+    T,
+    H: tl.constexpr,
+    Hg: tl.constexpr,
+    K: tl.constexpr,
+    V: tl.constexpr,
+    BT: tl.constexpr,
+    BK: tl.constexpr,
+    BV: tl.constexpr,
+    IS_VARLEN: tl.constexpr,
+):
     T_max = T
     i_t_o = tl.program_id(0)
     for i_bh in range(H):
         i_b, i_h = i_bh // H, i_bh % H
         if IS_VARLEN:
-            i_n, i_t = tl.load(chunk_indices + i_t_o * 2).to(
-                tl.int32), tl.load(chunk_indices + i_t_o * 2 + 1).to(tl.int32)
-            bos, eos = tl.load(cu_seqlens + i_n).to(
-                tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
+            i_n, i_t = (
+                tl.load(chunk_indices + i_t_o * 2).to(tl.int32),
+                tl.load(chunk_indices + i_t_o * 2 + 1).to(tl.int32),
+            )
+            bos, eos = tl.load(cu_seqlens + i_n).to(tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
             T = eos - bos
         else:
             bos, eos = i_b * T, i_b * T + T
@@ -44,7 +59,7 @@ def recompute_w_u_fwd_kernel(k, v, beta, w, u, A, g, cu_seqlens, chunk_indices,
         offs_t_2d = global_offs_t[:, None]
         offs_bt = tl.arange(0, BT)[None, :]
-        ptr_A = (A + (bos * H + i_h) * BT + offs_t_2d * (H * BT) + offs_bt * 1)
+        ptr_A = A + (bos * H + i_h) * BT + offs_t_2d * (H * BT) + offs_bt * 1
         mask_A = mask_t[:, None]
         b_A = tl.load(ptr_A, mask=mask_A, other=0.0).to(tl.float32)
@@ -58,29 +73,25 @@ def recompute_w_u_fwd_kernel(k, v, beta, w, u, A, g, cu_seqlens, chunk_indices,
             offs_v = i_v * BV + tl.arange(0, BV)[None, :]
             mask_v = (mask_t[:, None]) & (offs_v < V)
-            ptr_v = (v + (bos * H + i_h) * V + offs_t_2d * (H * V) +
-                     offs_v * 1)
+            ptr_v = v + (bos * H + i_h) * V + offs_t_2d * (H * V) + offs_v * 1
             b_v = tl.load(ptr_v, mask=mask_v, other=0.0).to(tl.float32)
-            b_vb = (b_v * b_beta[:, None])
+            b_vb = b_v * b_beta[:, None]
             b_u = tl.dot(b_A, b_vb, allow_tf32=False)
-            ptr_u = (u + (bos * H + i_h) * V + offs_t_2d * (H * V) +
-                     offs_v * 1)
+            ptr_u = u + (bos * H + i_h) * V + offs_t_2d * (H * V) + offs_v * 1
             tl.store(ptr_u, b_u.to(ptr_u.dtype.element_ty), mask=mask_v)
         for i_k in range(tl.cdiv(K, BK)):
             offs_k = i_k * BK + tl.arange(0, BK)[None, :]
             mask_k = (mask_t[:, None]) & (offs_k < K)
-            ptr_k = (k + (bos * Hg + i_h // (H // Hg)) * K + offs_t_2d *
-                     (Hg * K) + offs_k * 1)
+            ptr_k = k + (bos * Hg + i_h // (H // Hg)) * K + offs_t_2d * (Hg * K) + offs_k * 1
             b_k = tl.load(ptr_k, mask=mask_k, other=0.0).to(tl.float32)
-            b_kb = (b_k * b_beta[:, None] * b_g[:, None])
+            b_kb = b_k * b_beta[:, None] * b_g[:, None]
             b_w = tl.dot(b_A, b_kb)
-            ptr_w = (w + (bos * H + i_h) * K + offs_t_2d * (H * K) +
-                     offs_k * 1)
+            ptr_w = w + (bos * H + i_h) * K + offs_t_2d * (H * K) + offs_k * 1
             tl.store(ptr_w, b_w.to(ptr_w.dtype.element_ty), mask=mask_k)
@@ -90,14 +101,13 @@ def recompute_w_u_fwd(
     beta: torch.Tensor,
     g_cumsum: torch.Tensor,
     A: torch.Tensor,
-    cu_seqlens: Optional[torch.LongTensor] = None,
-) -> Tuple[torch.Tensor, torch.Tensor]:
+    cu_seqlens: torch.LongTensor | None = None,
+) -> tuple[torch.Tensor, torch.Tensor]:
     B, T, Hg, K, V = *k.shape, v.shape[-1]
     H = v.shape[-2]
     BT = A.shape[-1]
-    chunk_indices = prepare_chunk_indices(cu_seqlens, BT) \
-        if cu_seqlens is not None else None
+    chunk_indices = prepare_chunk_indices(cu_seqlens, BT) if cu_seqlens is not None else None
     NT = triton.cdiv(T, BT) if cu_seqlens is None else len(chunk_indices)
     BK = 64
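
To make the wrapper's shape contract explicit: `B, T, Hg, K, V = *k.shape, v.shape[-1]`, `H = v.shape[-2]`, and `BT = A.shape[-1]` pin down the expected layouts. A hedged invocation sketch under those inferred shapes; the leading `k`/`v` parameters and the `beta`/`g_cumsum` shapes are assumptions (the hunk truncates the signature), and actually running it requires the Triton/NPU runtime:

```python
import torch

# Shapes inferred from the wrapper's unpacking, not documented in the diff:
#   k: (B, T, Hg, K)  grouped keys;  v: (B, T, H, V)  values, H = v.shape[-2]
#   beta, g_cumsum: assumed (B, T, H), applied per head in the kernel
#   A: (B, T, H, BT) with BT = A.shape[-1] as the chunk size
B, T, H, Hg, K, V, BT = 2, 256, 8, 8, 64, 64, 64
k = torch.randn(B, T, Hg, K, dtype=torch.float16)
v = torch.randn(B, T, H, V, dtype=torch.float16)
beta = torch.randn(B, T, H, dtype=torch.float16)
g_cumsum = torch.randn(B, T, H, dtype=torch.float32)
A = torch.randn(B, T, H, BT, dtype=torch.float16)

# Fixed-length batch: with cu_seqlens=None the wrapper skips
# prepare_chunk_indices and launches NT = ceil(T / BT) grid steps.
w, u = recompute_w_u_fwd(k=k, v=v, beta=beta, g_cumsum=g_cumsum, A=A, cu_seqlens=None)
```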