### What this PR does / why we need it?
Reformats the Triton kernel sources listed below from yapf-style wrapping to ruff-format style (double quotes, magic trailing commas, longer lines) and modernizes type hints (`Optional[X]` to `X | None`, `Tuple` to `tuple`). No functional change is intended; a sketch of the pattern follows the table.

**Scope of Changes**:
| File Path |
| :--- |
| `vllm_ascend/ops/triton/activation/swiglu_quant.py` |
| `vllm_ascend/ops/triton/batch_invariant/matmul.py` |
| `vllm_ascend/ops/triton/batch_invariant/mean.py` |
| `vllm_ascend/ops/triton/batch_invariant/rmsnorm.py` |
| `vllm_ascend/ops/triton/fla/chunk.py` |
| `vllm_ascend/ops/triton/fla/chunk_delta_h.py` |
| `vllm_ascend/ops/triton/fla/chunk_o.py` |
| `vllm_ascend/ops/triton/fla/chunk_scaled_dot_kkt.py` |
| `vllm_ascend/ops/triton/fla/cumsum.py` |
| `vllm_ascend/ops/triton/fla/fused_qkvzba_split_reshape.py` |
| `vllm_ascend/ops/triton/fla/l2norm.py` |
| `vllm_ascend/ops/triton/fla/layernorm_guard.py` |
| `vllm_ascend/ops/triton/fla/sigmoid_gating.py` |
| `vllm_ascend/ops/triton/fla/solve_tril.py` |
| `vllm_ascend/ops/triton/fla/utils.py` |
| `vllm_ascend/ops/triton/fla/wy_fast.py` |
| `vllm_ascend/ops/triton/fused_gdn_gating.py` |
| `vllm_ascend/ops/triton/layernorm_gated.py` |
| `vllm_ascend/ops/triton/linearnorm/split_qkv_rmsnorm_rope.py` |
| `vllm_ascend/ops/triton/mamba/causal_conv1d.py` |
| `vllm_ascend/ops/triton/reject_sample.py` |
| `vllm_ascend/ops/triton/rope.py` |
| `vllm_ascend/ops/triton/spec_decode/utils.py` |
| `vllm_ascend/ops/triton/triton_utils.py` |
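
All edits follow the same mechanical pattern; a minimal sketch of the before/after style (hypothetical function, not taken from the diff):

```python
import torch


# Old yapf style wrapped arguments to align with the opening parenthesis and
# used typing.Optional, e.g.
#   def fwd(q: torch.Tensor,
#           cu_seqlens: Optional[torch.LongTensor] = None) -> torch.Tensor: ...
# New ruff-format style explodes the signature with a magic trailing comma and
# uses PEP 604 unions:
def fwd(
    q: torch.Tensor,
    cu_seqlens: torch.LongTensor | None = None,
) -> torch.Tensor:
    return q
```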
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
- vLLM version: v0.14.0
- vLLM main: d68209402d
Signed-off-by: MrZ20 <2609716663@qq.com>
**`vllm_ascend/ops/triton/fla/chunk.py`**

```diff
@@ -9,7 +9,6 @@
 # ruff: noqa: E501
 # mypy: ignore-errors
 import warnings
-from typing import Optional
 
 import torch
 from einops import rearrange
@@ -25,22 +24,20 @@ from .utils import input_guard
 from .wy_fast import recompute_w_u_fwd
 
 
-def chunk_gated_delta_rule_fwd(q: torch.Tensor,
-                               k: torch.Tensor,
-                               v: torch.Tensor,
-                               g: torch.Tensor,
-                               beta: torch.Tensor,
-                               scale: float,
-                               initial_state: torch.Tensor,
-                               output_final_state: bool,
-                               cu_seqlens: Optional[torch.LongTensor] = None):
+def chunk_gated_delta_rule_fwd(
+    q: torch.Tensor,
+    k: torch.Tensor,
+    v: torch.Tensor,
+    g: torch.Tensor,
+    beta: torch.Tensor,
+    scale: float,
+    initial_state: torch.Tensor,
+    output_final_state: bool,
+    cu_seqlens: torch.LongTensor | None = None,
+):
     g = chunk_local_cumsum(g, chunk_size=64, cu_seqlens=cu_seqlens)
     # obtain WY representation. u is actually the new v.
-    A = chunk_scaled_dot_kkt_fwd(k=k,
-                                 beta=beta,
-                                 g_cumsum=g,
-                                 cu_seqlens=cu_seqlens,
-                                 output_dtype=torch.float32)
+    A = chunk_scaled_dot_kkt_fwd(k=k, beta=beta, g_cumsum=g, cu_seqlens=cu_seqlens, output_dtype=torch.float32)
     A = solve_tril(A=A, cu_seqlens=cu_seqlens, output_dtype=k.dtype)
     w, u = recompute_w_u_fwd(
         k=k,
@@ -75,20 +72,21 @@ def chunk_gated_delta_rule_fwd(q: torch.Tensor,
 
 
 class ChunkGatedDeltaRuleFunction(torch.autograd.Function):
-
     @staticmethod
     @input_guard
-    def forward(ctx,
-                q: torch.Tensor,
-                k: torch.Tensor,
-                v: torch.Tensor,
-                g: torch.Tensor,
-                beta: torch.Tensor,
-                scale: float,
-                initial_state: torch.Tensor,
-                output_final_state: bool,
-                cu_seqlens: Optional[torch.LongTensor] = None,
-                use_qk_l2norm_in_kernel: bool = False):
+    def forward(
+        ctx,
+        q: torch.Tensor,
+        k: torch.Tensor,
+        v: torch.Tensor,
+        g: torch.Tensor,
+        beta: torch.Tensor,
+        scale: float,
+        initial_state: torch.Tensor,
+        output_final_state: bool,
+        cu_seqlens: torch.LongTensor | None = None,
+        use_qk_l2norm_in_kernel: bool = False,
+    ):
         if use_qk_l2norm_in_kernel:
             q = l2norm_fwd(q)
             k = l2norm_fwd(k)
@@ -110,17 +108,19 @@ class ChunkGatedDeltaRuleFunction(torch.autograd.Function):
 
 
 @torch.compiler.disable
-def chunk_gated_delta_rule(q: torch.Tensor,
-                           k: torch.Tensor,
-                           v: torch.Tensor,
-                           g: torch.Tensor,
-                           beta: torch.Tensor,
-                           scale: float = None,
-                           initial_state: torch.Tensor = None,
-                           output_final_state: bool = False,
-                           cu_seqlens: Optional[torch.LongTensor] = None,
-                           head_first: bool = False,
-                           use_qk_l2norm_in_kernel: bool = False):
+def chunk_gated_delta_rule(
+    q: torch.Tensor,
+    k: torch.Tensor,
+    v: torch.Tensor,
+    g: torch.Tensor,
+    beta: torch.Tensor,
+    scale: float = None,
+    initial_state: torch.Tensor = None,
+    output_final_state: bool = False,
+    cu_seqlens: torch.LongTensor | None = None,
+    head_first: bool = False,
+    use_qk_l2norm_in_kernel: bool = False,
+):
     r"""
     Args:
         q (torch.Tensor):
@@ -186,41 +186,39 @@ def chunk_gated_delta_rule(q: torch.Tensor,
     """
     assert q.dtype == k.dtype == v.dtype
     assert q.dtype != torch.float32, "ChunkGatedDeltaRuleFunction does not support float32. Please use bfloat16."
-    assert len(
-        beta.shape
-    ) == 3, "beta must be of shape [B, T, H] if head_first=False, or [B, H, T] otherwise."
+    assert len(beta.shape) == 3, "beta must be of shape [B, T, H] if head_first=False, or [B, H, T] otherwise."
 
     if head_first:
         raise DeprecationWarning(
             "head_first is deprecated and will be removed in a future version. "
             "Please use head_first=False for now instead.",
-            stacklevel=2)
-        q, k, v, beta, g = map(
-            lambda x: rearrange(x, 'b h t ... -> b t h ...'),
-            (q, k, v, beta, g))
+            stacklevel=2,
+        )
+        q, k, v, beta, g = map(lambda x: rearrange(x, "b h t ... -> b t h ..."), (q, k, v, beta, g))
     if not head_first and q.shape[1] < q.shape[2]:
         warnings.warn(
             f"Input tensor shape suggests potential format mismatch: seq_len ({q.shape[1]}) < num_heads ({q.shape[2]}). "
             "This may indicate the inputs were passed in head-first format [B, H, T, ...] "
             "when head_first=False was specified. "
             "Please verify your input tensor format matches the expected shape [B, T, H, ...].",
-            stacklevel=2)
+            stacklevel=2,
+        )
     if cu_seqlens is not None:
         if q.shape[0] != 1:
             raise ValueError(
                 f"The batch size is expected to be 1 rather than {q.shape[0]} when using `cu_seqlens`."
-                f"Please flatten variable-length inputs before processing.")
-        if initial_state is not None and initial_state.shape[0] != len(
-                cu_seqlens) - 1:
+                f"Please flatten variable-length inputs before processing."
+            )
+        if initial_state is not None and initial_state.shape[0] != len(cu_seqlens) - 1:
             raise ValueError(
                 f"The number of initial states is expected to be equal to the number of input sequences, "
                 f"i.e., {len(cu_seqlens) - 1} rather than {initial_state.shape[0]}."
             )
     if scale is None:
-        scale = k.shape[-1]**-0.5
+        scale = k.shape[-1] ** -0.5
     o, final_state = ChunkGatedDeltaRuleFunction.apply(
-        q, k, v, g, beta, scale, initial_state, output_final_state, cu_seqlens,
-        use_qk_l2norm_in_kernel)
+        q, k, v, g, beta, scale, initial_state, output_final_state, cu_seqlens, use_qk_l2norm_in_kernel
+    )
     if head_first:
-        o = rearrange(o, 'b t h ... -> b h t ...')
-    return o, final_state
+        o = rearrange(o, "b t h ... -> b h t ...")
+    return o, final_state
```
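The `cu_seqlens` errors raised above define the varlen convention used throughout these kernels. A small sketch of the assumed packing (illustrative shapes, not code from this PR):

```python
import torch

# Variable-length sequences are flattened into batch size 1 and described by
# cu_seqlens; one initial [K, V] state is supplied per sequence.
seqs = [torch.randn(5, 4, 64), torch.randn(9, 4, 64)]   # per-sequence [T_i, H, K]
q = torch.cat(seqs, dim=0).unsqueeze(0)                 # [1, 5 + 9, H, K]
cu_seqlens = torch.tensor([0, 5, 14], dtype=torch.long) # len(cu_seqlens) - 1 == 2 sequences
initial_state = torch.zeros(2, 4, 64, 64)               # initial_state.shape[0] == len(cu_seqlens) - 1
```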
**`vllm_ascend/ops/triton/fla/chunk_delta_h.py`**

```diff
@@ -8,23 +8,24 @@
 # Copyright (c) 2023-2025, Songlin Yang, Yu Zhang
 # ruff: noqa: E501
 # mypy: ignore-errors
-from typing import Optional
 
 import torch
 from vllm.triton_utils import tl, triton
 
 from .utils import prepare_chunk_indices, prepare_chunk_offsets, safe_exp
 
-_CONDITIONS = ("seq7168", )
+_CONDITIONS = ("seq7168",)
 
 
-@triton.heuristics({
-    "USE_G": lambda args: args["g"] is not None,
-    "USE_INITIAL_STATE": lambda args: args["h0"] is not None,
-    "STORE_FINAL_STATE": lambda args: args["ht"] is not None,
-    "SAVE_NEW_VALUE": lambda args: args["v_new"] is not None,
-    "IS_VARLEN": lambda args: args["cu_seqlens"] is not None,
-})
+@triton.heuristics(
+    {
+        "USE_G": lambda args: args["g"] is not None,
+        "USE_INITIAL_STATE": lambda args: args["h0"] is not None,
+        "STORE_FINAL_STATE": lambda args: args["ht"] is not None,
+        "SAVE_NEW_VALUE": lambda args: args["v_new"] is not None,
+        "IS_VARLEN": lambda args: args["cu_seqlens"] is not None,
+    }
+)
 @triton.jit(do_not_specialize=["T"])
 def chunk_gated_delta_rule_fwd_kernel_h_blockdim64(
     k,
@@ -85,28 +86,20 @@ def chunk_gated_delta_rule_fwd_kernel_h_blockdim64(
     if USE_INITIAL_STATE:
         h0_ptr = h0 + i_nh * K * V
         ptr_h0_bv1 = h0_ptr + offs_k * V + offs_v1 * 1
-        b_h1_bv1 += tl.load(ptr_h0_bv1, mask=mask_kv1,
-                            other=0.0).to(tl.float32)
+        b_h1_bv1 += tl.load(ptr_h0_bv1, mask=mask_kv1, other=0.0).to(tl.float32)
 
         ptr_h0_bv2 = h0_ptr + offs_k * V + offs_v2 * 1
-        b_h1_bv2 += tl.load(ptr_h0_bv2, mask=mask_kv2,
-                            other=0.0).to(tl.float32)
+        b_h1_bv2 += tl.load(ptr_h0_bv2, mask=mask_kv2, other=0.0).to(tl.float32)
 
     # main recurrence
     for i_t in range(NT):
         h_base = h + (boh + i_t) * H * K * V + i_h * K * V
 
-        p_h1_bv1 = tl.make_block_ptr(h_base, (K, V), (V, 1), (0, v_start1),
-                                     (128, 64), (1, 0))
-        tl.store(p_h1_bv1,
-                 b_h1_bv1.to(p_h1_bv1.dtype.element_ty),
-                 boundary_check=(0, 1))
+        p_h1_bv1 = tl.make_block_ptr(h_base, (K, V), (V, 1), (0, v_start1), (128, 64), (1, 0))
+        tl.store(p_h1_bv1, b_h1_bv1.to(p_h1_bv1.dtype.element_ty), boundary_check=(0, 1))
 
-        p_h1_bv2 = tl.make_block_ptr(h_base, (K, V), (V, 1), (0, v_start2),
-                                     (128, 64), (1, 0))
-        tl.store(p_h1_bv2,
-                 b_h1_bv2.to(p_h1_bv2.dtype.element_ty),
-                 boundary_check=(0, 1))
+        p_h1_bv2 = tl.make_block_ptr(h_base, (K, V), (V, 1), (0, v_start2), (128, 64), (1, 0))
+        tl.store(p_h1_bv2, b_h1_bv2.to(p_h1_bv2.dtype.element_ty), boundary_check=(0, 1))
 
         offs_t_wv = (i_t * BT + tl.arange(0, BT))[:, None]
         offs_k_wv = tl.arange(0, 128)[None, :]
@@ -117,8 +110,7 @@ def chunk_gated_delta_rule_fwd_kernel_h_blockdim64(
         b_w = tl.load(ptr_w, mask=mask_w, other=0.0)
 
         k_base = k + bos * Hg * K + (i_h // (H // Hg)) * K
-        p_k = tl.make_block_ptr(k_base, (K, T), (1, stride_k), (0, i_t * BT),
-                                (128, BT), (0, 1))
+        p_k = tl.make_block_ptr(k_base, (K, T), (1, stride_k), (0, i_t * BT), (128, BT), (0, 1))
         b_k = tl.load(p_k, boundary_check=(0, 1))
 
         v_new_base = v_new + bos * H * V + i_h * V
@@ -144,12 +136,8 @@ def chunk_gated_delta_rule_fwd_kernel_h_blockdim64(
         b_v_new1 -= tl.dot(b_w, b_h1_bv1.to(b_w.dtype))
 
         if SAVE_NEW_VALUE:
-            p_v_new1 = tl.make_block_ptr(v_new_base, (T, V), (stride_v, 1),
-                                         (i_t * BT, v_start1), (BT, 64),
-                                         (1, 0))
-            tl.store(p_v_new1,
-                     b_v_new1.to(p_v_new1.dtype.element_ty),
-                     boundary_check=(0, 1))
+            p_v_new1 = tl.make_block_ptr(v_new_base, (T, V), (stride_v, 1), (i_t * BT, v_start1), (BT, 64), (1, 0))
+            tl.store(p_v_new1, b_v_new1.to(p_v_new1.dtype.element_ty), boundary_check=(0, 1))
 
         if USE_G:
             b_v_new1 = b_v_new1 * b_g[:, None]
@@ -165,12 +153,8 @@ def chunk_gated_delta_rule_fwd_kernel_h_blockdim64(
         b_v_new2 -= tl.dot(b_w, b_h1_bv2.to(b_w.dtype))
 
         if SAVE_NEW_VALUE:
-            p_v_new2 = tl.make_block_ptr(v_new_base, (T, V), (stride_v, 1),
-                                         (i_t * BT, v_start2), (BT, 64),
-                                         (1, 0))
-            tl.store(p_v_new2,
-                     b_v_new2.to(p_v_new2.dtype.element_ty),
-                     boundary_check=(0, 1))
+            p_v_new2 = tl.make_block_ptr(v_new_base, (T, V), (stride_v, 1), (i_t * BT, v_start2), (BT, 64), (1, 0))
+            tl.store(p_v_new2, b_v_new2.to(p_v_new2.dtype.element_ty), boundary_check=(0, 1))
 
         if USE_G:
             b_v_new2 = b_v_new2 * b_g[:, None]
@@ -183,29 +167,23 @@ def chunk_gated_delta_rule_fwd_kernel_h_blockdim64(
     if STORE_FINAL_STATE:
         ht_ptr = ht + i_nh * K * V
 
-        p_ht1_bv1 = tl.make_block_ptr(ht_ptr, (K, V), (V, 1), (0, v_start1),
-                                      (128, 64), (1, 0))
-        tl.store(p_ht1_bv1,
-                 b_h1_bv1.to(p_ht1_bv1.dtype.element_ty),
-                 boundary_check=(0, 1))
+        p_ht1_bv1 = tl.make_block_ptr(ht_ptr, (K, V), (V, 1), (0, v_start1), (128, 64), (1, 0))
+        tl.store(p_ht1_bv1, b_h1_bv1.to(p_ht1_bv1.dtype.element_ty), boundary_check=(0, 1))
 
-        p_ht1_bv2 = tl.make_block_ptr(ht_ptr, (K, V), (V, 1), (0, v_start2),
-                                      (128, 64), (1, 0))
-        tl.store(p_ht1_bv2,
-                 b_h1_bv2.to(p_ht1_bv2.dtype.element_ty),
-                 boundary_check=(0, 1))
+        p_ht1_bv2 = tl.make_block_ptr(ht_ptr, (K, V), (V, 1), (0, v_start2), (128, 64), (1, 0))
+        tl.store(p_ht1_bv2, b_h1_bv2.to(p_ht1_bv2.dtype.element_ty), boundary_check=(0, 1))
 
 
 def chunk_gated_delta_rule_fwd_h(
     k: torch.Tensor,
     w: torch.Tensor,
     u: torch.Tensor,
-    g: Optional[torch.Tensor] = None,
-    initial_state: Optional[torch.Tensor] = None,
+    g: torch.Tensor | None = None,
+    initial_state: torch.Tensor | None = None,
     output_final_state: bool = False,
     chunk_size: int = 64, # SY: remove this argument and force chunk size 64?
     save_new_value: bool = True,
-    cu_seqlens: Optional[torch.LongTensor] = None,
+    cu_seqlens: torch.LongTensor | None = None,
 ) -> tuple[torch.Tensor, torch.Tensor]:
     # This kernel is slightly different from fla to support Q/K with different head numbers.
     # In fla, Q/K always have the same head number, so Hg is always equal to H.
@@ -213,8 +191,7 @@ def chunk_gated_delta_rule_fwd_h(
     H = u.shape[-2]
     BT = chunk_size
 
-    chunk_indices = (prepare_chunk_indices(cu_seqlens, chunk_size)
-                     if cu_seqlens is not None else None)
+    chunk_indices = prepare_chunk_indices(cu_seqlens, chunk_size) if cu_seqlens is not None else None
    # N: the actual number of sequences in the batch with either equal or variable lengths
    if cu_seqlens is None:
        N, NT, chunk_offsets = B, triton.cdiv(T, BT), None
@@ -227,8 +204,7 @@ def chunk_gated_delta_rule_fwd_h(
     assert K <= 256, "current kernel does not support head dimension larger than 256."
 
     h = k.new_empty(B, NT, H, K, V)
-    final_state = (k.new_empty(N, H, K, V, dtype=torch.float32)
-                   if output_final_state else None)
+    final_state = k.new_empty(N, H, K, V, dtype=torch.float32) if output_final_state else None
 
     v_new = torch.empty_like(u) if save_new_value else None
     g = g.transpose(1, 2).contiguous()
```
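The head-count comment in `chunk_gated_delta_rule_fwd_h` refers to the `i_h // (H // Hg)` mapping used throughout: `Hg` K/Q heads are shared across `H` value heads, GQA-style. A toy illustration with assumed sizes:

```python
# Hypothetical sizes for illustration only: Hg K-heads serve H value heads.
Hg, H = 4, 16
for i_h in range(H):
    i_k_head = i_h // (H // Hg)  # value head i_h reads K head i_k_head
# i_h = 0..3 -> K head 0, i_h = 4..7 -> K head 1, and so on.
```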
**`vllm_ascend/ops/triton/fla/chunk_o.py`**

```diff
@@ -9,7 +9,6 @@
 
 # ruff: noqa: E501
 # mypy: ignore-errors
-from typing import Optional
 
 import torch
 from vllm.triton_utils import tl, triton
@@ -17,11 +16,13 @@ from vllm.triton_utils import tl, triton
 from .utils import prepare_chunk_offsets, safe_exp
 
 
-@triton.heuristics({
-    'USE_G': lambda args: args['g'] is not None,
-    'IS_VARLEN': lambda args: args['cu_seqlens'] is not None,
-})
-@triton.jit(do_not_specialize=['T'])
+@triton.heuristics(
+    {
+        "USE_G": lambda args: args["g"] is not None,
+        "IS_VARLEN": lambda args: args["cu_seqlens"] is not None,
+    }
+)
+@triton.jit(do_not_specialize=["T"])
 def chunk_fwd_kernel_o(
     q,
     k,
@@ -48,8 +49,7 @@ def chunk_fwd_kernel_o(
     T_max = T
 
     if IS_VARLEN:
-        bos, eos = tl.load(cu_seqlens + i_n).to(
-            tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
+        bos, eos = tl.load(cu_seqlens + i_n).to(tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
         T = eos - bos
         NT = tl.cdiv(T, BT)
         boh = tl.load(chunk_offsets + i_n).to(tl.int64)
@@ -71,12 +71,9 @@ def chunk_fwd_kernel_o(
     b_A = tl.zeros([BT, BT], dtype=tl.float32)
 
     for i_k in range(tl.cdiv(K, BK)):
-        p_q = tl.make_block_ptr(q, (T, K), (Hg * K, 1),
-                                (i_t * BT, i_k * BK), (BT, BK), (1, 0))
-        p_k = tl.make_block_ptr(k, (K, T), (1, Hg * K),
-                                (i_k * BK, i_t * BT), (BK, BT), (0, 1))
-        p_h = tl.make_block_ptr(h_base, (K, V), (V, 1),
-                                (i_k * BK, i_v * BV), (BK, BV), (1, 0))
+        p_q = tl.make_block_ptr(q, (T, K), (Hg * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
+        p_k = tl.make_block_ptr(k, (K, T), (1, Hg * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
+        p_h = tl.make_block_ptr(h_base, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0))
         # [BT, BK]
         b_q = tl.load(p_q, boundary_check=(0, 1))
         # [BK, BT]
@@ -102,10 +99,8 @@ def chunk_fwd_kernel_o(
     m_A = o_i[:, None] >= o_i[None, :]
     b_A = tl.where(m_A, b_A, 0)
 
-    p_v = tl.make_block_ptr(v, (T, V), (H * V, 1), (i_t * BT, i_v * BV),
-                            (BT, BV), (1, 0))
-    p_o = tl.make_block_ptr(o, (T, V), (H * V, 1), (i_t * BT, i_v * BV),
-                            (BT, BV), (1, 0))
+    p_v = tl.make_block_ptr(v, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
+    p_o = tl.make_block_ptr(o, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
 
     b_v = tl.load(p_v, boundary_check=(0, 1))
     # to fix mma -> mma layout conversion
@@ -119,9 +114,9 @@ def chunk_fwd_o(
     k: torch.Tensor,
     v: torch.Tensor,
     h: torch.Tensor,
-    g: Optional[torch.Tensor] = None,
-    scale: Optional[float] = None,
-    cu_seqlens: Optional[torch.LongTensor] = None,
+    g: torch.Tensor | None = None,
+    scale: float | None = None,
+    cu_seqlens: torch.LongTensor | None = None,
     chunk_size: int = 64,
 ) -> torch.Tensor:
     B, T, Hg, K, V = *q.shape, v.shape[-1]
@@ -129,7 +124,7 @@ def chunk_fwd_o(
     BT = chunk_size
 
     if scale is None:
-        scale = k.shape[-1]**-0.5
+        scale = k.shape[-1] ** -0.5
 
     o = torch.empty_like(v)
     if cu_seqlens is None:
@@ -141,7 +136,7 @@ def chunk_fwd_o(
     )
 
     def grid(meta):
-        return (triton.cdiv(V, meta['BV']), N * H)
+        return (triton.cdiv(V, meta["BV"]), N * H)
 
     g = g.transpose(1, 2).contiguous()
     chunk_fwd_kernel_o[grid](
```
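For readers of `chunk_fwd_kernel_o`: per chunk, the kernel combines an inter-chunk term read from the state `h` with a causally masked intra-chunk attention term. A single-chunk reference sketch that ignores the optional gating (`USE_G`) path, an assumed simplification:

```python
import torch


def chunk_o_reference(q, k, v, h, scale):
    # q, k: [BT, K]; v: [BT, V]; h: [K, V] state carried in from earlier chunks
    A = torch.tril((q @ k.T) * scale)  # intra-chunk scores, diagonal kept (o_i >= o_i mask)
    return (q * scale) @ h + A @ v     # inter-chunk readout plus intra-chunk part
```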
**`vllm_ascend/ops/triton/fla/chunk_scaled_dot_kkt.py`**

```diff
@@ -8,7 +8,6 @@
 # Copyright (c) 2023-2025, Songlin Yang, Yu Zhang
 # ruff: noqa: E501
 # mypy: ignore-errors
-from typing import Optional
 
 import torch
 from vllm.triton_utils import tl, triton
@@ -16,11 +15,13 @@ from vllm.triton_utils import tl, triton
 from .utils import prepare_chunk_indices, safe_exp
 
 
-@triton.heuristics({
-    'IS_VARLEN': lambda args: args['cu_seqlens'] is not None,
-    'USE_G': lambda args: args['g_cumsum'] is not None,
-})
-@triton.jit(do_not_specialize=['T'])
+@triton.heuristics(
+    {
+        "IS_VARLEN": lambda args: args["cu_seqlens"] is not None,
+        "USE_G": lambda args: args["g_cumsum"] is not None,
+    }
+)
+@triton.jit(do_not_specialize=["T"])
 def chunk_scaled_dot_kkt_fwd_kernel(
     k,
     beta, # [H, B, T]
@@ -44,10 +45,11 @@ def chunk_scaled_dot_kkt_fwd_kernel(
     for i_bh in range(B * H):
         i_b, i_h = i_bh // H, i_bh % H
         if IS_VARLEN:
-            i_n, i_t = tl.load(chunk_indices + i_t_i * 2).to(
-                tl.int32), tl.load(chunk_indices + i_t_i * 2 + 1).to(tl.int32)
-            bos, eos = tl.load(cu_seqlens + i_n).to(
-                tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
+            i_n, i_t = (
+                tl.load(chunk_indices + i_t_i * 2).to(tl.int32),
+                tl.load(chunk_indices + i_t_i * 2 + 1).to(tl.int32),
+            )
+            bos, eos = tl.load(cu_seqlens + i_n).to(tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
             T = eos - bos
         else:
             bos, eos = i_b * T, i_b * T + T
@@ -55,39 +57,37 @@ def chunk_scaled_dot_kkt_fwd_kernel(
         o_t = tl.arange(0, BT)
         o_t_fp32 = o_t.to(tl.float32)
 
-        p_beta = tl.make_block_ptr(beta + i_h * bt_stride + bos, (T, ), (1, ),
-                                   (i_t * BT, ), (BT, ), (0, ))
-        b_beta = tl.load(p_beta, boundary_check=(0, ))
+        p_beta = tl.make_block_ptr(beta + i_h * bt_stride + bos, (T,), (1,), (i_t * BT,), (BT,), (0,))
+        b_beta = tl.load(p_beta, boundary_check=(0,))
 
         b_A = tl.zeros([BT, BT], dtype=tl.float32)
         for i_k in range(tl.cdiv(K, BK)):
-            p_k = tl.make_block_ptr(k + (bos * Hg + i_h // (H // Hg)) * K,
-                                    (T, K), (Hg * K, 1), (i_t * BT, i_k * BK),
-                                    (BT, BK), (1, 0))
+            p_k = tl.make_block_ptr(
+                k + (bos * Hg + i_h // (H // Hg)) * K, (T, K), (Hg * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)
+            )
             b_k = tl.load(p_k, boundary_check=(0, 1))
             b_A += tl.dot(b_k, tl.trans(b_k))
 
         if USE_G:
-            p_g = tl.make_block_ptr(g_cumsum + i_h * bt_stride + bos, (T, ),
-                                    (1, ), (i_t * BT, ), (BT, ), (0, ))
-            b_g = tl.load(p_g, boundary_check=(0, ))
+            p_g = tl.make_block_ptr(g_cumsum + i_h * bt_stride + bos, (T,), (1,), (i_t * BT,), (BT,), (0,))
+            b_g = tl.load(p_g, boundary_check=(0,))
             b_g_diff = b_g[:, None] - b_g[None, :]
             b_A *= safe_exp(b_g_diff)
 
         b_A *= b_beta[:, None]
         b_A = tl.where(o_t_fp32[:, None] > o_t_fp32[None, :], b_A, 0)
-        p_A = tl.make_block_ptr(A + (bos * H + i_h) * BT, (T, BT), (BT * H, 1),
-                                (i_t * BT, 0), (BT, BT), (1, 0))
+        p_A = tl.make_block_ptr(A + (bos * H + i_h) * BT, (T, BT), (BT * H, 1), (i_t * BT, 0), (BT, BT), (1, 0))
         tl.store(p_A, b_A.to(p_A.dtype.element_ty), boundary_check=(0, 1))
 
 
 def chunk_scaled_dot_kkt_fwd(
-        k: torch.Tensor,
-        beta: torch.Tensor,
-        g_cumsum: Optional[torch.Tensor] = None,
-        cu_seqlens: Optional[torch.LongTensor] = None,
-        chunk_size: int = 64,
-        output_dtype: torch.dtype = torch.float32) -> torch.Tensor:
+    k: torch.Tensor,
+    beta: torch.Tensor,
+    g_cumsum: torch.Tensor | None = None,
+    cu_seqlens: torch.LongTensor | None = None,
+    chunk_size: int = 64,
+    output_dtype: torch.dtype = torch.float32,
+) -> torch.Tensor:
     r"""
     Compute beta * K * K^T.
 
@@ -117,8 +117,7 @@ def chunk_scaled_dot_kkt_fwd(
     BT = chunk_size
     if cu_seqlens is not None:
         cu_seqlens = cu_seqlens.cpu()
-        chunk_indices = (prepare_chunk_indices(cu_seqlens, BT)
-                         if cu_seqlens is not None else None)
+        chunk_indices = prepare_chunk_indices(cu_seqlens, BT) if cu_seqlens is not None else None
         chunk_indices = chunk_indices.npu()
         cu_seqlens = cu_seqlens.npu()
     else:
```
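The docstring's `beta * K * K^T` is computed with an optional gated-decay factor and a strict lower-triangular mask. A dense reference sketch of one chunk (a reconstruction from the kernel above, not a drop-in replacement):

```python
import torch


def scaled_dot_kkt_reference(k, beta, g_cumsum=None):
    # k: [BT, K]; beta, g_cumsum: [BT]
    A = k @ k.T                          # [BT, BT]
    if g_cumsum is not None:             # gated decay between positions
        A = A * torch.exp(g_cumsum[:, None] - g_cumsum[None, :])
    A = A * beta[:, None]                # row-wise beta scaling
    return torch.tril(A, diagonal=-1)    # strictly lower triangular (o_t > o_t mask)
```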
**`vllm_ascend/ops/triton/fla/cumsum.py`**

```diff
@@ -8,7 +8,6 @@
 # Copyright (c) 2023-2025, Songlin Yang, Yu Zhang
 # ruff: noqa: E501
 # mypy: ignore-errors
-from typing import Optional
 
 import torch
 from vllm.triton_utils import tl, triton
@@ -16,11 +15,10 @@ from vllm.triton_utils import tl, triton
 from .utils import prepare_chunk_indices
 
 
-@triton.heuristics({
-    'HAS_SCALE': lambda args: args['scale'] is not None,
-    'IS_VARLEN': lambda args: args['cu_seqlens'] is not None
-})
-@triton.jit(do_not_specialize=['T'])
+@triton.heuristics(
+    {"HAS_SCALE": lambda args: args["scale"] is not None, "IS_VARLEN": lambda args: args["cu_seqlens"] is not None}
+)
+@triton.jit(do_not_specialize=["T"])
 def chunk_local_cumsum_scalar_kernel(
     s,
     o,
@@ -41,20 +39,19 @@ def chunk_local_cumsum_scalar_kernel(
     N_CHUNKS: tl.constexpr = BLOCK_T // CHUNK_SIZE
 
     if IS_VARLEN:
-        i_s, i_block = tl.load(chunk_indices + i_block * 2).to(
-            tl.int32), tl.load(chunk_indices + i_block * 2 + 1).to(tl.int32)
-        bos, eos = tl.load(cu_seqlens + i_s).to(
-            tl.int32), tl.load(cu_seqlens + i_s + 1).to(tl.int32)
+        i_s, i_block = (
+            tl.load(chunk_indices + i_block * 2).to(tl.int32),
+            tl.load(chunk_indices + i_block * 2 + 1).to(tl.int32),
+        )
+        bos, eos = tl.load(cu_seqlens + i_s).to(tl.int32), tl.load(cu_seqlens + i_s + 1).to(tl.int32)
         T = eos - bos
     else:
         bos, eos = i_b * T, i_b * T + T
 
     if HEAD_FIRST:
-        ptr_s = tl.make_block_ptr(s + bos * H, (H, T), (T, 1),
-                                  (0, i_block * BLOCK_T), (H, BLOCK_T), (1, 0))
-        ptr_o = tl.make_block_ptr(o + bos * H, (H, T), (T, 1),
-                                  (0, i_block * BLOCK_T), (H, BLOCK_T), (1, 0))
-        b_s = tl.load(ptr_s, boundary_check=(0, )).to(tl.float32)
+        ptr_s = tl.make_block_ptr(s + bos * H, (H, T), (T, 1), (0, i_block * BLOCK_T), (H, BLOCK_T), (1, 0))
+        ptr_o = tl.make_block_ptr(o + bos * H, (H, T), (T, 1), (0, i_block * BLOCK_T), (H, BLOCK_T), (1, 0))
+        b_s = tl.load(ptr_s, boundary_check=(0,)).to(tl.float32)
         b_s = tl.reshape(b_s, (H, N_CHUNKS, CHUNK_SIZE))
         b_s = tl.trans(b_s, (2, 0, 1))
         b_o = tl.cumsum(b_s, axis=0, reverse=REVERSE)
@@ -63,11 +60,9 @@ def chunk_local_cumsum_scalar_kernel(
         b_o = tl.trans(b_o, (2, 0, 1))
         b_o = tl.reshape(b_o, (H, BLOCK_T))
     else:
-        ptr_s = tl.make_block_ptr(s + bos * H, (T, H), (H, 1),
-                                  (i_block * BLOCK_T, 0), (BLOCK_T, H), (1, 0))
-        ptr_o = tl.make_block_ptr(o + bos * H, (T, H), (H, 1),
-                                  (i_block * BLOCK_T, 0), (BLOCK_T, H), (1, 0))
-        b_s = tl.load(ptr_s, boundary_check=(0, )).to(tl.float32)
+        ptr_s = tl.make_block_ptr(s + bos * H, (T, H), (H, 1), (i_block * BLOCK_T, 0), (BLOCK_T, H), (1, 0))
+        ptr_o = tl.make_block_ptr(o + bos * H, (T, H), (H, 1), (i_block * BLOCK_T, 0), (BLOCK_T, H), (1, 0))
+        b_s = tl.load(ptr_s, boundary_check=(0,)).to(tl.float32)
         b_s = tl.reshape(b_s, (N_CHUNKS, CHUNK_SIZE, H))
         b_s = tl.trans(b_s, (1, 0, 2))
         b_o = tl.cumsum(b_s, axis=0, reverse=REVERSE)
@@ -76,7 +71,7 @@ def chunk_local_cumsum_scalar_kernel(
         b_o = tl.trans(b_o, (1, 0, 2))
         b_o = tl.reshape(b_o, (BLOCK_T, H))
 
-    tl.store(ptr_o, b_o.to(s.dtype.element_ty), boundary_check=(0, ))
+    tl.store(ptr_o, b_o.to(s.dtype.element_ty), boundary_check=(0,))
     return
 
 
@@ -85,61 +80,64 @@ def chunk_local_cumsum_scalar(
     chunk_size,
     reverse: bool = False,
     scale: float = None,
-    cu_seqlens: Optional[torch.Tensor] = None,
+    cu_seqlens: torch.Tensor | None = None,
     head_first: bool = False,
-    output_dtype: Optional[torch.Tensor] = torch.float,
+    output_dtype: torch.Tensor | None = torch.float,
 ):
     if head_first:
         B, H, T = g.shape
     else:
         B, T, H = g.shape
-    assert chunk_size == 2**(chunk_size.bit_length() -
-                             1), "chunk_size must be a power of 2"
+    assert chunk_size == 2 ** (chunk_size.bit_length() - 1), "chunk_size must be a power of 2"
     OPTIM_BLOCK_SIZE = triton.next_power_of_2((2**18) // (H * chunk_size))
-    block_indices = prepare_chunk_indices(
-        cu_seqlens,
-        chunk_size=OPTIM_BLOCK_SIZE) if cu_seqlens is not None else None
-    num_blocks = len(block_indices) if cu_seqlens is not None else triton.cdiv(
-        T, OPTIM_BLOCK_SIZE)
+    block_indices = prepare_chunk_indices(cu_seqlens, chunk_size=OPTIM_BLOCK_SIZE) if cu_seqlens is not None else None
+    num_blocks = len(block_indices) if cu_seqlens is not None else triton.cdiv(T, OPTIM_BLOCK_SIZE)
     g_org, g = g, torch.empty_like(g, dtype=output_dtype or g.dtype)
     grid = (num_blocks, B)
-    chunk_local_cumsum_scalar_kernel[grid](s=g_org,
-                                           o=g,
-                                           scale=scale,
-                                           cu_seqlens=cu_seqlens,
-                                           chunk_indices=block_indices,
-                                           T=T,
-                                           B=B,
-                                           H=H,
-                                           BLOCK_T=OPTIM_BLOCK_SIZE,
-                                           CHUNK_SIZE=chunk_size,
-                                           HEAD_FIRST=head_first,
-                                           REVERSE=reverse,
-                                           num_warps=8,
-                                           num_stages=3)
+    chunk_local_cumsum_scalar_kernel[grid](
+        s=g_org,
+        o=g,
+        scale=scale,
+        cu_seqlens=cu_seqlens,
+        chunk_indices=block_indices,
+        T=T,
+        B=B,
+        H=H,
+        BLOCK_T=OPTIM_BLOCK_SIZE,
+        CHUNK_SIZE=chunk_size,
+        HEAD_FIRST=head_first,
+        REVERSE=reverse,
+        num_warps=8,
+        num_stages=3,
+    )
     return g
 
 
-def chunk_local_cumsum(g: torch.Tensor,
-                       chunk_size: int,
-                       reverse: bool = False,
-                       scale: float = None,
-                       cu_seqlens: Optional[torch.Tensor] = None,
-                       head_first: bool = False,
-                       output_dtype: Optional[torch.dtype] = torch.float,
-                       **kwargs) -> torch.Tensor:
+def chunk_local_cumsum(
+    g: torch.Tensor,
+    chunk_size: int,
+    reverse: bool = False,
+    scale: float = None,
+    cu_seqlens: torch.Tensor | None = None,
+    head_first: bool = False,
+    output_dtype: torch.dtype | None = torch.float,
+    **kwargs,
+) -> torch.Tensor:
     if cu_seqlens is not None:
-        assert g.shape[
-            0] == 1, "Only batch size 1 is supported when cu_seqlens are provided"
+        assert g.shape[0] == 1, "Only batch size 1 is supported when cu_seqlens are provided"
     if len(g.shape) == 3:
-        return chunk_local_cumsum_scalar(g=g,
-                                         chunk_size=chunk_size,
-                                         reverse=reverse,
-                                         scale=scale,
-                                         cu_seqlens=cu_seqlens,
-                                         head_first=head_first,
-                                         output_dtype=output_dtype)
+        return chunk_local_cumsum_scalar(
+            g=g,
+            chunk_size=chunk_size,
+            reverse=reverse,
+            scale=scale,
+            cu_seqlens=cu_seqlens,
+            head_first=head_first,
+            output_dtype=output_dtype,
+        )
     else:
-        raise ValueError(f"Unsupported input shape {g.shape}, "
-                         f"which should be (B, T, H, D) if `head_first=False` "
-                         f"or (B, H, T, D) otherwise")
+        raise ValueError(
+            f"Unsupported input shape {g.shape}, "
+            f"which should be (B, T, H, D) if `head_first=False` "
+            f"or (B, H, T, D) otherwise"
+        )
```
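`chunk_local_cumsum` restarts a cumulative sum inside every chunk. A dense reference for the simple case (no `reverse`, no `scale`, `head_first=False`, `T` divisible by the chunk size), as an assumed simplification:

```python
import torch


def chunk_local_cumsum_reference(g, chunk_size):
    # g: [B, T, H]; cumulative sum restarted at every chunk boundary.
    B, T, H = g.shape
    g = g.view(B, T // chunk_size, chunk_size, H)
    return g.cumsum(dim=2).view(B, T, H)
```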
**`vllm_ascend/ops/triton/fla/fused_qkvzba_split_reshape.py`**

```diff
@@ -31,29 +31,30 @@ def fused_qkvzba_split_reshape_cat_kernel(
     BA_DIM_T: tl.constexpr = NUM_HEADS_V // NUM_HEADS_QK * 2
     QKV_DIM_T: tl.constexpr = HEAD_QK * 2 + NUM_HEADS_V // NUM_HEADS_QK * HEAD_V
     q_end: tl.constexpr = HEAD_QK
-    blk_q_ptr = (mixed_qkvz + i_bs * NUM_HEADS_QK * QKVZ_DIM_T +
-                 i_qk * QKVZ_DIM_T + tl.arange(0, q_end))
+    blk_q_ptr = mixed_qkvz + i_bs * NUM_HEADS_QK * QKVZ_DIM_T + i_qk * QKVZ_DIM_T + tl.arange(0, q_end)
     k_end: tl.constexpr = q_end + HEAD_QK
-    blk_k_ptr = (mixed_qkvz + i_bs * NUM_HEADS_QK * QKVZ_DIM_T +
-                 i_qk * QKVZ_DIM_T + tl.arange(q_end, k_end))
+    blk_k_ptr = mixed_qkvz + i_bs * NUM_HEADS_QK * QKVZ_DIM_T + i_qk * QKVZ_DIM_T + tl.arange(q_end, k_end)
     v_end: tl.constexpr = k_end + NUM_HEADS_V // NUM_HEADS_QK * HEAD_V
-    blk_v_ptr = (mixed_qkvz + i_bs * NUM_HEADS_QK * QKVZ_DIM_T +
-                 i_qk * QKVZ_DIM_T + tl.arange(k_end, v_end))
+    blk_v_ptr = mixed_qkvz + i_bs * NUM_HEADS_QK * QKVZ_DIM_T + i_qk * QKVZ_DIM_T + tl.arange(k_end, v_end)
     z_end: tl.constexpr = v_end + NUM_HEADS_V // NUM_HEADS_QK * HEAD_V
-    blk_z_ptr = (mixed_qkvz + i_bs * NUM_HEADS_QK * QKVZ_DIM_T +
-                 i_qk * QKVZ_DIM_T + tl.arange(v_end, z_end))
-    blk_q_st_ptr = (mixed_qkv + i_bs * NUM_HEADS_QK * QKV_DIM_T +
-                    i_qk * HEAD_QK + tl.arange(0, HEAD_QK))
-    blk_k_st_ptr = (mixed_qkv + i_bs * NUM_HEADS_QK * QKV_DIM_T +
-                    NUM_HEADS_QK * HEAD_QK + i_qk * HEAD_QK +
-                    tl.arange(0, HEAD_QK))
-    blk_v_st_ptr = (mixed_qkv + i_bs * NUM_HEADS_QK * QKV_DIM_T +
-                    NUM_HEADS_QK * HEAD_QK * 2 +
-                    i_qk * HEAD_V * NUM_HEADS_V // NUM_HEADS_QK +
-                    tl.arange(0, HEAD_V * NUM_HEADS_V // NUM_HEADS_QK))
-    blk_z_st_ptr = (z + i_bs * NUM_HEADS_V * HEAD_V +
-                    i_qk * HEAD_V * NUM_HEADS_V // NUM_HEADS_QK +
-                    tl.arange(0, HEAD_V * NUM_HEADS_V // NUM_HEADS_QK))
+    blk_z_ptr = mixed_qkvz + i_bs * NUM_HEADS_QK * QKVZ_DIM_T + i_qk * QKVZ_DIM_T + tl.arange(v_end, z_end)
+    blk_q_st_ptr = mixed_qkv + i_bs * NUM_HEADS_QK * QKV_DIM_T + i_qk * HEAD_QK + tl.arange(0, HEAD_QK)
+    blk_k_st_ptr = (
+        mixed_qkv + i_bs * NUM_HEADS_QK * QKV_DIM_T + NUM_HEADS_QK * HEAD_QK + i_qk * HEAD_QK + tl.arange(0, HEAD_QK)
+    )
+    blk_v_st_ptr = (
+        mixed_qkv
+        + i_bs * NUM_HEADS_QK * QKV_DIM_T
+        + NUM_HEADS_QK * HEAD_QK * 2
+        + i_qk * HEAD_V * NUM_HEADS_V // NUM_HEADS_QK
+        + tl.arange(0, HEAD_V * NUM_HEADS_V // NUM_HEADS_QK)
+    )
+    blk_z_st_ptr = (
+        z
+        + i_bs * NUM_HEADS_V * HEAD_V
+        + i_qk * HEAD_V * NUM_HEADS_V // NUM_HEADS_QK
+        + tl.arange(0, HEAD_V * NUM_HEADS_V // NUM_HEADS_QK)
+    )
     tl.store(blk_q_st_ptr, tl.load(blk_q_ptr))
     tl.store(blk_k_st_ptr, tl.load(blk_k_ptr))
     tl.store(blk_v_st_ptr, tl.load(blk_v_ptr))
@@ -66,8 +67,7 @@ def fused_qkvzba_split_reshape_cat_kernel(
     tl.store(blk_b_st_ptr, tl.load(blk_b_ptr))
     for i in tl.static_range(b_end, a_end):
         blk_a_ptr = mixed_ba + i_bs * NUM_HEADS_QK * BA_DIM_T + i_qk * BA_DIM_T + i
-        blk_a_st_ptr = (a + i_bs * NUM_HEADS_V +
-                        i_qk * NUM_HEADS_V // NUM_HEADS_QK + (i - b_end))
+        blk_a_st_ptr = a + i_bs * NUM_HEADS_V + i_qk * NUM_HEADS_V // NUM_HEADS_QK + (i - b_end)
         tl.store(blk_a_st_ptr, tl.load(blk_a_ptr))
```
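The pointer arithmetic above unpacks a per-head `[q | k | v-group | z-group]` layout from `mixed_qkvz`. A shape-level sketch with hypothetical sizes (`ratio = NUM_HEADS_V // NUM_HEADS_QK`):

```python
import torch

# Illustrative sizes only; each QK-head block packs q, k, a group of v heads,
# and a group of z heads contiguously along the last dimension.
num_tokens, num_heads_qk, head_qk, num_heads_v, head_v = 8, 2, 16, 4, 32
ratio = num_heads_v // num_heads_qk
qkvz_dim = 2 * head_qk + 2 * ratio * head_v
mixed_qkvz = torch.randn(num_tokens, num_heads_qk, qkvz_dim)
q, k, v, z = torch.split(mixed_qkvz, [head_qk, head_qk, ratio * head_v, ratio * head_v], dim=-1)
```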
**`vllm_ascend/ops/triton/fla/l2norm.py`**

```diff
@@ -15,8 +15,7 @@ from vllm_ascend.ops.triton.triton_utils import get_vectorcore_num
 
 
 @triton.jit
-def l2norm_fwd_kernel2_loop(X, Y, eps, M, N: tl.constexpr,
-                            MBLOCK: tl.constexpr, NUM_CHUNKS: tl.constexpr):
+def l2norm_fwd_kernel2_loop(X, Y, eps, M, N: tl.constexpr, MBLOCK: tl.constexpr, NUM_CHUNKS: tl.constexpr):
     base_row = tl.program_id(0) * (NUM_CHUNKS * MBLOCK)
     rindex = tl.arange(0, N)[None, :]
 
@@ -24,8 +23,7 @@ def l2norm_fwd_kernel2_loop(X, Y, eps, M, N: tl.constexpr,
         row_idx = base_row + chunk * MBLOCK + tl.arange(0, MBLOCK)[:, None]
         xmask = row_idx < M
 
-        xs = tl.load(X + (rindex + N * row_idx), mask=xmask,
-                     other=0.0).to(tl.float32)
+        xs = tl.load(X + (rindex + N * row_idx), mask=xmask, other=0.0).to(tl.float32)
         square = xs * xs
         square_sum = tl.sum(square, 1)[:, None]
         rsqrt = tl.rsqrt(square_sum + eps)
@@ -33,9 +31,7 @@ def l2norm_fwd_kernel2_loop(X, Y, eps, M, N: tl.constexpr,
         tl.store(Y + (rindex + N * row_idx), xs * rsqrt, xmask)
 
 
-def l2norm_fwd(x: torch.Tensor,
-               eps: float = 1e-6,
-               output_dtype: torch.dtype | None = None):
+def l2norm_fwd(x: torch.Tensor, eps: float = 1e-6, output_dtype: torch.dtype | None = None):
     x_shape_og = x.shape
     x = x.reshape(-1, x.shape[-1])
     # allocate output
@@ -56,7 +52,7 @@ def l2norm_fwd(x: torch.Tensor,
     num_core = get_vectorcore_num()
     main_bs = triton.cdiv(T, num_core)
     num_sub_blocks = triton.cdiv(main_bs, MBLOCK)
-    grid = (num_core, )
+    grid = (num_core,)
     l2norm_fwd_kernel2_loop[grid](
         X=x,
         Y=y,
```
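`l2norm_fwd` row-normalizes with `rsqrt(sum(x^2) + eps)`. A dense one-function reference (output dtype handling simplified, an assumption for illustration):

```python
import torch


def l2norm_reference(x: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    xf = x.float()
    return (xf * torch.rsqrt((xf * xf).sum(-1, keepdim=True) + eps)).to(x.dtype)
```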
**`vllm_ascend/ops/triton/layernorm_gated.py`**

```diff
@@ -12,10 +12,12 @@ from vllm.triton_utils import tl, triton
 MAX_CORES = 65535
 
 
-@triton.heuristics({
-    "HAS_BIAS": lambda args: args["B"] is not None,
-    "HAS_Z": lambda args: args["Z"] is not None,
-})
+@triton.heuristics(
+    {
+        "HAS_BIAS": lambda args: args["B"] is not None,
+        "HAS_Z": lambda args: args["Z"] is not None,
+    }
+)
 @triton.jit
 def layer_norm_fwd_kernel(
     X, # pointer to the input
@@ -49,13 +51,10 @@ def layer_norm_fwd_kernel(
     n_iters = n_iters + 1
 
     for i in tl.range(n_iters):
-        X_base = X + (i * BLOCK_ROWS *
-                      stride_x_row) + row * stride_x_row + group * N
-        Y_base = Y + (i * BLOCK_ROWS *
-                      stride_y_row) + row * stride_y_row + group * N
+        X_base = X + (i * BLOCK_ROWS * stride_x_row) + row * stride_x_row + group * N
+        Y_base = Y + (i * BLOCK_ROWS * stride_y_row) + row * stride_y_row + group * N
         if HAS_Z:
-            Z_base = Z + (i * BLOCK_ROWS *
-                          stride_z_row) + row * stride_z_row + group * N
+            Z_base = Z + (i * BLOCK_ROWS * stride_z_row) + row * stride_z_row + group * N
         if not IS_RMS_NORM:
             Mean_base = Mean + (i * BLOCK_ROWS) + group * M
             Rstd_base = Rstd + (i * BLOCK_ROWS) + group * M
@@ -64,17 +63,17 @@ def layer_norm_fwd_kernel(
             B_base = B + group * N
         # Compute mean and variance
         cols = tl.arange(0, BLOCK_N)
-        x = tl.load(X_base + cols, mask=cols < N, other=0.).to(tl.float32)
+        x = tl.load(X_base + cols, mask=cols < N, other=0.0).to(tl.float32)
         if HAS_Z and not NORM_BEFORE_GATE:
             z = tl.load(Z_base + cols, mask=cols < N).to(tl.float32)
             x *= z * tl.sigmoid(z)
         if not IS_RMS_NORM:
             mean = tl.sum(x, axis=0) / N
             tl.store(Mean_base + row, mean)
-            xbar = tl.where(cols < N, x - mean, 0.)
+            xbar = tl.where(cols < N, x - mean, 0.0)
             var = tl.sum(xbar * xbar, axis=0) / N
         else:
-            xbar = tl.where(cols < N, x, 0.)
+            xbar = tl.where(cols < N, x, 0.0)
             var = tl.sum(xbar * xbar, axis=0) / N
         rstd = 1 / tl.sqrt(var + eps)
         tl.store(Rstd_base + row, rstd)
@@ -112,26 +111,24 @@ def _layer_norm_fwd(
     if z is not None:
         assert z.stride(-1) == 1
         assert z.shape == (M, N)
-    assert weight.shape == (N, )
+    assert weight.shape == (N,)
     assert weight.stride(-1) == 1
     if bias is not None:
         assert bias.stride(-1) == 1
-        assert bias.shape == (N, )
+        assert bias.shape == (N,)
     # allocate output
     if out is not None:
         assert out.shape == x.shape
     else:
         out = torch.empty_like(x)
     assert out.stride(-1) == 1
-    mean = (torch.empty((ngroups * M, ), dtype=torch.float32, device=x.device)
-            if not is_rms_norm else None)
-    rstd = torch.empty((ngroups * M, ), dtype=torch.float32, device=x.device)
+    mean = torch.empty((ngroups * M,), dtype=torch.float32, device=x.device) if not is_rms_norm else None
+    rstd = torch.empty((ngroups * M,), dtype=torch.float32, device=x.device)
     # Less than 64KB per feature: enqueue fused kernel
     MAX_FUSED_SIZE = 65536 // x.element_size()
     BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(group_size))
     if group_size > BLOCK_N:
-        raise RuntimeError(
-            "This layer norm doesn't support feature dim >= 64KB.")
+        raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
     # heuristics for number of warps
     num_warps = min(max(BLOCK_N // 256, 1), 8)
     grid = (M if M < MAX_CORES else MAX_CORES, ngroups)
@@ -160,7 +157,6 @@ def _layer_norm_fwd(
 
 
 class LayerNormFn(torch.autograd.Function):
-
     @staticmethod
     def forward(
         ctx,
```
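The kernel fuses an optional SiLU gate with layer/RMS normalization. A reference for the RMS path with the gate applied before the statistics (`HAS_Z` and not `NORM_BEFORE_GATE`), bias omitted for brevity:

```python
import torch


def rmsnorm_gated_reference(x, z, weight, eps):
    # x, z: [M, N]; weight: [N]; gate with SiLU(z), then RMS-normalize.
    x = x.float() * (z.float() * torch.sigmoid(z.float()))
    rstd = torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
    return x * rstd * weight
```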
**`vllm_ascend/ops/triton/fla/sigmoid_gating.py`**

```diff
@@ -14,7 +14,7 @@ import os
 import torch
 from vllm.triton_utils import tl, tldevice, triton
 
-if os.environ.get('FLA_USE_FAST_OPS', '0') == '1':
+if os.environ.get("FLA_USE_FAST_OPS", "0") == "1":
     div = tldevice.fast_dividef
     exp = tldevice.fast_expf
     log = tldevice.fast_logf
@@ -31,17 +31,15 @@ else:
     log2 = tl.log2
 
 
-@triton.heuristics({
-    'USE_INITIAL_STATE':
-    lambda args: args['h0'] is not None,
-    'IS_VARLEN':
-    lambda args: args['cu_seqlens'] is not None,
-    "IS_CONTINUOUS_BATCHING":
-    lambda args: args['ssm_state_indices'] is not None,
-    "IS_SPEC_DECODING":
-    lambda args: args['num_accepted_tokens'] is not None,
-})
-@triton.jit(do_not_specialize=['N', 'T'])
+@triton.heuristics(
+    {
+        "USE_INITIAL_STATE": lambda args: args["h0"] is not None,
+        "IS_VARLEN": lambda args: args["cu_seqlens"] is not None,
+        "IS_CONTINUOUS_BATCHING": lambda args: args["ssm_state_indices"] is not None,
+        "IS_SPEC_DECODING": lambda args: args["num_accepted_tokens"] is not None,
+    }
+)
+@triton.jit(do_not_specialize=["N", "T"])
 def fused_recurrent_gated_delta_rule_fwd_kernel(
     q,
     k,
@@ -70,8 +68,7 @@ def fused_recurrent_gated_delta_rule_fwd_kernel(
     stride_indices_tok: tl.constexpr,
     USE_INITIAL_STATE: tl.constexpr, # whether to use initial state
     INPLACE_FINAL_STATE: tl.constexpr, # whether to store final state inplace
-    IS_BETA_HEADWISE: tl.
-    constexpr, # whether beta is headwise vector or scalar,
+    IS_BETA_HEADWISE: tl.constexpr, # whether beta is headwise vector or scalar,
     USE_QK_L2NORM_IN_KERNEL: tl.constexpr,
     IS_VARLEN: tl.constexpr,
     IS_CONTINUOUS_BATCHING: tl.constexpr,
@@ -82,8 +79,7 @@ def fused_recurrent_gated_delta_rule_fwd_kernel(
     i_n, i_hv = i_nh // HV, i_nh % HV
     i_h = i_hv // (HV // H)
     if IS_VARLEN:
-        bos, eos = tl.load(cu_seqlens + i_n).to(
-            tl.int64), tl.load(cu_seqlens + i_n + 1).to(tl.int64)
+        bos, eos = tl.load(cu_seqlens + i_n).to(tl.int64), tl.load(cu_seqlens + i_n + 1).to(tl.int64)
         all = T
         T = eos - bos
     else:
@@ -108,8 +104,9 @@ def fused_recurrent_gated_delta_rule_fwd_kernel(
             i_t = tl.load(num_accepted_tokens + i_n).to(tl.int64) - 1
         else:
             i_t = 0
-        p_h0 = h0 + tl.load(ssm_state_indices + i_n * stride_indices_seq +
-                            i_t).to(tl.int64) * stride_init_state_token
+        p_h0 = (
+            h0 + tl.load(ssm_state_indices + i_n * stride_indices_seq + i_t).to(tl.int64) * stride_init_state_token
+        )
     else:
         p_h0 = h0 + bos * HV * K * V
     p_h0 = p_h0 + i_hv * K * V + o_k[:, None] * V + o_v[None, :]
@@ -164,18 +161,21 @@ def fused_recurrent_gated_delta_rule_fwd_kernel(
 
     # keep the states for multi-query tokens
     if INPLACE_FINAL_STATE:
-        p_ht = ht + tl.load(ssm_state_indices + i_n * stride_indices_seq +
-                            i_t).to(tl.int64) * stride_final_state_token
+        p_ht = (
+            ht + tl.load(ssm_state_indices + i_n * stride_indices_seq + i_t).to(tl.int64) * stride_final_state_token
+        )
     else:
         p_ht = ht + (bos + i_t) * stride_final_state_token
     p_ht = p_ht + i_hv * K * V + o_k[:, None] * V + o_v[None, :]
     tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), mask=mask_h)
 
 
-@triton.heuristics({
-    "USE_INITIAL_STATE": lambda args: args["h0_source"] is not None,
-    "IS_VARLEN": lambda args: args["cu_seqlens"] is not None,
-})
+@triton.heuristics(
+    {
+        "USE_INITIAL_STATE": lambda args: args["h0_source"] is not None,
+        "IS_VARLEN": lambda args: args["cu_seqlens"] is not None,
+    }
+)
 @triton.jit(do_not_specialize=["T"])
 def fused_sigmoid_gating_delta_rule_update_kernel(
     A_log,
@@ -245,8 +245,7 @@ def fused_sigmoid_gating_delta_rule_update_kernel(
         idx = tl.load(h0_indices + i_n)
         # if idx >= 0:
         tmp0 = tl.where(idx < 0, 0, idx)
-        p_h0 = (h0_source + tmp0 * HV * K * V + i_hv * K * V +
-                o_k[:, None] * V + o_v[None, :])
+        p_h0 = h0_source + tmp0 * HV * K * V + i_hv * K * V + o_k[:, None] * V + o_v[None, :]
         temp1 = tl.load(p_h0, mask=mask_h, other=0).to(tl.float32)
         temp2 = tl.zeros_like(temp1)
         value0 = tl.where(idx < 0, temp2, temp1)
@@ -314,8 +313,7 @@ def fused_sigmoid_gating_delta_rule_update_kernel(
     if USE_INITIAL_STATE:
         idx = tl.load(h0_indices + i_n)
         if idx >= 0:
-            p_h0 = (h0_source + idx * HV * K * V + i_hv * K * V +
-                    o_k[:, None] * V + o_v[None, :])
+            p_h0 = h0_source + idx * HV * K * V + i_hv * K * V + o_k[:, None] * V + o_v[None, :]
             tl.store(p_h0, b_h.to(p_h0.dtype.element_ty), mask=mask_h)
 
 
@@ -350,7 +348,7 @@ def fused_sigmoid_gating_delta_rule_update(
     num_warps = 1
 
     if scale is None:
-        scale = k.shape[-1]**-0.5
+        scale = k.shape[-1] ** -0.5
     else:
         assert scale > 0, "scale must be positive"
```
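For orientation, the recurrence these fused kernels implement can be written per token. A single-head sketch assembled from the kernel's update order (a sketch under those assumptions, not a drop-in replacement):

```python
import torch


def gated_delta_rule_reference(q, k, v, g, beta, scale, S):
    # q, k: [T, K]; v: [T, V]; g, beta: [T]; S: [K, V] running state
    o = torch.empty_like(v)
    for t in range(q.shape[0]):
        S = S * torch.exp(g[t])                   # gated decay of the state
        err = v[t] - k[t] @ S                     # delta-rule prediction error
        S = S + torch.outer(k[t], beta[t] * err)  # rank-1 state correction
        o[t] = (q[t] * scale) @ S                 # read out with the query
    return o, S
```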
**`vllm_ascend/ops/triton/fla/solve_tril.py`**

```diff
@@ -8,7 +8,6 @@
 # Copyright (c) 2023-2025, Songlin Yang, Yu Zhang
 # ruff: noqa: E501
 # mypy: ignore-errors
-from typing import Optional
 
 import torch
 from vllm.triton_utils import tl, triton
@@ -66,9 +65,13 @@ def solve_tril_16x16_kernel(
     offs_cols_in_block = tl.arange(0, 16)
 
     # 2 Calculate the pointer of each element
-    ptr_A_subrec16 = (A + row_start_o * H * BT + col_start_o +
-                      offs_rows_in_block[:, None] * H * BT +
-                      offs_cols_in_block[None, :])
+    ptr_A_subrec16 = (
+        A
+        + row_start_o * H * BT
+        + col_start_o
+        + offs_rows_in_block[:, None] * H * BT
+        + offs_cols_in_block[None, :]
+    )
 
     # 3 Create a mask to prevent out-of-bounds access
     global_rows = row_start_o + offs_rows_in_block[:, None]
@@ -76,14 +79,14 @@ def solve_tril_16x16_kernel(
     load_mask = (global_rows < T) & (global_cols < BT)
 
     # 4 Use mask to safely load data
-    b_A_subrec16 = tl.load(ptr_A_subrec16, mask=load_mask,
-                           other=0.0).to(tl.float32)
+    b_A_subrec16 = tl.load(ptr_A_subrec16, mask=load_mask, other=0.0).to(tl.float32)
     b_A = tl.insert_slice(
         ful=b_A,
         sub=b_A_subrec16[None, :, :], # (1, 16, 16)
         offsets=[blkid, 0, 0],
         sizes=[1, 16, 16],
-        strides=[1, 1, 1])
+        strides=[1, 1, 1],
+    )
 
     local_ori_A = tl.trans(b_A, (1, 0, 2))
     local_ori_A = tl.reshape(local_ori_A, (16, 16 * N_BLOCKS))
@@ -97,9 +100,7 @@ def solve_tril_16x16_kernel(
 
     # for loop to update N_BLOCKS row vector
     for i in range(1, 16):
-        nblks_vec16 = -tl.extract_slice(local_ori_A, (i, 0),
-                                        (1, 16 * N_BLOCKS),
-                                        (16 * N_BLOCKS, 1))
+        nblks_vec16 = -tl.extract_slice(local_ori_A, (i, 0), (1, 16 * N_BLOCKS), (16 * N_BLOCKS, 1))
         b_a = tl.reshape(nblks_vec16, (N_BLOCKS, 16))
 
         dot_tmp = tl.trans(b_a[:, :, None] * b_A, (1, 0, 2))
@@ -107,34 +108,27 @@ def solve_tril_16x16_kernel(
         b_a = b_a + dot_product
 
         b_a_new_expanded = b_a[:, None, :]
-        b_A = tl.insert_slice(ful=b_A,
-                              sub=b_a_new_expanded,
-                              offsets=[0, i, 0],
-                              sizes=[N_BLOCKS, 1, 16],
-                              strides=[1, 1, 1])
+        b_A = tl.insert_slice(
+            ful=b_A, sub=b_a_new_expanded, offsets=[0, i, 0], sizes=[N_BLOCKS, 1, 16], strides=[1, 1, 1]
+        )
 
-    on_diagonal = (rows == cols)
+    on_diagonal = rows == cols
     b_A = tl.where(on_diagonal, b_A + 1.0, b_A)
 
     b_A = tl.reshape(b_A, (N_BLOCKS * 16, 16))
-    p_Ai = tl.make_block_ptr(Ad, (T, 16), (H * 16, 1), (base_t, 0),
-                             (N_BLOCKS * 16, 16), (1, 0))
+    p_Ai = tl.make_block_ptr(Ad, (T, 16), (H * 16, 1), (base_t, 0), (N_BLOCKS * 16, 16), (1, 0))
 
     # 1 Create in-block offset
     offs_rows_to_store = tl.arange(0, N_BLOCKS * 16)
     offs_cols_to_store = tl.arange(0, 16)
 
     # 2 Calculate the pointer of each element
-    p_Ai = (Ad + base_t * H * 16 + 0 +
-            offs_rows_to_store[:, None] * H * 16 +
-            offs_cols_to_store[None, :])
+    p_Ai = Ad + base_t * H * 16 + 0 + offs_rows_to_store[:, None] * H * 16 + offs_cols_to_store[None, :]
     # 3 Create a mask to prevent out-of-bounds access, only check rows
     global_store_rows = base_t + offs_rows_to_store[:, None]
     store_mask = global_store_rows < T
     # 4 use mask to save data safely
-    tl.store(p_Ai,
-             b_A.to(p_Ai.dtype.element_ty, fp_downcast_rounding="rtne"),
-             mask=store_mask)
+    tl.store(p_Ai, b_A.to(p_Ai.dtype.element_ty, fp_downcast_rounding="rtne"), mask=store_mask)
 
 
 @triton.heuristics({"IS_VARLEN": lambda args: args["cu_seqlens"] is not None})
@@ -169,18 +163,12 @@ def merge_16x16_to_32x32_inverse_kernel(
     Ad += (bos * H + i_h) * 16
     Ai += (bos * H + i_h) * 32
 
-    p_A_21 = tl.make_block_ptr(A, (T, 32), (H * 32, 1), (i_t * 32 + 16, 0),
-                               (16, 16), (1, 0))
-    p_Ad_11 = tl.make_block_ptr(Ad, (T, 16), (H * 16, 1), (i_t * 32, 0),
-                                (16, 16), (1, 0))
-    p_Ad_22 = tl.make_block_ptr(Ad, (T, 16), (H * 16, 1), (i_t * 32 + 16, 0),
-                                (16, 16), (1, 0))
-    p_Ai_11 = tl.make_block_ptr(Ai, (T, 32), (H * 32, 1), (i_t * 32, 0),
-                                (16, 16), (1, 0))
-    p_Ai_22 = tl.make_block_ptr(Ai, (T, 32), (H * 32, 1), (i_t * 32 + 16, 16),
-                                (16, 16), (1, 0))
-    p_Ai_21 = tl.make_block_ptr(Ai, (T, 32), (H * 32, 1), (i_t * 32 + 16, 0),
-                                (16, 16), (1, 0))
+    p_A_21 = tl.make_block_ptr(A, (T, 32), (H * 32, 1), (i_t * 32 + 16, 0), (16, 16), (1, 0))
+    p_Ad_11 = tl.make_block_ptr(Ad, (T, 16), (H * 16, 1), (i_t * 32, 0), (16, 16), (1, 0))
+    p_Ad_22 = tl.make_block_ptr(Ad, (T, 16), (H * 16, 1), (i_t * 32 + 16, 0), (16, 16), (1, 0))
+    p_Ai_11 = tl.make_block_ptr(Ai, (T, 32), (H * 32, 1), (i_t * 32, 0), (16, 16), (1, 0))
+    p_Ai_22 = tl.make_block_ptr(Ai, (T, 32), (H * 32, 1), (i_t * 32 + 16, 16), (16, 16), (1, 0))
+    p_Ai_21 = tl.make_block_ptr(Ai, (T, 32), (H * 32, 1), (i_t * 32 + 16, 0), (16, 16), (1, 0))
 
     A_21 = tl.load(p_A_21, boundary_check=(0, 1)).to(tl.float32)
     Ai_11 = tl.load(p_Ad_11, boundary_check=(0, 1)).to(tl.float32)
@@ -313,26 +301,20 @@ def merge_16x16_to_64x64_inverse_kernel(
     offs_n = tl.arange(0, 32)
     mask_store = (offs_m[:, None] < T) & (offs_n[None, :] < 64)
     ptr_Ai = Ai + offs_m[:, None] * (H * 64) + offs_n[None, :]
-    tl.store(ptr_Ai,
-             Ai_11_32.to(ptr_Ai.dtype.element_ty, fp_downcast_rounding="rtne"),
-             mask=mask_store)
+    tl.store(ptr_Ai, Ai_11_32.to(ptr_Ai.dtype.element_ty, fp_downcast_rounding="rtne"), mask=mask_store)
 
     # store Ai_22_32 to (i_t * 64 + 32, 32)
     offs_m = i_t * 64 + 32 + tl.arange(0, 32)
     offs_n = 32 + tl.arange(0, 32)
     mask_store = (offs_m[:, None] < T) & (offs_n[None, :] < 64)
     ptr_Ai = Ai + offs_m[:, None] * (H * 64) + offs_n[None, :]
-    tl.store(ptr_Ai,
-             Ai_22_32.to(ptr_Ai.dtype.element_ty, fp_downcast_rounding="rtne"),
-             mask=mask_store)
+    tl.store(ptr_Ai, Ai_22_32.to(ptr_Ai.dtype.element_ty, fp_downcast_rounding="rtne"), mask=mask_store)
 
     # store Ai_21_32 to (i_t * 64 + 32, 32)
     offs_n = tl.arange(0, 32)
     mask_store = (offs_m[:, None] < T) & (offs_n[None, :] < 64)
     ptr_Ai = Ai + offs_m[:, None] * (H * 64) + offs_n[None, :]
-    tl.store(ptr_Ai,
-             Ai_21_32.to(ptr_Ai.dtype.element_ty, fp_downcast_rounding="rtne"),
-             mask=mask_store)
+    tl.store(ptr_Ai, Ai_21_32.to(ptr_Ai.dtype.element_ty, fp_downcast_rounding="rtne"), mask=mask_store)
 
     # zero out the upper-right 32 * 32 block (rows 0 ~ 31, cols 32 ~ 63)
     offs_m = i_t * 64 + tl.arange(0, 32)
@@ -345,7 +327,7 @@ def merge_16x16_to_64x64_inverse_kernel(
 
 def solve_tril(
     A: torch.Tensor,
-    cu_seqlens: Optional[torch.Tensor] = None,
+    cu_seqlens: torch.Tensor | None = None,
     output_dtype: torch.dtype = torch.float,
 ) -> torch.Tensor:
     """
@@ -367,19 +349,12 @@ def solve_tril(
     assert A.shape[-1] in [16, 32, 64]
 
     B, T, H, BT = A.shape
-    Ad = torch.empty(B,
-                     T,
-                     H,
-                     16,
-                     device=A.device,
-                     dtype=torch.float if BT != 16 else output_dtype)
+    Ad = torch.empty(B, T, H, 16, device=A.device, dtype=torch.float if BT != 16 else output_dtype)
 
     LARGE_BLOCK_T = 608 * 2
 
-    chunk_indices = (prepare_chunk_indices(cu_seqlens, LARGE_BLOCK_T)
-                     if cu_seqlens is not None else None)
-    NT = len(chunk_indices) if cu_seqlens is not None else triton.cdiv(
-        T, LARGE_BLOCK_T)
+    chunk_indices = prepare_chunk_indices(cu_seqlens, LARGE_BLOCK_T) if cu_seqlens is not None else None
+    NT = len(chunk_indices) if cu_seqlens is not None else triton.cdiv(T, LARGE_BLOCK_T)
 
     solve_tril_16x16_kernel[NT, B * H](
         A=A,
@@ -398,10 +373,8 @@ def solve_tril(
         return Ad
 
     Ai = torch.empty(B, T, H, BT, device=A.device, dtype=output_dtype)
-    merge_fn = (merge_16x16_to_32x32_inverse_kernel
-                if BT == 32 else merge_16x16_to_64x64_inverse_kernel)
-    chunk_indices = (prepare_chunk_indices(cu_seqlens, BT)
-                     if cu_seqlens is not None else None)
+    merge_fn = merge_16x16_to_32x32_inverse_kernel if BT == 32 else merge_16x16_to_64x64_inverse_kernel
+    chunk_indices = prepare_chunk_indices(cu_seqlens, BT) if cu_seqlens is not None else None
     NT = len(chunk_indices) if cu_seqlens is not None else triton.cdiv(T, BT)
 
     merge_fn[NT, B * H](
```
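The 16x16-to-32x32/64x64 merge steps rely on the block inverse of a lower-triangular matrix. A reference check of the identity (assuming `A11` and `A22` are the invertible diagonal blocks):

```python
import torch

# For a block lower-triangular matrix
#   [[A11, 0  ]]        [[inv(A11),                   0       ]]
#   [[A21, A22]]^-1  =  [[-inv(A22) @ A21 @ inv(A11), inv(A22)]]
n = 16
A11 = torch.eye(n, dtype=torch.float64) + torch.tril(torch.randn(n, n, dtype=torch.float64), -1)
A22 = torch.eye(n, dtype=torch.float64) + torch.tril(torch.randn(n, n, dtype=torch.float64), -1)
A21 = torch.randn(n, n, dtype=torch.float64)
A = torch.zeros(2 * n, 2 * n, dtype=torch.float64)
A[:n, :n], A[n:, :n], A[n:, n:] = A11, A21, A22
Ai_21 = -torch.linalg.inv(A22) @ A21 @ torch.linalg.inv(A11)
assert torch.allclose(torch.linalg.inv(A)[n:, :n], Ai_21, atol=1e-6)
```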
**`vllm_ascend/ops/triton/fla/utils.py`**

```diff
@@ -9,7 +9,7 @@
 # ruff: noqa: E501
 import contextlib
 import functools
-from typing import Callable
+from collections.abc import Callable
 
 import torch
 from vllm.triton_utils import tl, triton
@@ -19,38 +19,24 @@ def prepare_lens(cu_seqlens: torch.LongTensor) -> torch.LongTensor:
     return cu_seqlens[1:] - cu_seqlens[:-1]
 
 
-def prepare_chunk_indices(cu_seqlens: torch.LongTensor,
-                          chunk_size: int) -> torch.LongTensor:
-    indices = torch.cat([
-        torch.arange(n)
-        for n in triton.cdiv(prepare_lens(cu_seqlens), chunk_size).tolist()
-    ])
-    return torch.stack([indices.eq(0).cumsum(0) - 1, indices],
-                       1).to(cu_seqlens)
+def prepare_chunk_indices(cu_seqlens: torch.LongTensor, chunk_size: int) -> torch.LongTensor:
+    indices = torch.cat([torch.arange(n) for n in triton.cdiv(prepare_lens(cu_seqlens), chunk_size).tolist()])
+    return torch.stack([indices.eq(0).cumsum(0) - 1, indices], 1).to(cu_seqlens)
 
 
-def prepare_chunk_offsets(cu_seqlens: torch.LongTensor,
-                          chunk_size: int) -> torch.LongTensor:
-    return torch.cat([
-        cu_seqlens.new_tensor([0]),
-        triton.cdiv(prepare_lens(cu_seqlens), chunk_size)
-    ]).cumsum(-1)
+def prepare_chunk_offsets(cu_seqlens: torch.LongTensor, chunk_size: int) -> torch.LongTensor:
+    return torch.cat([cu_seqlens.new_tensor([0]), triton.cdiv(prepare_lens(cu_seqlens), chunk_size)]).cumsum(-1)
 
 
-def input_guard(
-        fn: Callable[..., torch.Tensor]) -> Callable[..., torch.Tensor]:
+def input_guard(fn: Callable[..., torch.Tensor]) -> Callable[..., torch.Tensor]:
     """
     A decorator to make sure all input tensors are contiguous and set the device based on input tensors.
     """
 
     @functools.wraps(fn)
     def wrapper(*args, **kwargs):
-        contiguous_args = (i if not isinstance(i, torch.Tensor) else
-                           i.contiguous() for i in args)
-        contiguous_kwargs = {
-            k: (v if not isinstance(v, torch.Tensor) else v.contiguous())
-            for k, v in kwargs.items()
-        }
+        contiguous_args = (i if not isinstance(i, torch.Tensor) else i.contiguous() for i in args)
+        contiguous_kwargs = {k: (v if not isinstance(v, torch.Tensor) else v.contiguous()) for k, v in kwargs.items()}
 
         tensor = None
         for arg in args:
```
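A worked example of these helpers, using two packed sequences of lengths 5 and 9 with `chunk_size=4` (expected outputs shown in comments):

```python
import torch

cu_seqlens = torch.tensor([0, 5, 14])
# prepare_lens(cu_seqlens)             -> tensor([5, 9])
# prepare_chunk_indices(cu_seqlens, 4) -> tensor([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]])
#                                         (sequence index, chunk index within that sequence)
# prepare_chunk_offsets(cu_seqlens, 4) -> tensor([0, 2, 5])  # cumulative chunk counts
```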
**`vllm_ascend/ops/triton/fla/wy_fast.py`**

```diff
@@ -9,7 +9,6 @@
 
 # ruff: noqa: E501
 # mypy: ignore-errors
-from typing import Optional, Tuple
 
 import torch
 from vllm.triton_utils import tl, triton
@@ -17,23 +16,39 @@ from vllm.triton_utils import tl, triton
 from .utils import prepare_chunk_indices
 
 
-@triton.heuristics({'IS_VARLEN': lambda args: args['cu_seqlens'] is not None})
-@triton.jit(do_not_specialize=['T'])
-def recompute_w_u_fwd_kernel(k, v, beta, w, u, A, g, cu_seqlens, chunk_indices,
-                             T, H: tl.constexpr, Hg: tl.constexpr,
-                             K: tl.constexpr, V: tl.constexpr,
-                             BT: tl.constexpr, BK: tl.constexpr,
-                             BV: tl.constexpr, IS_VARLEN: tl.constexpr):
+@triton.heuristics({"IS_VARLEN": lambda args: args["cu_seqlens"] is not None})
+@triton.jit(do_not_specialize=["T"])
+def recompute_w_u_fwd_kernel(
+    k,
+    v,
+    beta,
+    w,
+    u,
+    A,
+    g,
+    cu_seqlens,
+    chunk_indices,
+    T,
+    H: tl.constexpr,
+    Hg: tl.constexpr,
+    K: tl.constexpr,
+    V: tl.constexpr,
+    BT: tl.constexpr,
+    BK: tl.constexpr,
+    BV: tl.constexpr,
+    IS_VARLEN: tl.constexpr,
+):
     T_max = T
     i_t_o = tl.program_id(0)
 
     for i_bh in range(H):
         i_b, i_h = i_bh // H, i_bh % H
         if IS_VARLEN:
-            i_n, i_t = tl.load(chunk_indices + i_t_o * 2).to(
-                tl.int32), tl.load(chunk_indices + i_t_o * 2 + 1).to(tl.int32)
-            bos, eos = tl.load(cu_seqlens + i_n).to(
-                tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
+            i_n, i_t = (
+                tl.load(chunk_indices + i_t_o * 2).to(tl.int32),
+                tl.load(chunk_indices + i_t_o * 2 + 1).to(tl.int32),
+            )
+            bos, eos = tl.load(cu_seqlens + i_n).to(tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
             T = eos - bos
         else:
             bos, eos = i_b * T, i_b * T + T
@@ -44,7 +59,7 @@ def recompute_w_u_fwd_kernel(k, v, beta, w, u, A, g, cu_seqlens, chunk_indices,
 
         offs_t_2d = global_offs_t[:, None]
         offs_bt = tl.arange(0, BT)[None, :]
-        ptr_A = (A + (bos * H + i_h) * BT + offs_t_2d * (H * BT) + offs_bt * 1)
+        ptr_A = A + (bos * H + i_h) * BT + offs_t_2d * (H * BT) + offs_bt * 1
         mask_A = mask_t[:, None]
         b_A = tl.load(ptr_A, mask=mask_A, other=0.0).to(tl.float32)
 
@@ -58,29 +73,25 @@ def recompute_w_u_fwd_kernel(k, v, beta, w, u, A, g, cu_seqlens, chunk_indices,
             offs_v = i_v * BV + tl.arange(0, BV)[None, :]
             mask_v = (mask_t[:, None]) & (offs_v < V)
 
-            ptr_v = (v + (bos * H + i_h) * V + offs_t_2d * (H * V) +
-                     offs_v * 1)
+            ptr_v = v + (bos * H + i_h) * V + offs_t_2d * (H * V) + offs_v * 1
             b_v = tl.load(ptr_v, mask=mask_v, other=0.0).to(tl.float32)
 
-            b_vb = (b_v * b_beta[:, None])
+            b_vb = b_v * b_beta[:, None]
             b_u = tl.dot(b_A, b_vb, allow_tf32=False)
 
-            ptr_u = (u + (bos * H + i_h) * V + offs_t_2d * (H * V) +
-                     offs_v * 1)
+            ptr_u = u + (bos * H + i_h) * V + offs_t_2d * (H * V) + offs_v * 1
             tl.store(ptr_u, b_u.to(ptr_u.dtype.element_ty), mask=mask_v)
 
         for i_k in range(tl.cdiv(K, BK)):
             offs_k = i_k * BK + tl.arange(0, BK)[None, :]
             mask_k = (mask_t[:, None]) & (offs_k < K)
-            ptr_k = (k + (bos * Hg + i_h // (H // Hg)) * K + offs_t_2d *
-                     (Hg * K) + offs_k * 1)
+            ptr_k = k + (bos * Hg + i_h // (H // Hg)) * K + offs_t_2d * (Hg * K) + offs_k * 1
             b_k = tl.load(ptr_k, mask=mask_k, other=0.0).to(tl.float32)
 
-            b_kb = (b_k * b_beta[:, None] * b_g[:, None])
+            b_kb = b_k * b_beta[:, None] * b_g[:, None]
             b_w = tl.dot(b_A, b_kb)
 
-            ptr_w = (w + (bos * H + i_h) * K + offs_t_2d * (H * K) +
-                     offs_k * 1)
+            ptr_w = w + (bos * H + i_h) * K + offs_t_2d * (H * K) + offs_k * 1
             tl.store(ptr_w, b_w.to(ptr_w.dtype.element_ty), mask=mask_k)
 
 
@@ -90,14 +101,13 @@ def recompute_w_u_fwd(
     beta: torch.Tensor,
     g_cumsum: torch.Tensor,
     A: torch.Tensor,
-    cu_seqlens: Optional[torch.LongTensor] = None,
-) -> Tuple[torch.Tensor, torch.Tensor]:
+    cu_seqlens: torch.LongTensor | None = None,
+) -> tuple[torch.Tensor, torch.Tensor]:
     B, T, Hg, K, V = *k.shape, v.shape[-1]
     H = v.shape[-2]
     BT = A.shape[-1]
 
-    chunk_indices = prepare_chunk_indices(cu_seqlens, BT) \
-        if cu_seqlens is not None else None
+    chunk_indices = prepare_chunk_indices(cu_seqlens, BT) if cu_seqlens is not None else None
     NT = triton.cdiv(T, BT) if cu_seqlens is None else len(chunk_indices)
 
     BK = 64
```
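`recompute_w_u_fwd_kernel` computes, per chunk, `u = A @ (v * beta)` and `w = A @ (k * beta * g)`, where `g` is the kernel's per-row gate factor `b_g`. A dense per-chunk reference under that reading:

```python
import torch


def recompute_w_u_reference(k, v, beta, g, A):
    # k: [BT, K]; v: [BT, V]; beta, g: [BT]; A: [BT, BT]
    u = A @ (v * beta[:, None])
    w = A @ (k * (beta * g)[:, None])
    return w, u
```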