### What this PR does / why we need it?
This is a code-style refactor of the fused MoE modules: multi-line signatures and calls are rewrapped with one argument per line and trailing commas, `Optional[X]` annotations are replaced with PEP 604 `X | None` unions, and unused imports are dropped. There is no functional change.

**Scope of Changes**:
| File Path |
| :--- |
| `vllm_ascend/ops/fused_moe/comm_utils.py` |
| `vllm_ascend/ops/fused_moe/experts_selector.py` |
| `vllm_ascend/ops/fused_moe/fused_moe.py` |
| `vllm_ascend/ops/fused_moe/moe_comm_method.py` |
| `vllm_ascend/ops/fused_moe/moe_mlp.py` |
| `vllm_ascend/ops/fused_moe/prepare_finalize.py` |
| `vllm_ascend/ops/fused_moe/token_dispatcher.py` |
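
To illustrate the two style patterns with a hypothetical helper (`apply_old` / `apply_new` are not from the diff, just a minimal sketch of the before/after conventions):

```python
import torch
from typing import Optional

# Before: pre-PEP 604 style with aligned continuation lines.
def apply_old(x: torch.Tensor,
              scale: Optional[torch.Tensor] = None) -> torch.Tensor:
    return x if scale is None else x * scale

# After: `X | None` unions (requires Python >= 3.10), one parameter
# per line, trailing comma, closing parenthesis on its own line, so
# adding a parameter later touches exactly one line of the diff.
def apply_new(
    x: torch.Tensor,
    scale: torch.Tensor | None = None,
) -> torch.Tensor:
    return x if scale is None else x * scale
```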
### Does this PR introduce _any_ user-facing change?
No. The change is formatting-only; behavior is unchanged.
### How was this patch tested?
- vLLM version: v0.14.0
- vLLM main: d68209402d
Signed-off-by: MrZ20 <2609716663@qq.com>
Signed-off-by: SILONG ZENG <2609716663@qq.com>
The diff below is the `vllm_ascend/ops/fused_moe/moe_mlp.py` portion of the change; the other files in the table appear to receive the same style pass.

```diff
@@ -14,7 +14,6 @@
 # limitations under the License.
 # This file is a part of the vllm-ascend project.
 
-from typing import Optional
 
 import torch
 import torch_npu
@@ -23,24 +22,22 @@ from vllm.forward_context import get_forward_context
 from vllm.triton_utils import HAS_TRITON
 
 from vllm_ascend.ascend_forward_context import MoECommType
-from vllm_ascend.utils import (AscendDeviceType, dispose_tensor,
-                               enable_custom_op, get_ascend_device_type,
-                               get_weight_prefetch_method)
+from vllm_ascend.utils import (
+    dispose_tensor,
+    enable_custom_op,
+    get_weight_prefetch_method,
+)
 
 
 def _custom_gmm_swiglu_enabled(fusion, dynamic_eplb):
     return fusion and dynamic_eplb and enable_custom_op()
 
 
-def cumsum_group_list(group_list: torch.Tensor,
-                      src_list_type: int,
-                      dst_list_type: int,
-                      active_num: int = 0,
-                      expert_num: int = 0) -> torch.Tensor:
+def cumsum_group_list(
+    group_list: torch.Tensor, src_list_type: int, dst_list_type: int, active_num: int = 0, expert_num: int = 0
+) -> torch.Tensor:
     if src_list_type not in [0, 1, 2]:
-        raise ValueError(
-            f"group_list_type should be in [0, 1, 2], but received {src_list_type}"
-        )
+        raise ValueError(f"group_list_type should be in [0, 1, 2], but received {src_list_type}")
 
     if src_list_type == dst_list_type:
         return group_list
```
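
Reading `cumsum_group_list` together with the `torch.diff` conversion later in this file suggests the convention: `group_list_type=0` stores cumulative token offsets per expert, `1` stores per-expert token counts, and `2` stores `(expert_id, token_count)` pairs. A minimal CPU sketch of the count/cumsum round trip (an illustration, not the helper itself):

```python
import torch

# Per-expert token counts for 4 experts (group_list_type=1).
counts = torch.tensor([3, 0, 5, 2])

# type 1 -> type 0: cumulative offsets, the form a grouped matmul
# with group_list_type=0 consumes.
cumsum = counts.cumsum(dim=0)            # tensor([ 3,  3,  8, 10])

# type 0 -> type 1: the inverse transform, matching the
# torch.cat([group_list[:1], torch.diff(group_list, dim=0)])
# expression used inside quant_apply_mlp below.
recovered = torch.cat([cumsum[:1], torch.diff(cumsum, dim=0)])
assert torch.equal(recovered, counts)
```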
```diff
@@ -53,10 +50,9 @@ def cumsum_group_list(group_list: torch.Tensor,
     if src_list_type == 2 and dst_list_type == 0:
         experts = pad(group_list[:, 0], (1, 0))
         tokens = pad(group_list[:, 1].cumsum(dim=0), (1, 0))
-        cumsum_group_list = torch.full(size=(expert_num, ),
-                                       fill_value=active_num,
-                                       dtype=group_list.dtype,
-                                       device=group_list.device)
+        cumsum_group_list = torch.full(
+            size=(expert_num,), fill_value=active_num, dtype=group_list.dtype, device=group_list.device
+        )
 
         for i, (start, end) in enumerate(zip(experts[:-1], experts[1:])):
             if end > start:
@@ -65,30 +61,32 @@ def cumsum_group_list(group_list: torch.Tensor,
         return cumsum_group_list
     raise NotImplementedError(
         f"Conversion from src_list_type={src_list_type} to dst_list_type={dst_list_type} is not implemented yet. "
-        "This feature is under development.")
+        "This feature is under development."
+    )
 
 
-def quant_apply_mlp(hidden_states: torch.Tensor,
-                    w1: list[torch.Tensor],
-                    w1_scale: list[torch.Tensor],
-                    w2: list[torch.Tensor],
-                    w2_scale: list[torch.Tensor],
-                    group_list: torch.Tensor,
-                    group_list_type: int = 1,
-                    dynamic_scale: torch.Tensor = None,
-                    w1_scale_bias: torch.Tensor = None,
-                    w2_scale_bias: torch.Tensor = None,
-                    w1_offset: Optional[torch.Tensor] = None,
-                    w2_offset: Optional[torch.Tensor] = None,
-                    fusion: bool = False,
-                    dynamic_eplb: bool = False) -> torch.Tensor:
+def quant_apply_mlp(
+    hidden_states: torch.Tensor,
+    w1: list[torch.Tensor],
+    w1_scale: list[torch.Tensor],
+    w2: list[torch.Tensor],
+    w2_scale: list[torch.Tensor],
+    group_list: torch.Tensor,
+    group_list_type: int = 1,
+    dynamic_scale: torch.Tensor = None,
+    w1_scale_bias: torch.Tensor = None,
+    w2_scale_bias: torch.Tensor = None,
+    w1_offset: torch.Tensor | None = None,
+    w2_offset: torch.Tensor | None = None,
+    fusion: bool = False,
+    dynamic_eplb: bool = False,
+) -> torch.Tensor:
     if w1_offset is not None:
         unquantized_hidden_states = hidden_states
         quantized_hidden_states = None
     elif dynamic_scale is None:
         unquantized_hidden_states = hidden_states
-        hidden_states, pertoken_scale = torch_npu.npu_dynamic_quant(
-            hidden_states)
+        hidden_states, pertoken_scale = torch_npu.npu_dynamic_quant(hidden_states)
         # Dispose the original unquantized hidden states
         # to save npu memory because they're no longer used.
        dispose_tensor(unquantized_hidden_states)
```
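
`npu_dynamic_quant` returns a quantized tensor plus a per-token scale, which the later grouped matmuls consume as `x_scale`. A rough CPU approximation of that contract, assuming symmetric per-token int8 quantization (the actual NPU kernel may differ in details):

```python
import torch

def dynamic_quant_ref(x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    # One scale per token (row): absmax / 127, guarded against zero rows.
    scale = x.abs().amax(dim=-1, keepdim=True).clamp_min(1e-8) / 127.0
    q = torch.round(x / scale).clamp(-128, 127).to(torch.int8)
    return q, scale.squeeze(-1)

x = torch.randn(4, 8)
q, pertoken_scale = dynamic_quant_ref(x)
# Dequantization recovers x to within half a quantization step per row.
err = (q.float() * pertoken_scale.unsqueeze(-1) - x).abs().max()
assert err <= pertoken_scale.max() / 2 + 1e-6
```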
```diff
@@ -103,22 +101,18 @@ def quant_apply_mlp(hidden_states: torch.Tensor,
 
     weight_prefetch_method = get_weight_prefetch_method()
     if weight_prefetch_method:
-        weight_prefetch_method.maybe_prefetch_moe_weight_postprocess(
-            hidden_states)
+        weight_prefetch_method.maybe_prefetch_moe_weight_postprocess(hidden_states)
     is_mc2 = get_forward_context().moe_comm_type == MoECommType.MC2
     if w1_scale_bias is None and w1_offset is None and is_mc2:
         if _custom_gmm_swiglu_enabled(fusion, dynamic_eplb):
             # gmm1: gate_up_proj & act_fn: swiglu
-            hidden_states, swiglu_out_scale, _ = (
-                torch.ops._C_ascend.
-                grouped_matmul_swiglu_quant_weight_nz_tensor_list(
-                    x=hidden_states,
-                    weight=w1,
-                    weight_scale=w1_scale,
-                    x_scale=pertoken_scale,
-                    group_list=cumsum_group_list(group_list, group_list_type,
-                                                 0),
-                ))
+            hidden_states, swiglu_out_scale, _ = torch.ops._C_ascend.grouped_matmul_swiglu_quant_weight_nz_tensor_list(
+                x=hidden_states,
+                weight=w1,
+                weight_scale=w1_scale,
+                x_scale=pertoken_scale,
+                group_list=cumsum_group_list(group_list, group_list_type, 0),
+            )
         elif fusion and not dynamic_eplb:
             # gmm1: gate_up_proj & act_fn: swiglu
             hidden_states, swiglu_out_scale, _ = torch_npu.npu_grouped_matmul_swiglu_quant(
@@ -126,7 +120,8 @@ def quant_apply_mlp(hidden_states: torch.Tensor,
                 weight=w1[0],
                 group_list=cumsum_group_list(group_list, group_list_type, 0),
                 weight_scale=w1_scale[0],
-                x_scale=pertoken_scale)
+                x_scale=pertoken_scale,
+            )
             if quantized_hidden_states is not None:
                 dispose_tensor(quantized_hidden_states)
         else:
@@ -140,7 +135,8 @@ def quant_apply_mlp(hidden_states: torch.Tensor,
                 group_list_type=group_list_type,
                 group_type=0,
                 group_list=group_list,
-                output_dtype=torch.int32)[0]
+                output_dtype=torch.int32,
+            )[0]
             if quantized_hidden_states is not None:
                 dispose_tensor(quantized_hidden_states)
             # act_fn: swiglu
@@ -165,7 +161,8 @@ def quant_apply_mlp(hidden_states: torch.Tensor,
             group_list_type=group_list_type,
             group_type=0,
             group_list=group_list,
-            output_dtype=w2_scale[0].dtype)[0]
+            output_dtype=w2_scale[0].dtype,
+        )[0]
     elif w1_offset is not None:
         # gmm1: gate_up_proj
         hidden_states = torch_npu.npu_grouped_matmul(
@@ -177,7 +174,8 @@ def quant_apply_mlp(hidden_states: torch.Tensor,
             group_list_type=group_list_type,
             group_type=0,
             group_list=group_list,
-            output_dtype=_output_dtype)[0]
+            output_dtype=_output_dtype,
+        )[0]
         dispose_tensor(unquantized_hidden_states)
         # act_fn: swiglu
         hidden_states = torch_npu.npu_swiglu(hidden_states)
@@ -191,13 +189,12 @@ def quant_apply_mlp(hidden_states: torch.Tensor,
             group_list_type=group_list_type,
             group_type=0,
             group_list=group_list,
-            output_dtype=_output_dtype)[0]
+            output_dtype=_output_dtype,
+        )[0]
     else:
         if w1_scale_bias is not None:
            if group_list_type == 0:
-                group_list = torch.cat(
-                    [group_list[:1],
-                     torch.diff(group_list, dim=0)])
+                group_list = torch.cat([group_list[:1], torch.diff(group_list, dim=0)])
                 group_list_type = 1
             bias1 = [w1_scale_bias] if not fusion else w1_scale_bias
             bias2 = [w2_scale_bias]
@@ -206,17 +203,14 @@ def quant_apply_mlp(hidden_states: torch.Tensor,
 
         if _custom_gmm_swiglu_enabled(fusion, dynamic_eplb):
             # gmm1: gate_up_proj & act_fn: swiglu
-            hidden_states, swiglu_out_scale, _ = (
-                torch.ops._C_ascend.
-                grouped_matmul_swiglu_quant_weight_nz_tensor_list(
-                    x=hidden_states,
-                    weight=w1,
-                    weight_scale=w1_scale,
-                    x_scale=pertoken_scale,
-                    group_list=cumsum_group_list(group_list, group_list_type,
-                                                 0),
-                    bias=bias1,
-                ))
+            hidden_states, swiglu_out_scale, _ = torch.ops._C_ascend.grouped_matmul_swiglu_quant_weight_nz_tensor_list(
+                x=hidden_states,
+                weight=w1,
+                weight_scale=w1_scale,
+                x_scale=pertoken_scale,
+                group_list=cumsum_group_list(group_list, group_list_type, 0),
+                bias=bias1,
+            )
         elif fusion and not dynamic_eplb:
             # gmm1: gate_up_proj & act_fn: swiglu
             hidden_states, swiglu_out_scale, _ = torch_npu.npu_grouped_matmul_swiglu_quant(
@@ -225,7 +219,8 @@ def quant_apply_mlp(hidden_states: torch.Tensor,
                 bias=bias1,
                 group_list=cumsum_group_list(group_list, group_list_type, 0),
                 weight_scale=w1_scale[0],
-                x_scale=pertoken_scale)
+                x_scale=pertoken_scale,
+            )
             if quantized_hidden_states is not None:
                 dispose_tensor(quantized_hidden_states)
         else:
@@ -241,21 +236,20 @@ def quant_apply_mlp(hidden_states: torch.Tensor,
             group_list_type=group_list_type,
             group_type=0,
             group_list=group_list,
-            output_dtype=_output_dtype)[0]
+            output_dtype=_output_dtype,
+        )[0]
         if quantized_hidden_states is not None:
             dispose_tensor(quantized_hidden_states)
         # act_fn: swiglu
         if HAS_TRITON:
-            from vllm_ascend.ops.triton.activation.swiglu_quant import \
-                swiglu_quant
+            from vllm_ascend.ops.triton.activation.swiglu_quant import swiglu_quant
+
             hidden_states, swiglu_out_scale = swiglu_quant(
-                hidden_states,
-                group_list=group_list,
-                group_list_type=group_list_type)
+                hidden_states, group_list=group_list, group_list_type=group_list_type
+            )
         else:
             hidden_states = torch_npu.npu_swiglu(hidden_states)
-            hidden_states, swiglu_out_scale = torch_npu.npu_dynamic_quant(
-                hidden_states)
+            hidden_states, swiglu_out_scale = torch_npu.npu_dynamic_quant(hidden_states)
         # gmm2: down_proj
         hidden_states = torch_npu.npu_grouped_matmul(
             x=[hidden_states],
```
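
Both activation branches compute SwiGLU on the gate_up output before down_proj. A plain-PyTorch reference of the standard SwiGLU split, assumed here to mirror what `npu_swiglu` computes (an illustration, not the kernel):

```python
import torch
import torch.nn.functional as F

def swiglu_ref(x: torch.Tensor) -> torch.Tensor:
    # gate_up_proj packs [gate | up] along the last dim;
    # SwiGLU is silu(gate) * up, halving the hidden width.
    gate, up = x.chunk(2, dim=-1)
    return F.silu(gate) * up

out = swiglu_ref(torch.randn(4, 16))
assert out.shape == (4, 8)
```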
```diff
@@ -267,18 +261,20 @@ def quant_apply_mlp(hidden_states: torch.Tensor,
             group_list_type=group_list_type,
             group_type=0,
             group_list=group_list,
-            output_dtype=_output_dtype)[0]
+            output_dtype=_output_dtype,
+        )[0]
     return hidden_states
 
 
-def unquant_apply_mlp(hidden_states: torch.Tensor,
-                      w1: torch.Tensor,
-                      w2: torch.Tensor,
-                      group_list: torch.Tensor,
-                      group_list_type: int = 1,
-                      topk_scales: Optional[torch.Tensor] = None,
-                      need_trans: bool = True) -> torch.Tensor:
-
+def unquant_apply_mlp(
+    hidden_states: torch.Tensor,
+    w1: torch.Tensor,
+    w2: torch.Tensor,
+    group_list: torch.Tensor,
+    group_list_type: int = 1,
+    topk_scales: torch.Tensor | None = None,
+    need_trans: bool = True,
+) -> torch.Tensor:
     if need_trans:
         w1 = w1.transpose(1, 2)
         w2 = w2.transpose(1, 2)
```
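
`npu_grouped_matmul` runs one matmul per expert over contiguous row segments selected by `group_list`. A CPU sketch of those semantics for per-expert counts (`group_list_type=1`); this is an assumption-based reference, not the NPU implementation:

```python
import torch

def grouped_matmul_ref(x: torch.Tensor, w: torch.Tensor, counts: torch.Tensor) -> torch.Tensor:
    """x: (T, K) tokens sorted by expert; w: (E, K, N); counts: (E,)."""
    outs, start = [], 0
    for e, n in enumerate(counts.tolist()):
        outs.append(x[start:start + n] @ w[e])   # rows belonging to expert e
        start += n
    return torch.cat(outs, dim=0)

x = torch.randn(10, 4)
w = torch.randn(3, 4, 6)                         # 3 experts, K=4 -> N=6
y = grouped_matmul_ref(x, w, torch.tensor([3, 5, 2]))
assert y.shape == (10, 6)
```

The `(E, K, N)` operand layout here would also explain the `transpose(1, 2)` above when `need_trans=True`, if the weights are stored output-major.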
```diff
@@ -307,44 +303,50 @@ def unquant_apply_mlp(hidden_states: torch.Tensor,
     return hidden_states
 
 
-def unified_apply_mlp(hidden_states: torch.Tensor,
-                      w1: torch.Tensor | list[torch.Tensor],
-                      w2: torch.Tensor | list[torch.Tensor],
-                      group_list: torch.Tensor,
-                      w1_scale: Optional[list[torch.Tensor]] = None,
-                      w2_scale: Optional[list[torch.Tensor]] = None,
-                      dynamic_scale: torch.Tensor = None,
-                      group_list_type: int = 1,
-                      w1_scale_bias: torch.Tensor = None,
-                      w2_scale_bias: torch.Tensor = None,
-                      w1_offset: Optional[torch.Tensor] = None,
-                      w2_offset: Optional[torch.Tensor] = None,
-                      topk_scales: Optional[torch.Tensor] = None,
-                      with_quant: bool = False,
-                      fusion: bool = False,
-                      need_trans: bool = True,
-                      dynamic_eplb: bool = False) -> torch.Tensor:
+def unified_apply_mlp(
+    hidden_states: torch.Tensor,
+    w1: torch.Tensor | list[torch.Tensor],
+    w2: torch.Tensor | list[torch.Tensor],
+    group_list: torch.Tensor,
+    w1_scale: list[torch.Tensor] | None = None,
+    w2_scale: list[torch.Tensor] | None = None,
+    dynamic_scale: torch.Tensor = None,
+    group_list_type: int = 1,
+    w1_scale_bias: torch.Tensor = None,
+    w2_scale_bias: torch.Tensor = None,
+    w1_offset: torch.Tensor | None = None,
+    w2_offset: torch.Tensor | None = None,
+    topk_scales: torch.Tensor | None = None,
+    with_quant: bool = False,
+    fusion: bool = False,
+    need_trans: bool = True,
+    dynamic_eplb: bool = False,
+) -> torch.Tensor:
     if with_quant:
         assert w1_scale is not None and w2_scale is not None
-        return quant_apply_mlp(hidden_states=hidden_states,
-                               w1=w1,
-                               w1_scale=w1_scale,
-                               w2=w2,
-                               w2_scale=w2_scale,
-                               group_list=group_list,
-                               dynamic_scale=dynamic_scale,
-                               group_list_type=group_list_type,
-                               w1_scale_bias=w1_scale_bias,
-                               w2_scale_bias=w2_scale_bias,
-                               w1_offset=w1_offset,
-                               w2_offset=w2_offset,
-                               fusion=fusion,
-                               dynamic_eplb=dynamic_eplb)
+        return quant_apply_mlp(
+            hidden_states=hidden_states,
+            w1=w1,
+            w1_scale=w1_scale,
+            w2=w2,
+            w2_scale=w2_scale,
+            group_list=group_list,
+            dynamic_scale=dynamic_scale,
+            group_list_type=group_list_type,
+            w1_scale_bias=w1_scale_bias,
+            w2_scale_bias=w2_scale_bias,
+            w1_offset=w1_offset,
+            w2_offset=w2_offset,
+            fusion=fusion,
+            dynamic_eplb=dynamic_eplb,
+        )
     else:
-        return unquant_apply_mlp(hidden_states=hidden_states,
-                                 w1=w1,
-                                 w2=w2,
-                                 group_list=group_list,
-                                 group_list_type=group_list_type,
-                                 topk_scales=topk_scales,
-                                 need_trans=need_trans)
+        return unquant_apply_mlp(
+            hidden_states=hidden_states,
+            w1=w1,
+            w2=w2,
+            group_list=group_list,
+            group_list_type=group_list_type,
+            topk_scales=topk_scales,
+            need_trans=need_trans,
+        )
```
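
For reference, a hypothetical call into the dispatcher on the unquantized path. Shapes and the weight layout are assumptions for illustration only, and this requires an Ascend device with `torch_npu` installed:

```python
import torch
from vllm_ascend.ops.fused_moe.moe_mlp import unified_apply_mlp

# 10 routed tokens, 3 experts, hidden=4, intermediate=6 (all illustrative).
hidden = torch.randn(10, 4)          # tokens already sorted by expert
w1 = torch.randn(3, 12, 4)           # gate_up, assumed stored (E, 2*I, H)
w2 = torch.randn(3, 4, 6)            # down, assumed stored (E, H, I)
counts = torch.tensor([3, 5, 2])     # group_list_type=1 per-expert counts

out = unified_apply_mlp(
    hidden_states=hidden,
    w1=w1,
    w2=w2,
    group_list=counts,
    group_list_type=1,
    with_quant=False,   # unquantized path -> unquant_apply_mlp
    need_trans=True,    # transpose weights to (E, in, out) for grouped matmul
)
```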