### What this PR does / why we need it?
This PR introduces support for W8A8 dynamic quantization for
Mixture-of-Experts (MoE) models on Ascend 310P devices. This is achieved
by:
- Implementing a new quantization scheme
`AscendW8A8DynamicFusedMoEMethod310`.
- Adding a unified MLP implementation (`unified_apply_mlp`) for 310P
that handles both quantized and unquantized paths.
- Refactoring the MoE and quantization configuration logic to route correctly to the new 310P-specific implementations (see the sketch after this list).
- Adding new e2e and unit tests to verify the functionality of MoE W8A8
quantization.
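
For orientation, the routing amounts to something like the following sketch. The function name and flags here are hypothetical stand-ins, not the PR's literal code; the actual refactor lives in the MoE and quantization config logic:

```python
class AscendW8A8DynamicFusedMoEMethod310:  # stub for the real class added by this PR
    ...


def select_fused_moe_method(on_310p: bool, w8a8_dynamic: bool):
    # Hypothetical dispatch: 310P + W8A8 dynamic quantization gets the new scheme.
    if on_310p and w8a8_dynamic:
        return AscendW8A8DynamicFusedMoEMethod310()
    raise NotImplementedError("other combinations keep the existing method selection")
```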
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
- Added a new e2e test `test_qwen3_moe_tp2_w8a8` to test MoE W8A8
quantization in a multi-card setup.
- Added several new unit tests for the 310P-specific MoE components,
including `experts_selector`, `fused_moe`, `moe_comm_method`, `moe_mlp`,
and the new `w8a8_dynamic` quantization method.
- vLLM version: v0.15.0
- vLLM main: d7e17aaacd
---------
Signed-off-by: pu-zhe <zpuaa@outlook.com>
The heart of the change is the new unified MLP helper for 310P. First, the quantized path:

```python
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.

import torch
import torch_npu


def quant_apply_mlp(
    hidden_states: torch.Tensor,
    w1: torch.Tensor,
    w1_scale: torch.Tensor,
    w2: torch.Tensor,
    w2_scale: torch.Tensor,
    group_list: torch.Tensor,
    group_list_type: int = 1,
) -> torch.Tensor:
    if group_list_type == 1:
        # Convert group_list from count format to the cumulative-sum
        # format expected by the quantized grouped-matmul kernel.
        group_list = torch.cumsum(group_list, dim=0)

    # Gate/up projection: W8A8 grouped matmul with per-token dynamic dequant.
    hidden_states = torch_npu.npu_quant_grouped_matmul_dequant(
        x=hidden_states, quantized_weight=w1, weight_scale=w1_scale, group_list=group_list, quant_mode="pertoken"
    )
    # SwiGLU activation over the fused gate/up output.
    hidden_states = torch_npu.npu_swiglu(hidden_states)
    # Down projection back to the hidden size.
    hidden_states = torch_npu.npu_quant_grouped_matmul_dequant(
        x=hidden_states, quantized_weight=w2, weight_scale=w2_scale, group_list=group_list, quant_mode="pertoken"
    )
    return hidden_states
```
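
As a concrete illustration of the `group_list` conversion above (plain `torch`, runnable anywhere; the numbers are made up):

```python
import torch

# Count format (group_list_type == 1): tokens routed to each of three experts.
counts = torch.tensor([2, 3, 1])

# Cumulative-sum format: each entry is the end offset of that expert's token block.
offsets = torch.cumsum(counts, dim=0)
print(offsets.tolist())  # [2, 5, 6]
```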
Next, the unquantized path:

```python
def unquant_apply_mlp(
    hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, group_list: torch.Tensor, group_list_type: int = 1
) -> torch.Tensor:
    # Gate/up projection. npu_grouped_matmul takes lists of tensors and
    # returns a list, so the single input/weight are wrapped and the
    # single output is unpacked with [0].
    gate_up_out = torch_npu.npu_grouped_matmul(
        x=[hidden_states],
        weight=[w1],
        split_item=2,
        group_list_type=group_list_type,
        group_type=0,
        group_list=group_list,
    )[0]
    # SwiGLU activation over the fused gate/up output.
    act_out = torch_npu.npu_swiglu(gate_up_out)

    # Down projection back to the hidden size.
    hidden_states = torch_npu.npu_grouped_matmul(
        x=[act_out],
        weight=[w2],
        split_item=2,
        group_list_type=group_list_type,
        group_type=0,
        group_list=group_list,
    )[0]
    return hidden_states
```
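
Worth noting: the unquantized path hands `group_list_type` straight through to `npu_grouped_matmul`, which appears to accept the count format natively, while the quantized kernel is only ever fed cumulative offsets, which is why `quant_apply_mlp` converts count-format inputs up front.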
Finally, the unified entry point that the MoE layer calls:

```python
def unified_apply_mlp(
    hidden_states: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    group_list: torch.Tensor,
    w1_scale: torch.Tensor | None = None,
    w2_scale: torch.Tensor | None = None,
    group_list_type: int = 1,
    with_quant: bool = False,
) -> torch.Tensor:
    # Single entry point for the 310P expert MLP: dispatch to the W8A8
    # dynamic-quantization path when requested, otherwise stay unquantized.
    if with_quant:
        assert w1_scale is not None and w2_scale is not None
        return quant_apply_mlp(
            hidden_states=hidden_states,
            w1=w1,
            w1_scale=w1_scale,
            w2=w2,
            w2_scale=w2_scale,
            group_list=group_list,
            group_list_type=group_list_type,
        )
    else:
        return unquant_apply_mlp(
            hidden_states=hidden_states, w1=w1, w2=w2, group_list=group_list, group_list_type=group_list_type
        )
```
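
And a minimal usage sketch of the quantized path, assuming an Ascend NPU environment with `torch_npu` installed and the module above importable; the shapes, dtypes, and scale layouts below are illustrative guesses, not values taken from the PR's tests:

```python
import torch

# Hypothetical sizes: 6 tokens across 3 experts, hidden size 16, intermediate size 32.
num_experts, num_tokens, hidden, inter = 3, 6, 16, 32

hidden_states = torch.randn(num_tokens, hidden, dtype=torch.float16).npu()
w1 = torch.randint(-128, 128, (num_experts, hidden, 2 * inter), dtype=torch.int8).npu()  # fused gate+up
w1_scale = torch.rand(num_experts, 2 * inter, dtype=torch.float32).npu()
w2 = torch.randint(-128, 128, (num_experts, inter, hidden), dtype=torch.int8).npu()
w2_scale = torch.rand(num_experts, hidden, dtype=torch.float32).npu()
group_list = torch.tensor([2, 3, 1]).npu()  # count format, converted internally

out = unified_apply_mlp(
    hidden_states=hidden_states,
    w1=w1,
    w2=w2,
    group_list=group_list,
    w1_scale=w1_scale,
    w2_scale=w2_scale,
    group_list_type=1,
    with_quant=True,
)
```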