[Feat] 310P support for MoE W8A8 quantization (#6641)
### What this PR does / why we need it?
This PR introduces support for W8A8 dynamic quantization for
Mixture-of-Experts (MoE) models on Ascend 310P devices. This is achieved
by:
- Implementing a new quantization scheme
`AscendW8A8DynamicFusedMoEMethod310`.
- Adding a unified MLP implementation (`unified_apply_mlp`) for 310P
that handles both the quantized and unquantized paths (a rough sketch
of the dispatch idea follows this list).
- Refactoring the MoE and quantization configuration logic to correctly
route to the new 310P-specific implementations.
- Adding new e2e and unit tests to verify the functionality of MoE W8A8
quantization.
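
To make the MLP bullet concrete, here is a minimal sketch of the dispatch idea behind `unified_apply_mlp`: one entry point that serves both the unquantized and the W8A8 path, choosing based on whether weight scales are present. The signature, the SwiGLU layout, and the dequantize-then-matmul body are illustrative assumptions, not the 310P kernel code added by this PR (which quantizes activations per token and calls int8 NPU matmuls).

```python
import torch
import torch.nn.functional as F


def unified_mlp_sketch(
    x: torch.Tensor,                        # (num_tokens, hidden_size), float
    w13: torch.Tensor,                      # (2 * intermediate, hidden_size), int8 or float
    w2: torch.Tensor,                       # (hidden_size, intermediate), int8 or float
    w13_scale: torch.Tensor | None = None,  # (2 * intermediate,) per-channel scales
    w2_scale: torch.Tensor | None = None,   # (hidden_size,) per-channel scales
) -> torch.Tensor:
    """One entry point for both paths: if weight scales are given, treat the
    weights as int8 and dequantize them; otherwise use them as-is."""
    if w13_scale is not None and w2_scale is not None:
        # Quantized path (illustrative only): the real W8A8 dynamic path also
        # quantizes activations per token and runs int8 kernels; here we just
        # dequantize the weights so the sketch stays short and runnable.
        w13 = w13.to(x.dtype) * w13_scale.to(x.dtype).unsqueeze(-1)
        w2 = w2.to(x.dtype) * w2_scale.to(x.dtype).unsqueeze(-1)
    # Shared compute: fused gate/up projection, SwiGLU activation, down projection.
    gate, up = (x @ w13.t()).chunk(2, dim=-1)
    return (F.silu(gate) * up) @ w2.t()
```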
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
- Added a new e2e test `test_qwen3_moe_tp2_w8a8` that exercises MoE W8A8
quantization in a multi-card setup (an illustrative invocation sketch
follows this list).
- Added several new unit tests for the 310P-specific MoE components,
including `experts_selector`, `fused_moe`, `moe_comm_method`, `moe_mlp`,
and the new `w8a8_dynamic` quantization method.
- vLLM version: v0.15.0
- vLLM main:
d7e17aaacd
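
For readers who want to reproduce the multi-card check by hand, an invocation along the lines of the e2e test might look like the sketch below. The model path, the `quantization="ascend"` argument, and the prompt are assumptions for illustration; the authoritative version is the `test_qwen3_moe_tp2_w8a8` test in the e2e suite.

```python
from vllm import LLM, SamplingParams


def moe_w8a8_tp2_smoke_check() -> None:
    # Load a W8A8-quantized MoE checkpoint across two cards and generate a few
    # tokens, roughly mirroring what the multi-card e2e test exercises.
    llm = LLM(
        model="path/to/Qwen3-MoE-W8A8",  # hypothetical quantized checkpoint path
        quantization="ascend",           # assumed flag for Ascend-quantized weights
        tensor_parallel_size=2,          # two-card setup, as in the e2e test
        max_model_len=1024,
    )
    outputs = llm.generate(
        ["The capital of France is"],
        SamplingParams(temperature=0.0, max_tokens=16),
    )
    assert outputs and outputs[0].outputs[0].text.strip()
```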
---------
Signed-off-by: pu-zhe <zpuaa@outlook.com>
@@ -15,8 +15,7 @@
 # This file is a part of the vllm-ascend project.
 #

-from . import w8a8_static  # noqa: F401
-
-# Future extensions:
-# from . import w8a8_dynamic  # noqa: F401
-# from . import w4a16  # noqa: F401
+from . import (
+    w8a8_dynamic,  # noqa: F401
+    w8a8_static,  # noqa: F401
+)
vllm_ascend/_310p/quantization/methods/w8a8_dynamic.py (new file, 149 lines)
@@ -0,0 +1,149 @@
#
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

from collections.abc import Callable
from typing import Any

import torch
from vllm.config import get_current_vllm_config
from vllm.distributed import get_ep_group
from vllm.forward_context import get_forward_context

from vllm_ascend._310p.fused_moe.experts_selector import select_experts
from vllm_ascend.ops.fused_moe.experts_selector import zero_experts_compute
from vllm_ascend.quantization.methods.base import AscendMoEScheme, QuantType

from .registry import register_scheme


@register_scheme("W8A8_DYNAMIC", "moe")
class AscendW8A8DynamicFusedMoEMethod310(AscendMoEScheme):
    """310P-only FusedMoE method for Ascend W8A8_DYNAMIC.

    Notes:
        - This scheme is discovered via 310P local registry.
    """

    # Declare the quantization type for this scheme
    quant_type: QuantType = QuantType.W8A8

    def __init__(self):
        self.ep_group = get_ep_group()
        vllm_config = get_current_vllm_config()
        self.in_dtype = vllm_config.model_config.dtype

    def get_weight(
        self, num_experts: int, intermediate_size_per_partition: int, hidden_sizes: int, params_dtype: torch.dtype
    ) -> dict[str, Any]:
        param_dict = {}
        # Fused gate_up_proj (column parallel)
        param_dict["w13_weight"] = torch.empty(
            num_experts, 2 * intermediate_size_per_partition, hidden_sizes, dtype=torch.int8
        )
        # down_proj (row parallel)
        param_dict["w2_weight"] = torch.empty(
            num_experts, hidden_sizes, intermediate_size_per_partition, dtype=torch.int8
        )
        return param_dict

    def get_dynamic_quant_param(
        self, num_experts: int, intermediate_size_per_partition: int, hidden_sizes: int, params_dtype: torch.dtype
    ) -> dict[str, Any]:
        param_dict = {}
        param_dict["w13_weight_scale"] = torch.empty(
            num_experts, 2 * intermediate_size_per_partition, 1, dtype=torch.float32
        )
        param_dict["w13_weight_offset"] = torch.empty(
            num_experts, 2 * intermediate_size_per_partition, 1, dtype=params_dtype
        )
        param_dict["w2_weight_scale"] = torch.empty(num_experts, hidden_sizes, 1, dtype=torch.float32)
        param_dict["w2_weight_offset"] = torch.empty(num_experts, hidden_sizes, 1, dtype=params_dtype)
        return param_dict

    def apply(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        router_logits: torch.Tensor,
        top_k: int,
        renormalize: bool,
        use_grouped_topk: bool = False,
        global_num_experts: int = -1,
        expert_map: torch.Tensor | None = None,
        topk_group: int | None = None,
        num_expert_group: int | None = None,
        custom_routing_function: Callable | None = None,
        scoring_func: str = "softmax",
        routed_scaling_factor: float = 1.0,
        e_score_correction_bias: torch.Tensor | None = None,
        is_prefill: bool = True,
        enable_force_load_balance: bool = False,
        log2phy: torch.Tensor | None = None,
        global_redundant_expert_num: int = 0,
        pertoken_scale: Any | None = None,
        **kwargs,
    ) -> torch.Tensor:
        zero_expert_num = getattr(layer, "zero_expert_num", 0)
        zero_expert_type = getattr(layer, "zero_expert_type", None)

        topk_weights, topk_ids = select_experts(
            hidden_states=x,
            router_logits=router_logits,
            top_k=top_k,
            use_grouped_topk=use_grouped_topk,
            renormalize=renormalize,
            topk_group=topk_group,
            num_expert_group=num_expert_group,
            custom_routing_function=custom_routing_function,
            scoring_func=scoring_func,
            e_score_correction_bias=e_score_correction_bias,
            global_num_experts=global_num_experts,
        )

        if zero_expert_num > 0 and zero_expert_type is not None:
            topk_ids, topk_weights, zero_expert_result = zero_experts_compute(
                expert_indices=topk_ids,
                expert_scales=topk_weights,
                num_experts=global_num_experts,
                zero_expert_type=zero_expert_type,
                hidden_states=x,
            )

        topk_weights = topk_weights.to(self.in_dtype)

        moe_comm_method = get_forward_context().moe_comm_method

        final_hidden_states = moe_comm_method.fused_experts(
            hidden_states=x,
            w1=layer.w13_weight,
            w1_scale=layer.w13_weight_scale,
            w2=layer.w2_weight,
            w2_scale=layer.w2_weight_scale,
            topk_weights=topk_weights,
            topk_ids=topk_ids,
            expert_map=expert_map,
            use_int8_w8a8=True,
        )
        if zero_expert_num > 0 and zero_expert_type is not None:
            final_hidden_states += zero_expert_result
        return final_hidden_states

    def process_weights_after_loading(self, layer):
        layer.w13_weight_scale.data = layer.w13_weight_scale.data.view(layer.w13_weight_scale.data.shape[0], -1)
        layer.w13_weight_offset.data = layer.w13_weight_offset.data.view(layer.w13_weight_offset.data.shape[0], -1)
        layer.w2_weight_scale.data = layer.w2_weight_scale.data.view(layer.w2_weight_scale.data.shape[0], -1)
        layer.w2_weight_offset.data = layer.w2_weight_offset.data.view(layer.w2_weight_offset.data.shape[0], -1)
@@ -50,13 +50,7 @@ class AscendW8A8LinearMethod310(AscendLinearScheme):
     def get_perchannel_param(self, output_size: int, params_dtype: torch.dtype) -> dict[str, Any]:
         params: dict[str, Any] = {}
         params["quant_bias"] = torch.empty(output_size, dtype=torch.int32)
-
-        # NOTE: keep identical to your current working behavior.
-        if params_dtype == torch.bfloat16:
-            params["deq_scale"] = torch.empty(output_size, dtype=torch.float32)
-        else:
-            params["deq_scale"] = torch.empty(output_size, dtype=torch.int64)
-
+        params["deq_scale"] = torch.empty(output_size, dtype=torch.int64)
         params["weight_scale"] = torch.empty(output_size, 1, dtype=params_dtype)
         params["weight_offset"] = torch.empty(output_size, 1, dtype=params_dtype)
         return params