[Feat] 310P support MoE W8A8 quantization (#6641)

### What this PR does / why we need it?
This PR introduces support for W8A8 dynamic quantization of
Mixture-of-Experts (MoE) models on Ascend 310P devices (a usage sketch
follows the list below). This is achieved by:
- Implementing a new quantization scheme
`AscendW8A8DynamicFusedMoEMethod310`.
- Adding a unified MLP implementation (`unified_apply_mlp`) for 310P
that handles both quantized and unquantized paths.
- Refactoring the MoE and quantization configuration logic to correctly
route to the new 310P-specific implementations.
- Adding new e2e and unit tests to verify the functionality of MoE W8A8
quantization.
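
For context, end-to-end usage mirrors the settings exercised by the new e2e test. A minimal offline-inference sketch (model name and options taken from that test, not an official recipe; adjust for your hardware):

```python
# Hedged sketch: run the W8A8-quantized Qwen3 MoE model on 310P through vLLM.
# Settings mirror the new e2e test (TP=2, eager mode, float16, quantization="ascend").
from vllm import LLM, SamplingParams

llm = LLM(
    model="vllm-ascend/Qwen3-30B-A3B-W8A8",
    tensor_parallel_size=2,
    enforce_eager=True,
    dtype="float16",
    quantization="ascend",
)
outputs = llm.generate(["Hello, my name is"], SamplingParams(max_tokens=5))
print(outputs[0].outputs[0].text)
```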

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?
- Added a new e2e test `test_qwen3_moe_tp2_w8a8` that exercises MoE W8A8
quantization in a multi-card setup (an invocation sketch follows this list).
- Added several new unit tests for the 310P-specific MoE components,
including `experts_selector`, `fused_moe`, `moe_comm_method`, `moe_mlp`,
and the new `w8a8_dynamic` quantization method.
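
For reference, the new multicard case can be selected by test name. A minimal invocation sketch (assumes it is run from the repository root with the e2e test dependencies installed; the exact test-file path is omitted on purpose):

```python
# Hedged sketch: select the new e2e case by name via pytest's -k filter.
import pytest

pytest.main(["-sv", "-k", "test_qwen3_moe_tp2_w8a8"])
```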

- vLLM version: v0.15.0
- vLLM main:
d7e17aaacd

---------

Signed-off-by: pu-zhe <zpuaa@outlook.com>
This commit is contained in:
pu-zhe
2026-02-10 17:17:44 +08:00
committed by GitHub
parent 1eb07986bf
commit 02886e2641
15 changed files with 695 additions and 157 deletions

View File

@@ -44,3 +44,17 @@ def test_qwen3_moe_ep4_fp16():
enable_expert_parallel=True
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)
def test_qwen3_moe_tp2_w8a8():
example_prompts = [
"Hello, my name is",
]
max_tokens = 5
with VllmRunner(
"vllm-ascend/Qwen3-30B-A3B-W8A8",
tensor_parallel_size=2,
enforce_eager=True,
dtype="float16",
quantization="ascend"
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)

View File

@@ -0,0 +1,42 @@
#
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from vllm_ascend._310p.fused_moe.experts_selector import select_experts
class TestExpertsSelector310:
@pytest.mark.parametrize("global_num_experts", [256, 128])
def test_select_experts(self, global_num_experts):
x = torch.randn(8, 2)
router_logits = torch.randn(8, 2)
topk_weights, topk_ids = select_experts(
hidden_states=x,
router_logits=router_logits,
top_k=2,
use_grouped_topk=False,
renormalize=True,
topk_group=None,
num_expert_group=None,
custom_routing_function=None,
scoring_func="softmax",
e_score_correction_bias=None,
global_num_experts=global_num_experts,
)
assert topk_weights.shape == (8, 2)
assert topk_ids.shape == (8, 2)

View File

@@ -0,0 +1,132 @@
#
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import call, patch
import torch
from tests.ut.base import TestBase
from vllm_ascend._310p.fused_moe.moe_mlp import unified_apply_mlp
class TestUnifiedApplyMLP310(TestBase):
@patch("torch_npu.npu_grouped_matmul")
@patch("torch_npu.npu_swiglu")
def test_unified_apply_mlp_without_quantization_310(self, mock_npu_swiglu, mock_npu_grouped_matmul):
mock_gmm1_out = torch.randn(10, 40, dtype=torch.float16)
mock_gmm2_out = torch.randn(10, 20, dtype=torch.float16)
mock_npu_grouped_matmul.side_effect = [[mock_gmm1_out], [mock_gmm2_out]]
mock_npu_swiglu_output = torch.randn(10, 40, dtype=torch.float16)
mock_npu_swiglu.return_value = mock_npu_swiglu_output
hidden_states = torch.randn(10, 20, dtype=torch.float16)
w1 = torch.randn(5, 20, 40, dtype=torch.float16)
w2 = torch.randn(5, 40, 20, dtype=torch.float16)
group_list = torch.tensor([2, 4, 6, 8, 10], dtype=torch.int64)
result = unified_apply_mlp(
hidden_states=hidden_states,
w1=w1,
w1_scale=None,
w2=w2,
w2_scale=None,
group_list=group_list,
group_list_type=1,
with_quant=False,
)
self.assertEqual(mock_npu_grouped_matmul.call_count, 2)
mock_npu_grouped_matmul.assert_has_calls(
[
call(
x=[hidden_states], weight=[w1], split_item=2, group_list_type=1, group_type=0, group_list=group_list
),
call(
x=[mock_npu_swiglu_output],
weight=[w2],
split_item=2,
group_list_type=1,
group_type=0,
group_list=group_list,
),
],
any_order=True,
)
mock_npu_swiglu.assert_called_once()
mock_npu_swiglu.assert_called_with(mock_gmm1_out)
self.assertEqual(result.shape, hidden_states.shape)
self.assertEqual(result.dtype, torch.float16)
@patch("torch.cumsum")
@patch("torch_npu.npu_quant_grouped_matmul_dequant")
@patch("torch_npu.npu_swiglu")
def test_unified_apply_mlp_with_quantization_310(
self, mock_npu_swiglu, mock_npu_quant_grouped_matmul_dequant, mock_cumsum
):
mock_cumsum_out = torch.arange(0, 10, dtype=torch.int64)
mock_cumsum.return_value = mock_cumsum_out
mock_gmm1_out = torch.randn(10, 40, dtype=torch.float16)
mock_gmm2_out = torch.randn(10, 20, dtype=torch.float16)
mock_npu_quant_grouped_matmul_dequant.side_effect = [mock_gmm1_out, mock_gmm2_out]
mock_npu_swiglu_output = torch.randn(10, 40, dtype=torch.float16)
mock_npu_swiglu.return_value = mock_npu_swiglu_output
hidden_states = torch.randn(10, 20, dtype=torch.float16)
w1 = torch.randn(5, 20, 40, dtype=torch.float16)
w1_scale = torch.rand(5, 40, dtype=torch.float32)
w2 = torch.randn(5, 40, 20, dtype=torch.float16)
w2_scale = torch.rand(5, 40, dtype=torch.float32)
group_list = torch.tensor([2, 4, 6, 8, 10], dtype=torch.int64)
result = unified_apply_mlp(
hidden_states=hidden_states,
w1=w1,
w1_scale=w1_scale,
w2=w2,
w2_scale=w2_scale,
group_list=group_list,
group_list_type=1,
with_quant=True,
)
mock_cumsum.assert_called_once()
self.assertEqual(mock_npu_quant_grouped_matmul_dequant.call_count, 2)
mock_npu_quant_grouped_matmul_dequant.assert_has_calls(
[
call(
x=hidden_states,
quantized_weight=w1,
weight_scale=w1_scale,
group_list=mock_cumsum_out,
quant_mode="pertoken",
),
call(
x=mock_npu_swiglu_output,
quantized_weight=w2,
weight_scale=w2_scale,
group_list=mock_cumsum_out,
quant_mode="pertoken",
),
],
any_order=True,
)
mock_npu_swiglu.assert_called_once()
mock_npu_swiglu.assert_called_with(mock_gmm1_out)
self.assertEqual(result.shape, hidden_states.shape)
self.assertEqual(result.dtype, torch.float16)

View File

@@ -1,10 +1,26 @@
#
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import MagicMock, patch
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.fused_moe.config import FusedMoEConfig
from vllm.model_executor.layers.fused_moe.config import FusedMoEConfig, FusedMoEParallelConfig
from vllm.model_executor.layers.linear import LinearBase
from tests.ut.base import TestBase
from vllm_ascend._310p.fused_moe.fused_moe import AscendUnquantizedFusedMoEMethod310
from vllm_ascend._310p.quantization.modelslim_config import AscendModelSlimConfig310
from vllm_ascend.ops.linear import AscendUnquantizedLinearMethod
@@ -31,7 +47,7 @@ class TestAscendModelSlimConfig310(TestBase):
# Test skipped layer
with (
patch("vllm_ascend._310p.quantization.modelslim_config.get_current_vllm_config", return_value=mock_config),
patch.object(self.ascend_config, "is_layer_skipped_ascend", return_value=True)
patch.object(self.ascend_config, "is_layer_skipped_ascend", return_value=True),
):
method = self.ascend_config.get_quant_method(linear_layer, ".attn")
self.assertIsInstance(method, AscendUnquantizedLinearMethod)
@@ -54,14 +70,35 @@ class TestAscendModelSlimConfig310(TestBase):
fused_moe_layer = MagicMock(spec=FusedMoE)
fused_moe_layer.moe = MagicMock(spec=FusedMoEConfig)
fused_moe_layer.moe_config = MagicMock(spec=FusedMoEConfig)
fused_moe_layer.moe_config.moe_parallel_config = MagicMock(spec=FusedMoEParallelConfig)
fused_moe_layer.moe_config.moe_parallel_config.use_ep = True
fused_moe_layer.moe_config.moe_parallel_config.dp_size = 1
mock_config = MagicMock()
mock_config.model_config.hf_config.model_type = None
mock_config.compilation_config.custom_ops = ["all"]
mock_scheme = MagicMock()
# Test skipped layer
with (
patch("vllm.config.vllm.get_current_vllm_config", return_value=mock_config),
patch("vllm_ascend._310p.quantization.modelslim_config.get_current_vllm_config", return_value=mock_config),
patch("vllm_ascend.quantization.modelslim_config.get_current_vllm_config", return_value=mock_config),
patch.object(self.ascend_config, "is_layer_skipped_ascend", return_value=True),
):
method = self.ascend_config.get_quant_method(fused_moe_layer, ".moe")
self.assertIsInstance(method, AscendUnquantizedFusedMoEMethod310)
# Test quantized layer
mock_scheme = MagicMock()
with (
patch.object(self.ascend_config, "is_layer_skipped_ascend", return_value=False),
patch("vllm.config.vllm.get_current_vllm_config", return_value=mock_config),
patch("vllm_ascend._310p.quantization.modelslim_config.get_current_vllm_config", return_value=mock_config),
patch("vllm_ascend.quantization.modelslim_config.get_current_vllm_config", return_value=mock_config),
patch("vllm_ascend._310p.quantization.modelslim_config.create_scheme_for_layer", return_value=mock_scheme),
patch("vllm_ascend._310p.quantization.modelslim_config.AscendLinearMethod", return_value=MagicMock()),
self.assertRaises(NotImplementedError),
patch(
"vllm_ascend._310p.quantization.modelslim_config.AscendFusedMoEMethod", return_value=MagicMock()
) as fused_moe_method,
):
self.ascend_config.get_quant_method(fused_moe_layer, "moe_layer")
method = self.ascend_config.get_quant_method(fused_moe_layer, ".moe")
self.assertIs(method, fused_moe_method.return_value)
fused_moe_method.assert_called_once_with(mock_scheme, fused_moe_layer.moe_config)

View File

@@ -0,0 +1,66 @@
#
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock, patch
import torch
from tests.ut.base import TestBase
from vllm_ascend._310p.quantization.methods.w8a8_dynamic import AscendW8A8DynamicFusedMoEMethod310
class TestAscendW8A8FusedMoEMethod310(TestBase):
num_experts = 8
hidden_size = 128
intermediate_size = 128
@patch("vllm_ascend._310p.quantization.methods.w8a8_dynamic.get_ep_group")
def setUp(self, mock_get_ep_group):
with patch(
"vllm_ascend._310p.quantization.methods.w8a8_dynamic.get_current_vllm_config"
) as mock_get_current_vllm_config:
mock_vllm_config = Mock()
mock_vllm_config.quant_config = Mock(quant_description={"group_size": 0})
mock_vllm_config.scheduler_config = Mock(
max_num_batched_tokens=2048, max_model_len=2048, enable_chunked_prefill=False
)
mock_get_current_vllm_config.return_value = mock_vllm_config
mock_ep_group = Mock()
mock_get_ep_group.return_value = mock_ep_group
mock_ascend_config = Mock()
mock_ascend_config.enable_chunked_prefill = False
self.quant_method = AscendW8A8DynamicFusedMoEMethod310()
def test_get_weight_310(self):
param_dict = self.quant_method.get_weight(
self.num_experts, self.intermediate_size, self.hidden_size, torch.float16
)
self.assertEqual(param_dict["w13_weight"].dtype, torch.int8)
self.assertEqual(
param_dict["w13_weight"].shape, (self.num_experts, 2 * self.intermediate_size, self.hidden_size)
)
self.assertEqual(param_dict["w2_weight"].dtype, torch.int8)
self.assertEqual(param_dict["w2_weight"].shape, (self.num_experts, self.hidden_size, self.intermediate_size))
def test_get_dynamic_quant_param_310(self):
param_dict = self.quant_method.get_dynamic_quant_param(
self.num_experts, self.intermediate_size, self.hidden_size, torch.float16
)
self.assertEqual(param_dict["w13_weight_scale"].dtype, torch.float32)
self.assertEqual(param_dict["w13_weight_scale"].shape, (self.num_experts, 2 * self.intermediate_size, 1))
self.assertEqual(param_dict["w2_weight_scale"].dtype, torch.float32)
self.assertEqual(param_dict["w2_weight_scale"].shape, (self.num_experts, self.hidden_size, 1))

View File

@@ -1,3 +1,18 @@
#
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import MagicMock, patch
import torch
@@ -16,19 +31,19 @@ class TestAscendW8A8LinearMethod310(TestBase):
self.assertEqual(weight["weight"].shape, (20, 10))
def test_get_pertensor_param_310(self):
params = self.method.get_pertensor_param(torch.bfloat16)
self.assertEqual(params["input_scale"].dtype, torch.bfloat16)
params = self.method.get_pertensor_param(torch.float16)
self.assertEqual(params["input_scale"].dtype, torch.float16)
self.assertEqual(params["input_offset"].dtype, torch.int8)
self.assertEqual(params["input_scale"].shape, (1,))
self.assertEqual(params["input_offset"].shape, (1,))
def test_get_perchannel_param_310(self):
params = self.method.get_perchannel_param(10, torch.bfloat16)
params = self.method.get_perchannel_param(10, torch.float16)
self.assertEqual(params["quant_bias"].dtype, torch.int32)
self.assertEqual(params["deq_scale"].dtype, torch.float32)
self.assertEqual(params["weight_scale"].dtype, torch.bfloat16)
self.assertEqual(params["weight_offset"].dtype, torch.bfloat16)
self.assertEqual(params["deq_scale"].dtype, torch.int64)
self.assertEqual(params["weight_scale"].dtype, torch.float16)
self.assertEqual(params["weight_offset"].dtype, torch.float16)
self.assertEqual(params["quant_bias"].shape, (10,))
self.assertEqual(params["deq_scale"].shape, (10,))
self.assertEqual(params["weight_scale"].shape, (10, 1))

View File

@@ -19,7 +19,6 @@ from collections.abc import Callable
import torch
from vllm_ascend.ops.fused_moe.experts_selector import _native_select_experts
from vllm_ascend.utils import get_weight_prefetch_method
def select_experts(
@@ -55,9 +54,6 @@ def select_experts(
topk_weights: router weights of shape (num_tokens, top_k).
topk_ids: selected expert IDs of shape (num_tokens, top_k).
"""
# prefetch w1_w3_proj.weight preprocess
weight_prefetch_method = get_weight_prefetch_method()
weight_prefetch_method.maybe_prefetch_moe_weight_preprocess(hidden_states, "gate_up")
topk_weights, topk_ids = _native_select_experts(
hidden_states=hidden_states,
router_logits=router_logits,

View File

@@ -58,7 +58,6 @@ class AscendUnquantizedFusedMoEMethod310(UnquantizedFusedMoEMethod):
num_expert_group: int | None = None,
custom_routing_function: Callable | None = None,
scoring_func: str = "softmax",
routed_scaling_factor: float = 1.0,
e_score_correction_bias: torch.Tensor | None = None,
global_num_experts: int = -1,
expert_map: torch.Tensor | None = None,
@@ -67,7 +66,6 @@ class AscendUnquantizedFusedMoEMethod310(UnquantizedFusedMoEMethod):
) -> torch.Tensor:
zero_expert_num = getattr(layer, "zero_expert_num", 0)
zero_expert_type = getattr(layer, "zero_expert_type", None)
assert routed_scaling_factor == 1.0
topk_weights, topk_ids = select_experts(
hidden_states=x,
@@ -195,44 +193,36 @@ class AscendFusedMoE310(FusedMoE):
method = quant_method.quant_method
quant_type = getattr(method, "quant_type", QuantType.NONE)
if quant_type != QuantType.NONE:
# TODO: w8a8 quantization will be supported soon, and only reject w4a8 here.
raise RuntimeError("W8A8 is not supported currently.")
return QuantType.NONE
if quant_type not in [QuantType.NONE, QuantType.W8A8]:
raise RuntimeError("Only Unquant and W8A8 is supported.")
return quant_type
def forward_impl( # type: ignore[override]
self, hidden_states: torch.Tensor, router_logits: torch.Tensor
) -> torch.Tensor:
assert self.quant_method is not None
assert self.routed_scaling_factor == 1.0, "routed_scaling_factor != 1.0 is not supported."
forward_context = get_forward_context()
hidden_states, router_logits, _, context_metadata = forward_context.moe_comm_method.prepare(
hidden_states=hidden_states, router_logits=router_logits, quant_type=self.quant_type
)
if isinstance(hidden_states, tuple):
hidden_states, pertoken_scale = hidden_states
else:
pertoken_scale = None
# Matrix multiply.
fused_experts_results: FusedExpertsResult = self.quant_method.apply(
layer=self,
x=hidden_states,
router_logits=router_logits,
pertoken_scale=pertoken_scale,
top_k=self.top_k,
renormalize=self.renormalize,
use_grouped_topk=self.use_grouped_topk,
global_num_experts=self.global_num_experts,
expert_map=self.local_expert_map,
top_k=self.top_k,
router_logits=router_logits,
renormalize=self.renormalize,
topk_group=self.topk_group,
num_expert_group=self.num_expert_group,
custom_routing_function=self.custom_routing_function,
scoring_func=self.scoring_func,
routed_scaling_factor=self.routed_scaling_factor,
e_score_correction_bias=self.e_score_correction_bias,
activation=self.activation,
global_num_experts=self.global_num_experts,
expert_map=self.local_expert_map,
apply_router_weight_on_input=self.apply_router_weight_on_input,
)

View File

@@ -1,39 +1,90 @@
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
from __future__ import annotations
from vllm_ascend.ops.fused_moe.moe_comm_method import AllGatherCommImpl
from .token_dispatcher import TokenDispatcherWithAllGather310
class AllGatherCommImpl310(AllGatherCommImpl):
"""This implementation is the same as NativeAllGatherCommImpl,
but uses NPU-specific ops for better performance.
This implementation should be compatible with all scenarios, and
thus it is the default implementation for MoE communication methods.
It uses `torch_npu.npu_moe_init_routing_v2` for pre-processing
and `torch_npu.npu_moe_token_unpermute` for post-processing
to handle the token-to-expert mapping and communication efficiently.
"""
def _get_token_dispatcher(self):
return TokenDispatcherWithAllGather310(
top_k=self.moe_config.experts_per_token,
num_experts=self.moe_config.num_experts,
num_local_experts=self.moe_config.num_local_experts,
)
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
from __future__ import annotations
import torch
from vllm.forward_context import get_forward_context
from vllm_ascend.ops.fused_moe.moe_comm_method import AllGatherCommImpl, FusedExpertsResult
from .moe_mlp import unified_apply_mlp
from .token_dispatcher import TokenDispatcherWithAllGather310
class AllGatherCommImpl310(AllGatherCommImpl):
"""This implementation is the same as NativeAllGatherCommImpl,
but uses NPU-specific ops for better performance.
This implementation should be compatible with all scenarios, and
thus it is the default implementation for MoE communication methods.
It uses `torch_npu.npu_moe_init_routing_v2` for pre-processing
and `torch_npu.npu_moe_token_unpermute` for post-processing
to handle the token-to-expert mapping and communication efficiently.
"""
def fused_experts( # type: ignore[override]
self,
hidden_states: torch.Tensor,
w1: torch.Tensor,
w2: torch.Tensor,
topk_weights: torch.Tensor,
topk_ids: torch.Tensor,
expert_map: torch.Tensor | None = None,
use_int8_w8a8: bool = False,
w1_scale: torch.Tensor | None = None,
w2_scale: torch.Tensor | None = None,
apply_router_weight_on_input: bool = False,
) -> FusedExpertsResult:
# This method is overridden to use the 310p-specific unified_apply_mlp
# which provides optimized MLP computation for the 310p platform
moe_comm_method = get_forward_context().moe_comm_method
assert moe_comm_method is not None, "Missing communication context"
dispatch_results = self.token_dispatcher.token_dispatch(
hidden_states=hidden_states,
topk_weights=topk_weights,
topk_ids=topk_ids,
expert_map=expert_map,
apply_router_weight_on_input=apply_router_weight_on_input,
)
mlp_output = unified_apply_mlp(
hidden_states=dispatch_results.hidden_states,
w1=w1,
w2=w2,
w1_scale=w1_scale,
w2_scale=w2_scale,
group_list=dispatch_results.group_list,
group_list_type=dispatch_results.group_list_type,
with_quant=use_int8_w8a8,
)
combine_results = self.token_dispatcher.token_combine(
hidden_states=mlp_output, context_metadata=dispatch_results.context_metadata
)
return FusedExpertsResult(
routed_out=combine_results.routed_out,
group_list_type=dispatch_results.group_list_type,
expert_tokens=dispatch_results.group_list,
)
def _get_token_dispatcher(self):
return TokenDispatcherWithAllGather310(
top_k=self.moe_config.experts_per_token,
num_experts=self.moe_config.num_experts,
num_local_experts=self.moe_config.num_local_experts,
)

View File

@@ -0,0 +1,93 @@
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
import torch
import torch_npu
def quant_apply_mlp(
hidden_states: torch.Tensor,
w1: torch.Tensor,
w1_scale: torch.Tensor,
w2: torch.Tensor,
w2_scale: torch.Tensor,
group_list: torch.Tensor,
group_list_type: int = 1,
) -> torch.Tensor:
if group_list_type == 1:
# Convert group_list to cumulative sum format if group_list is count format
group_list = torch.cumsum(group_list, dim=0)
hidden_states = torch_npu.npu_quant_grouped_matmul_dequant(
x=hidden_states, quantized_weight=w1, weight_scale=w1_scale, group_list=group_list, quant_mode="pertoken"
)
hidden_states = torch_npu.npu_swiglu(hidden_states)
hidden_states = torch_npu.npu_quant_grouped_matmul_dequant(
x=hidden_states, quantized_weight=w2, weight_scale=w2_scale, group_list=group_list, quant_mode="pertoken"
)
return hidden_states
def unquant_apply_mlp(
hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, group_list: torch.Tensor, group_list_type: int = 1
) -> torch.Tensor:
gate_up_out = torch_npu.npu_grouped_matmul(
x=[hidden_states],
weight=[w1],
split_item=2,
group_list_type=group_list_type,
group_type=0,
group_list=group_list,
)[0]
act_out = torch_npu.npu_swiglu(gate_up_out)
hidden_states = torch_npu.npu_grouped_matmul(
x=[act_out],
weight=[w2],
split_item=2,
group_list_type=group_list_type,
group_type=0,
group_list=group_list,
)[0]
return hidden_states
def unified_apply_mlp(
hidden_states: torch.Tensor,
w1: torch.Tensor,
w2: torch.Tensor,
group_list: torch.Tensor,
w1_scale: torch.Tensor | None = None,
w2_scale: torch.Tensor | None = None,
group_list_type: int = 1,
with_quant: bool = False,
) -> torch.Tensor:
if with_quant:
assert w1_scale is not None and w2_scale is not None
return quant_apply_mlp(
hidden_states=hidden_states,
w1=w1,
w1_scale=w1_scale,
w2=w2,
w2_scale=w2_scale,
group_list=group_list,
group_list_type=group_list_type,
)
else:
return unquant_apply_mlp(
hidden_states=hidden_states, w1=w1, w2=w2, group_list=group_list, group_list_type=group_list_type
)

View File

@@ -32,21 +32,14 @@ class TokenDispatcherWithAllGather310(TokenDispatcherWithAllGather):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def token_dispatch(
def token_dispatch( # type: ignore[override]
self,
hidden_states: torch.Tensor,
topk_weights: torch.Tensor,
topk_ids: torch.Tensor,
expert_map: torch.Tensor | None = None,
global_redundant_expert_num: int = 0,
mc2_mask: torch.Tensor | None = None,
apply_router_weight_on_input: bool = False,
with_quant: bool = False,
dynamic_eplb: bool = False,
pertoken_scale: torch.Tensor | None = None,
):
if with_quant:
raise RuntimeError("Quant is not supported for 310P currently.")
self.original_shape = hidden_states.shape
num_tokens = hidden_states.shape[:-1].numel()
@@ -77,7 +70,6 @@ class TokenDispatcherWithAllGather310(TokenDispatcherWithAllGather):
return TokenDispatchResult(
hidden_states=sorted_hidden_states,
dynamic_scale=None,
group_list=expert_tokens,
group_list_type=group_list_type,
context_metadata=context_metadata,

View File

@@ -15,8 +15,7 @@
# This file is a part of the vllm-ascend project.
#
from . import w8a8_static # noqa: F401
# Future extensions:
# from . import w8a8_dynamic # noqa: F401
# from . import w4a16 # noqa: F401
from . import (
w8a8_dynamic, # noqa: F401
w8a8_static, # noqa: F401
)

View File

@@ -0,0 +1,149 @@
#
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from collections.abc import Callable
from typing import Any
import torch
from vllm.config import get_current_vllm_config
from vllm.distributed import get_ep_group
from vllm.forward_context import get_forward_context
from vllm_ascend._310p.fused_moe.experts_selector import select_experts
from vllm_ascend.ops.fused_moe.experts_selector import zero_experts_compute
from vllm_ascend.quantization.methods.base import AscendMoEScheme, QuantType
from .registry import register_scheme
@register_scheme("W8A8_DYNAMIC", "moe")
class AscendW8A8DynamicFusedMoEMethod310(AscendMoEScheme):
"""310P-only FusedMoE method for Ascend W8A8_DYNAMIC.
Notes:
- This scheme is discovered via 310P local registry.
"""
# Declare the quantization type for this scheme
quant_type: QuantType = QuantType.W8A8
def __init__(self):
self.ep_group = get_ep_group()
vllm_config = get_current_vllm_config()
self.in_dtype = vllm_config.model_config.dtype
def get_weight(
self, num_experts: int, intermediate_size_per_partition: int, hidden_sizes: int, params_dtype: torch.dtype
) -> dict[str, Any]:
param_dict = {}
# Fused gate_up_proj (column parallel)
param_dict["w13_weight"] = torch.empty(
num_experts, 2 * intermediate_size_per_partition, hidden_sizes, dtype=torch.int8
)
# down_proj (row parallel)
param_dict["w2_weight"] = torch.empty(
num_experts, hidden_sizes, intermediate_size_per_partition, dtype=torch.int8
)
return param_dict
def get_dynamic_quant_param(
self, num_experts: int, intermediate_size_per_partition: int, hidden_sizes: int, params_dtype: torch.dtype
) -> dict[str, Any]:
param_dict = {}
param_dict["w13_weight_scale"] = torch.empty(
num_experts, 2 * intermediate_size_per_partition, 1, dtype=torch.float32
)
param_dict["w13_weight_offset"] = torch.empty(
num_experts, 2 * intermediate_size_per_partition, 1, dtype=params_dtype
)
param_dict["w2_weight_scale"] = torch.empty(num_experts, hidden_sizes, 1, dtype=torch.float32)
param_dict["w2_weight_offset"] = torch.empty(num_experts, hidden_sizes, 1, dtype=params_dtype)
return param_dict
def apply(
self,
layer: torch.nn.Module,
x: torch.Tensor,
router_logits: torch.Tensor,
top_k: int,
renormalize: bool,
use_grouped_topk: bool = False,
global_num_experts: int = -1,
expert_map: torch.Tensor | None = None,
topk_group: int | None = None,
num_expert_group: int | None = None,
custom_routing_function: Callable | None = None,
scoring_func: str = "softmax",
routed_scaling_factor: float = 1.0,
e_score_correction_bias: torch.Tensor | None = None,
is_prefill: bool = True,
enable_force_load_balance: bool = False,
log2phy: torch.Tensor | None = None,
global_redundant_expert_num: int = 0,
pertoken_scale: Any | None = None,
**kwargs,
) -> torch.Tensor:
zero_expert_num = getattr(layer, "zero_expert_num", 0)
zero_expert_type = getattr(layer, "zero_expert_type", None)
topk_weights, topk_ids = select_experts(
hidden_states=x,
router_logits=router_logits,
top_k=top_k,
use_grouped_topk=use_grouped_topk,
renormalize=renormalize,
topk_group=topk_group,
num_expert_group=num_expert_group,
custom_routing_function=custom_routing_function,
scoring_func=scoring_func,
e_score_correction_bias=e_score_correction_bias,
global_num_experts=global_num_experts,
)
if zero_expert_num > 0 and zero_expert_type is not None:
topk_ids, topk_weights, zero_expert_result = zero_experts_compute(
expert_indices=topk_ids,
expert_scales=topk_weights,
num_experts=global_num_experts,
zero_expert_type=zero_expert_type,
hidden_states=x,
)
topk_weights = topk_weights.to(self.in_dtype)
moe_comm_method = get_forward_context().moe_comm_method
final_hidden_states = moe_comm_method.fused_experts(
hidden_states=x,
w1=layer.w13_weight,
w1_scale=layer.w13_weight_scale,
w2=layer.w2_weight,
w2_scale=layer.w2_weight_scale,
topk_weights=topk_weights,
topk_ids=topk_ids,
expert_map=expert_map,
use_int8_w8a8=True,
)
if zero_expert_num > 0 and zero_expert_type is not None:
final_hidden_states += zero_expert_result
return final_hidden_states
def process_weights_after_loading(self, layer):
layer.w13_weight_scale.data = layer.w13_weight_scale.data.view(layer.w13_weight_scale.data.shape[0], -1)
layer.w13_weight_offset.data = layer.w13_weight_offset.data.view(layer.w13_weight_offset.data.shape[0], -1)
layer.w2_weight_scale.data = layer.w2_weight_scale.data.view(layer.w2_weight_scale.data.shape[0], -1)
layer.w2_weight_offset.data = layer.w2_weight_offset.data.view(layer.w2_weight_offset.data.shape[0], -1)

View File

@@ -50,13 +50,7 @@ class AscendW8A8LinearMethod310(AscendLinearScheme):
def get_perchannel_param(self, output_size: int, params_dtype: torch.dtype) -> dict[str, Any]:
params: dict[str, Any] = {}
params["quant_bias"] = torch.empty(output_size, dtype=torch.int32)
# NOTE: keep identical to your current working behavior.
if params_dtype == torch.bfloat16:
params["deq_scale"] = torch.empty(output_size, dtype=torch.float32)
else:
params["deq_scale"] = torch.empty(output_size, dtype=torch.int64)
params["deq_scale"] = torch.empty(output_size, dtype=torch.int64)
params["weight_scale"] = torch.empty(output_size, 1, dtype=params_dtype)
params["weight_offset"] = torch.empty(output_size, 1, dtype=params_dtype)
return params

View File

@@ -31,14 +31,13 @@ from vllm.model_executor.layers.vocab_parallel_embedding import (
VocabParallelEmbedding,
)
# Important: trigger 310P method registrations (register into 310P-local registry)
from vllm_ascend._310p.quantization import methods as _methods_310p # noqa: F401
from vllm_ascend._310p.quantization.methods.registry import get_scheme_class as get_scheme_class_310p
from vllm_ascend.quantization.method_adapters import (
AscendLinearMethod,
from vllm_ascend._310p.quantization.methods.registry import (
get_scheme_class,
)
from vllm_ascend.quantization.method_adapters import AscendFusedMoEMethod, AscendLinearMethod
from vllm_ascend.quantization.modelslim_config import (
AscendModelSlimConfig,
get_quant_type_for_layer,
packed_modules_model_mapping,
)
from vllm_ascend.utils import ASCEND_QUANTIZATION_METHOD
@@ -47,31 +46,34 @@ logger = init_logger(__name__)
def create_scheme_for_layer(
cfg: AscendModelSlimConfig,
quant_description: dict[str, Any],
prefix: str,
layer_type: str,
packed_modules_mapping: dict[str, Any] | None = None,
):
"""Create 310P quant scheme (mainline-like).
"""Create a quantization scheme instance for a layer.
- If quant_type cannot be determined: raise ValueError
- If quant_type is determined but not supported on 310P: raise NotImplementedError
Args:
quant_description: The quantization description dictionary.
prefix: The layer prefix.
layer_type: The type of layer ("linear", "moe", "attention").
packed_modules_mapping: Mapping for packed/fused modules.
Returns:
An instance of the appropriate quantization scheme class.
"""
logger.info_once("Using 310P ModelSlim Quantization routing.")
logger.info_once("Using the vLLM Ascend modelslim Quantization now!")
quant_type = get_quant_type_for_layer(quant_description, prefix, layer_type, packed_modules_mapping)
if layer_type != "linear":
raise NotImplementedError(f"310P quantization: layer_type={layer_type} is not supported yet (TODO).")
quant_type = cfg._get_linear_quant_type(prefix)
if quant_type is None:
raise ValueError(f"310P quantization: could not determine quant_type for layer={prefix}.")
raise ValueError(f"Could not determine quantization type for layer {prefix}.")
scheme_cls = get_scheme_class_310p(quant_type, "linear")
if scheme_cls is None:
raise NotImplementedError(f"310P quantization: quant_type={quant_type} for linear is not supported yet (TODO).")
return scheme_cls()
# Use registry to get scheme class
scheme_cls = get_scheme_class(quant_type, layer_type)
if scheme_cls is not None:
return scheme_cls()
else:
raise NotImplementedError(f"Currently, vLLM Ascend doesn't support {quant_type} for {layer_type}.")
@register_quantization_config(ASCEND_QUANTIZATION_METHOD)
@@ -84,40 +86,6 @@ class AscendModelSlimConfig310(AscendModelSlimConfig):
causing NZ/transpose issues on 310P.
"""
def _get_linear_quant_type(self, prefix: str) -> str | None:
"""Packed-aware quant type lookup.
ModelSlim may describe fused modules by their shards.
Example:
prefix = "...qkv_proj" -> shards "...q_proj.weight", "...k_proj.weight", "...v_proj.weight"
"""
fused_mapping = getattr(self, "packed_modules_mapping", {}) or {}
proj_name = prefix.split(".")[-1]
if proj_name in fused_mapping:
shard_prefixes = [
prefix.replace(proj_name, shard_proj_name) for shard_proj_name in fused_mapping[proj_name]
]
quant_types: list[str] = []
for sp in shard_prefixes:
qt = self.quant_description.get(sp + ".weight")
if isinstance(qt, str):
quant_types.append(qt)
if not quant_types:
return None
first = quant_types[0]
if any(q != first for q in quant_types[1:]):
raise ValueError(
f"310P quantization: not all shards of fused layer '{prefix}' "
f"share the same quant type. shards={shard_prefixes}, types={quant_types}"
)
return first
qt = self.quant_description.get(prefix + ".weight")
return qt if isinstance(qt, str) else None
def get_quant_method(
self,
layer: torch.nn.Module,
@@ -141,7 +109,6 @@ class AscendModelSlimConfig310(AscendModelSlimConfig):
return AscendUnquantizedLinearMethod()
scheme = create_scheme_for_layer(
cfg=self,
quant_description=self.quant_description,
prefix=prefix,
layer_type="linear",
@@ -149,14 +116,15 @@ class AscendModelSlimConfig310(AscendModelSlimConfig):
)
return AscendLinearMethod(scheme)
if isinstance(layer, VocabParallelEmbedding):
elif isinstance(layer, FusedMoE):
if self.is_layer_skipped_ascend(prefix, self.packed_modules_mapping):
from vllm_ascend._310p.fused_moe.fused_moe import AscendUnquantizedFusedMoEMethod310
return AscendUnquantizedFusedMoEMethod310(layer.moe_config)
scheme = create_scheme_for_layer(self.quant_description, prefix, "moe", self.packed_modules_mapping)
return AscendFusedMoEMethod(scheme, layer.moe_config)
elif isinstance(layer, VocabParallelEmbedding):
return UnquantizedEmbeddingMethod()
if isinstance(layer, FusedMoE):
raise NotImplementedError(
"310P quantization: FusedMoE is not supported yet. "
"TODO: add 310P MoE quant schemes and routing. "
"Workaround: use a non-MoE model."
)
return super().get_quant_method(layer, prefix)