[Feat] 310P support MoE W8A8 quantization (#6641)
### What this PR does / why we need it?
This PR introduces support for W8A8 dynamic quantization for
Mixture-of-Experts (MoE) models on Ascend 310P devices. This is achieved
by:
- Implementing a new quantization scheme
`AscendW8A8DynamicFusedMoEMethod310`.
- Adding a unified MLP implementation (`unified_apply_mlp`) for 310P
that handles both quantized and unquantized paths.
- Refactoring the MoE and quantization configuration logic to correctly
route to the new 310P-specific implementations (see the routing sketch
after this list).
- Adding new e2e and unit tests to verify the functionality of MoE W8A8
quantization.
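
For reference, a minimal sketch of what the registry-based routing looks like. This is illustrative only: the `_SCHEME_REGISTRY` dict, the `register_scheme` decorator, and `FakeW8A8DynamicMoEScheme` are invented names; only the `get_scheme_class(quant_type, layer_type)` contract (return the scheme class, or `None` when unsupported) is taken from this PR.

```python
# Illustrative sketch only -- not the actual vllm-ascend registry. It mimics
# the (quant_type, layer_type) -> scheme-class lookup that get_scheme_class()
# performs in this PR; the registry dict and scheme class here are invented.
_SCHEME_REGISTRY: dict[tuple[str, str], type] = {}


def register_scheme(quant_type: str, layer_type: str):
    """Decorator registering a scheme class under (quant_type, layer_type)."""
    def wrap(cls: type) -> type:
        _SCHEME_REGISTRY[(quant_type, layer_type)] = cls
        return cls
    return wrap


@register_scheme("W8A8_DYNAMIC", "moe")
class FakeW8A8DynamicMoEScheme:
    """Stand-in for the real AscendW8A8DynamicFusedMoEMethod310 scheme."""


def get_scheme_class(quant_type: str, layer_type: str) -> type | None:
    # None signals "unsupported", which create_scheme_for_layer turns
    # into a NotImplementedError.
    return _SCHEME_REGISTRY.get((quant_type, layer_type))


assert get_scheme_class("W8A8_DYNAMIC", "moe") is FakeW8A8DynamicMoEScheme
assert get_scheme_class("W4A8", "moe") is None
```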
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
- Added a new e2e test `test_qwen3_moe_tp2_w8a8` to test MoE W8A8
quantization in a multi-card setup (see the illustrative snippet below).
- Added several new unit tests for the 310P-specific MoE components,
including `experts_selector`, `fused_moe`, `moe_comm_method`, `moe_mlp`,
and the new `w8a8_dynamic` quantization method.
- vLLM version: v0.15.0
- vLLM main: d7e17aaacd
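
For context, a hypothetical offline-inference snippet in the spirit of the new tp2 e2e test. The checkpoint path and prompt are placeholders; `quantization="ascend"` is the method name this config registers via `ASCEND_QUANTIZATION_METHOD`.

```python
# Hypothetical usage sketch mirroring the tp2 W8A8 e2e test setup; the model
# path below is a placeholder for a ModelSlim W8A8-quantized MoE checkpoint.
from vllm import LLM, SamplingParams

llm = LLM(
    model="/path/to/Qwen3-MoE-W8A8",  # placeholder quantized checkpoint
    quantization="ascend",            # selects the Ascend quantization config
    tensor_parallel_size=2,           # matches the multi-card (tp2) test
)
outputs = llm.generate(["What is a Mixture-of-Experts model?"],
                       SamplingParams(max_tokens=32))
print(outputs[0].outputs[0].text)
```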
---------
Signed-off-by: pu-zhe <zpuaa@outlook.com>
@@ -31,14 +31,13 @@ from vllm.model_executor.layers.vocab_parallel_embedding import (
     VocabParallelEmbedding,
 )
 
-# Important: trigger 310P method registrations (register into 310P-local registry)
-from vllm_ascend._310p.quantization import methods as _methods_310p  # noqa: F401
-from vllm_ascend._310p.quantization.methods.registry import get_scheme_class as get_scheme_class_310p
-from vllm_ascend.quantization.method_adapters import (
-    AscendLinearMethod,
-)
+from vllm_ascend._310p.quantization.methods.registry import (
+    get_scheme_class,
+)
+from vllm_ascend.quantization.method_adapters import AscendFusedMoEMethod, AscendLinearMethod
 from vllm_ascend.quantization.modelslim_config import (
     AscendModelSlimConfig,
+    get_quant_type_for_layer,
     packed_modules_model_mapping,
 )
 from vllm_ascend.utils import ASCEND_QUANTIZATION_METHOD
@@ -47,31 +46,34 @@ logger = init_logger(__name__)
 
 
 def create_scheme_for_layer(
-    cfg: AscendModelSlimConfig,
     quant_description: dict[str, Any],
     prefix: str,
     layer_type: str,
     packed_modules_mapping: dict[str, Any] | None = None,
 ):
-    """Create 310P quant scheme (mainline-like).
+    """Create a quantization scheme instance for a layer.
 
-    - If quant_type cannot be determined: raise ValueError
-    - If quant_type is determined but not supported on 310P: raise NotImplementedError
+    Args:
+        quant_description: The quantization description dictionary.
+        prefix: The layer prefix.
+        layer_type: The type of layer ("linear", "moe", "attention").
+        packed_modules_mapping: Mapping for packed/fused modules.
+
+    Returns:
+        An instance of the appropriate quantization scheme class.
     """
-    logger.info_once("Using 310P ModelSlim Quantization routing.")
+    logger.info_once("Using the vLLM Ascend modelslim Quantization now!")
+    quant_type = get_quant_type_for_layer(quant_description, prefix, layer_type, packed_modules_mapping)
 
-    if layer_type != "linear":
-        raise NotImplementedError(f"310P quantization: layer_type={layer_type} is not supported yet (TODO).")
-
-    quant_type = cfg._get_linear_quant_type(prefix)
     if quant_type is None:
-        raise ValueError(f"310P quantization: could not determine quant_type for layer={prefix}.")
+        raise ValueError(f"Could not determine quantization type for layer {prefix}.")
 
-    scheme_cls = get_scheme_class_310p(quant_type, "linear")
-    if scheme_cls is None:
-        raise NotImplementedError(f"310P quantization: quant_type={quant_type} for linear is not supported yet (TODO).")
-
-    return scheme_cls()
+    # Use registry to get scheme class
+    scheme_cls = get_scheme_class(quant_type, layer_type)
+    if scheme_cls is not None:
+        return scheme_cls()
+    else:
+        raise NotImplementedError(f"Currently, vLLM Ascend doesn't support {quant_type} for {layer_type}.")
 
 
 @register_quantization_config(ASCEND_QUANTIZATION_METHOD)
@@ -84,40 +86,6 @@ class AscendModelSlimConfig310(AscendModelSlimConfig):
     causing NZ/transpose issues on 310P.
     """
 
-    def _get_linear_quant_type(self, prefix: str) -> str | None:
-        """Packed-aware quant type lookup.
-
-        ModelSlim may describe fused modules by their shards.
-        Example:
-            prefix = "...qkv_proj" -> shards "...q_proj.weight", "...k_proj.weight", "...v_proj.weight"
-        """
-        fused_mapping = getattr(self, "packed_modules_mapping", {}) or {}
-        proj_name = prefix.split(".")[-1]
-
-        if proj_name in fused_mapping:
-            shard_prefixes = [
-                prefix.replace(proj_name, shard_proj_name) for shard_proj_name in fused_mapping[proj_name]
-            ]
-            quant_types: list[str] = []
-            for sp in shard_prefixes:
-                qt = self.quant_description.get(sp + ".weight")
-                if isinstance(qt, str):
-                    quant_types.append(qt)
-
-            if not quant_types:
-                return None
-
-            first = quant_types[0]
-            if any(q != first for q in quant_types[1:]):
-                raise ValueError(
-                    f"310P quantization: not all shards of fused layer '{prefix}' "
-                    f"share the same quant type. shards={shard_prefixes}, types={quant_types}"
-                )
-            return first
-
-        qt = self.quant_description.get(prefix + ".weight")
-        return qt if isinstance(qt, str) else None
-
     def get_quant_method(
         self,
         layer: torch.nn.Module,
@@ -141,7 +109,6 @@ class AscendModelSlimConfig310(AscendModelSlimConfig):
                 return AscendUnquantizedLinearMethod()
 
             scheme = create_scheme_for_layer(
-                cfg=self,
                 quant_description=self.quant_description,
                 prefix=prefix,
                 layer_type="linear",
@@ -149,14 +116,15 @@ class AscendModelSlimConfig310(AscendModelSlimConfig):
             )
             return AscendLinearMethod(scheme)
 
-        if isinstance(layer, VocabParallelEmbedding):
+        elif isinstance(layer, FusedMoE):
+            if self.is_layer_skipped_ascend(prefix, self.packed_modules_mapping):
+                from vllm_ascend._310p.fused_moe.fused_moe import AscendUnquantizedFusedMoEMethod310
+
+                return AscendUnquantizedFusedMoEMethod310(layer.moe_config)
+            scheme = create_scheme_for_layer(self.quant_description, prefix, "moe", self.packed_modules_mapping)
+            return AscendFusedMoEMethod(scheme, layer.moe_config)
+
+        elif isinstance(layer, VocabParallelEmbedding):
             return UnquantizedEmbeddingMethod()
 
-        if isinstance(layer, FusedMoE):
-            raise NotImplementedError(
-                "310P quantization: FusedMoE is not supported yet. "
-                "TODO: add 310P MoE quant schemes and routing. "
-                "Workaround: use a non-MoE model."
-            )
-
         return super().get_quant_method(layer, prefix)
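
Note on the last hunk: skipped (unquantized) MoE layers fall back to `AscendUnquantizedFusedMoEMethod310`, while quantized layers get a registry scheme wrapped in `AscendFusedMoEMethod`. A self-contained restatement of that dispatch, with invented `Stub*` types standing in for the real classes:

```python
# Stub types standing in for the real vllm-ascend classes; only the dispatch
# order mirrors the FusedMoE branch of get_quant_method in the diff above.
class StubMoEConfig:
    pass

class StubUnquantizedMoEMethod:  # stands in for AscendUnquantizedFusedMoEMethod310
    def __init__(self, moe_config: StubMoEConfig) -> None:
        self.moe_config = moe_config

class StubQuantizedMoEMethod:    # stands in for AscendFusedMoEMethod
    def __init__(self, scheme: object, moe_config: StubMoEConfig) -> None:
        self.scheme = scheme
        self.moe_config = moe_config

def pick_moe_method(layer_is_skipped: bool, make_scheme, moe_config: StubMoEConfig):
    """Skipped layers stay unquantized; everything else gets a quant scheme."""
    if layer_is_skipped:
        return StubUnquantizedMoEMethod(moe_config)
    return StubQuantizedMoEMethod(make_scheme(), moe_config)

# Example: a non-skipped layer receives the quantized method.
method = pick_moe_method(False, object, StubMoEConfig())
assert isinstance(method, StubQuantizedMoEMethod)
```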