[Feat] Add a switch for the fused gmmswigluquant operator (#5992)

### What this PR does / why we need it?

Add an `additional_config` option to control whether the gmmswigluquant
fused operator is enabled; it is enabled (`True`) by default. When running
on a small number of devices, the fused gmmswigluquant operator can cause
some performance degradation, so this switch makes it possible to turn the
fusion off.
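
A minimal sketch of how the switch can be passed through `additional_config` (the model path is a placeholder; the config keys are the ones added in this PR):

```python
from vllm import LLM

llm = LLM(
    model="/path/to/GLM-4.6-w8a8",  # placeholder path
    additional_config={
        "ascend_fusion_config": {
            # Defaults to True; set False on small deployments where the
            # fused operator degrades performance.
            "fusion_ops_gmmswigluquant": False,
        },
    },
)
```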

### Does this PR introduce _any_ user-facing change?

Yes. `additional_config` now accepts an `ascend_fusion_config` section; its `fusion_ops_gmmswigluquant` switch (default `True`) controls whether the fused gmmswigluquant operator is used.

### How was this patch tested?

Covered by the updated `TestAscendConfig` unit tests shown below, plus the performance comparison in the Perf section.

- vLLM version: v0.13.0
- vLLM main: 2c24bc6996

#### Perf

test model: GLM-4.6 (W8A8)
- single A3 node (EP16, TP16), async-scheduling, MTP, FULL_DECODE_ONLY
- bs=1, input_lens=32000, output_lens=1024

Without this PR: TPOT 32.22 ms
With this PR: TPOT 30.23 ms (roughly a 6% reduction)

---------

Signed-off-by: zjks98 <zhangjiakang4@huawei.com>
Co-authored-by: zjks98 <zhangjiakang4@huawei.com>
Author: aipaes
Date: 2026-01-19 21:19:25 +08:00
Committed by: GitHub
Parent: 38cfcd572a
Commit: f58e110afe
4 changed files with 45 additions and 1 deletion


Unit tests (`TestAscendConfig`) check the new flag in both the default and the explicitly disabled configuration:

```diff
@@ -43,6 +43,9 @@ class TestAscendConfig(TestBase):
         ascend_compilation_config = ascend_config.ascend_compilation_config
         self.assertTrue(ascend_compilation_config.fuse_norm_quant)
+
+        ascend_fusion_config = ascend_config.ascend_fusion_config
+        self.assertTrue(ascend_fusion_config.fusion_ops_gmmswigluquant)
 
     @_clean_up_ascend_config
     def test_init_ascend_config_with_additional_config(self):
         test_vllm_config = VllmConfig()
@@ -50,6 +53,9 @@ class TestAscendConfig(TestBase):
             "ascend_compilation_config": {
                 "fuse_norm_quant": False,
             },
+            "ascend_fusion_config": {
+                "fusion_ops_gmmswigluquant": False,
+            },
             "multistream_overlap_shared_expert": True,
             "eplb_config": {
                 "num_redundant_experts": 2
@@ -66,6 +72,9 @@ class TestAscendConfig(TestBase):
         self.assertFalse(ascend_compilation_config.fuse_norm_quant)
         self.assertFalse(ascend_config.enable_kv_nz)
+
+        ascend_fusion_config = ascend_config.ascend_fusion_config
+        self.assertFalse(ascend_fusion_config.fusion_ops_gmmswigluquant)
 
     @_clean_up_ascend_config
     def test_init_ascend_config_enable_npugraph_ex(self):
         test_vllm_config = VllmConfig()
```


`AscendConfig` now parses an `ascend_fusion_config` section, backed by a new `AscendFusionConfig` class:

```diff
@@ -37,6 +37,9 @@ class AscendConfig:
         ascend_compilation_config = additional_config.get("ascend_compilation_config", {})
         self.ascend_compilation_config = AscendCompilationConfig(**ascend_compilation_config)
 
+        ascend_fusion_config = additional_config.get("ascend_fusion_config", {})
+        self.ascend_fusion_config = AscendFusionConfig(**ascend_fusion_config)
+
         finegrained_tp_config = additional_config.get("finegrained_tp_config", {})
         self.finegrained_tp_config = FinegrainedTPConfig(finegrained_tp_config, vllm_config)
@@ -190,6 +193,24 @@ class AscendCompilationConfig:
         self.fuse_allreduce_rms = fuse_allreduce_rms
 
 
+class AscendFusionConfig:
+    """
+    Configuration for controlling whether to use the fused gmmswigluquant operator.
+    """
+
+    def __init__(self, fusion_ops_gmmswigluquant: bool = True, **kwargs):
+        """
+        Initialize the configuration.
+
+        Args:
+            fusion_ops_gmmswigluquant (bool): Whether to use the fused
+                gmmswigluquant operator. When set to True, the system uses
+                the fused operator. Default: True.
+            **kwargs: Additional optional parameters for forward
+                compatibility and configuration extension.
+        """
+        self.fusion_ops_gmmswigluquant = fusion_ops_gmmswigluquant
+
+
 class XliteGraphConfig:
     """
     Configuration object for xlite_graph_config from additional_config
```
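
As a quick illustration (not part of the diff): unknown keys are absorbed by `**kwargs`, so configs written for newer versions do not break older ones:

```python
cfg = AscendFusionConfig()
assert cfg.fusion_ops_gmmswigluquant is True  # fusion on by default

# A hypothetical future flag is silently accepted via **kwargs.
cfg = AscendFusionConfig(fusion_ops_gmmswigluquant=False, some_future_flag=1)
assert cfg.fusion_ops_gmmswigluquant is False
```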


The MoE communication layer reads the switch once at construction time and uses it to gate the fused path (only the int8 W8A8 path ever fuses):

```diff
@@ -48,6 +48,12 @@ def setup_moe_comm_method(moe_config):
     _MoECommMethods[MoECommType.FUSED_MC2] = FusedMC2CommImpl(moe_config)
 
 
+def set_gmmswigluquant_method():
+    from vllm_ascend.ascend_config import get_ascend_config
+    ascend_config = get_ascend_config()
+    return ascend_config.ascend_fusion_config.fusion_ops_gmmswigluquant
+
+
 @dataclass
 class FusedExpertsResult:
     routed_out: torch.Tensor
@@ -69,6 +75,7 @@ class MoECommMethod(ABC):
         self.token_dispatcher = self._get_token_dispatcher()
         self.prepare_finalize = self._get_prepare_finalize()
+        self.use_fusion_ops = set_gmmswigluquant_method()
 
     def prepare(
         self,
@@ -159,7 +166,7 @@ class MoECommMethod(ABC):
             w2_offset=w2_offset,
             topk_scales=dispatch_results.topk_scales,
             with_quant=use_int8_w8a8 or use_int4_w4a8 or use_int4_w4a16,
-            fusion=use_int8_w8a8,
+            fusion=use_int8_w8a8 and self.use_fusion_ops,
             need_trans=need_trans,
             dynamic_eplb=dynamic_eplb)
```
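
A minimal sketch of the resulting predicate (names mirror the hunk above; this helper is illustrative, not part of the PR):

```python
def should_fuse(use_int8_w8a8: bool, use_fusion_ops: bool) -> bool:
    # Fused gmmswigluquant is requested only for the int8 W8A8 path,
    # and only when the config switch is on.
    return use_int8_w8a8 and use_fusion_ops

assert should_fuse(True, True) is True    # default: fusion enabled
assert should_fuse(True, False) is False  # switch turned off
assert should_fuse(False, True) is False  # e.g. w4a8/w4a16: never fused
```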


Finally, the NPU platform hook mirrors the resolved fusion config back into `vllm_config.additional_config`, so downstream consumers see a plain dict:

```diff
@@ -206,6 +206,13 @@ class NPUPlatform(Platform):
         elif model_config and hasattr(model_config.hf_text_config, "index_topk"):
             vllm_config.cache_config.cache_dtype = str(model_config.dtype).replace("torch.", "")
 
+        ascend_fusion_config = ascend_config.ascend_fusion_config
+        if ascend_fusion_config:
+            vllm_config.additional_config.setdefault("ascend_fusion_config", {}).update(
+                vars(ascend_fusion_config) if not isinstance(ascend_fusion_config, dict) else ascend_fusion_config
+            )
+
         if model_config is None:
             logger.warning("Model config is missing. This may indicate that we are running a test case")
             enforce_eager = False
```
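
The `vars(...)` / `isinstance` expression above only normalizes the config object into a plain dict before merging. A self-contained sketch of that normalization (the `to_dict` helper is hypothetical):

```python
class AscendFusionConfig:
    # Trimmed copy of the class added in this PR, to keep the example runnable.
    def __init__(self, fusion_ops_gmmswigluquant: bool = True, **kwargs):
        self.fusion_ops_gmmswigluquant = fusion_ops_gmmswigluquant

def to_dict(cfg) -> dict:
    # Accept either a plain dict or an attribute-style config object.
    return cfg if isinstance(cfg, dict) else vars(cfg)

additional_config: dict = {}
additional_config.setdefault("ascend_fusion_config", {}).update(
    to_dict(AscendFusionConfig(fusion_ops_gmmswigluquant=False)))
print(additional_config)
# -> {'ascend_fusion_config': {'fusion_ops_gmmswigluquant': False}}
```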