From a80e524fbcaf58576ba7bfe56365a4259a24b692 Mon Sep 17 00:00:00 2001
From: dsxsteven <36877507+dsxsteven@users.noreply.github.com>
Date: Tue, 3 Feb 2026 19:49:58 +0800
Subject: [PATCH] [Quant] GLM4.7-Flash Support W8A8 (#6492)

### What this PR does / why we need it?
Support W8A8 quantization for the GLM4.7-Flash model.

### Does this PR introduce _any_ user-facing change?
Yes

### How was this patch tested?
- vLLM version: v0.15.0
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.15.0

Signed-off-by: dsxsteven
Co-authored-by: SlightwindSec
---
 vllm_ascend/quantization/modelslim_config.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/vllm_ascend/quantization/modelslim_config.py b/vllm_ascend/quantization/modelslim_config.py
index 5464cee7..0303b0dc 100644
--- a/vllm_ascend/quantization/modelslim_config.py
+++ b/vllm_ascend/quantization/modelslim_config.py
@@ -166,6 +166,12 @@ packed_modules_model_mapping: Dict[str, Dict[str, List[str]]] = {
         "experts":
         ["experts.0.gate_proj", "experts.0.up_proj", "experts.0.down_proj"]
     },
+    "glm4_moe_lite": {
+        "gate_up_proj": ["gate_proj", "up_proj"],
+        "experts":
+        ["experts.0.gate_proj", "experts.0.up_proj", "experts.0.down_proj"],
+        "fused_qkv_a_proj": ["q_a_proj", "kv_a_proj_with_mqa"]
+    },
     "longcat_flash": {
         "gate_up_proj": ["gate_proj", "up_proj"],
         "experts":
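
Editorial note (not part of the patch): a minimal, self-contained sketch of what the new `glm4_moe_lite` entry encodes. The mapping pairs each packed (fused) module name with the per-weight sub-projections that the W8A8 checkpoint stores separately. The `resolve_packed_sources` helper below is hypothetical and exists only to illustrate the lookup direction; the real consumption of this mapping lives elsewhere in vllm-ascend's quantization code and may differ.

```python
from typing import Dict, List

# Copied from the patch: packed module name -> unfused sub-projection names
# as they appear in the quantized (ModelSlim W8A8) checkpoint.
glm4_moe_lite_packed: Dict[str, List[str]] = {
    "gate_up_proj": ["gate_proj", "up_proj"],
    "experts":
    ["experts.0.gate_proj", "experts.0.up_proj", "experts.0.down_proj"],
    "fused_qkv_a_proj": ["q_a_proj", "kv_a_proj_with_mqa"],
}


def resolve_packed_sources(packed_name: str) -> List[str]:
    """Hypothetical helper: return the unfused weight names behind a packed
    module, or the name itself when it is not packed."""
    return glm4_moe_lite_packed.get(packed_name, [packed_name])


if __name__ == "__main__":
    # The fused low-rank QKV projection corresponds to two separate
    # quantized weights in the checkpoint.
    print(resolve_packed_sources("fused_qkv_a_proj"))
    # -> ['q_a_proj', 'kv_a_proj_with_mqa']
```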