diff --git a/tests/e2e/nightly/single_node/models/test_glm4_5.py b/tests/e2e/nightly/single_node/models/test_glm4_5.py
index 1255ddd0..49809cfb 100644
--- a/tests/e2e/nightly/single_node/models/test_glm4_5.py
+++ b/tests/e2e/nightly/single_node/models/test_glm4_5.py
@@ -29,6 +29,7 @@ MODELS = [
 TENSOR_PARALLELS = [8]
 DATA_PARALLELS = [2]
+FULL_GRAPH = [True, False]
 
 prompts = [
     "San Francisco is a",
@@ -65,11 +66,9 @@ aisbench_cases = [{
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
 @pytest.mark.parametrize("dp_size", DATA_PARALLELS)
-async def test_models(
-    model: str,
-    tp_size: int,
-    dp_size: int,
-) -> None:
+@pytest.mark.parametrize("full_graph", FULL_GRAPH)
+async def test_models(model: str, tp_size: int, dp_size: int,
+                      full_graph: bool) -> None:
     port = get_open_port()
     env_dict = {"HCCL_BUFFSIZE": "1024"}
     server_args = [
@@ -91,6 +90,11 @@ async def test_models(
         "--gpu-memory-utilization",
         "0.9",
     ]
+    if full_graph:
+        server_args += [
+            "--compilation-config",
+            '{"cudagraph_capture_sizes": [1, 2, 4, 8, 16], "cudagraph_mode": "FULL_DECODE_ONLY"}'
+        ]
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,
     }
diff --git a/vllm_ascend/quantization/quant_config.py b/vllm_ascend/quantization/quant_config.py
index f6a98241..1d0ddd74 100644
--- a/vllm_ascend/quantization/quant_config.py
+++ b/vllm_ascend/quantization/quant_config.py
@@ -173,7 +173,15 @@ class AscendQuantConfig(QuantizationConfig):
                 "are quantized. All shards of fused layers "
                 "to have the same precision.")
         else:
-            is_skipped = self.quant_description[prefix + '.weight'] == "FLOAT"
+            # NOTE: In GLM4.6, the MTP draft model shares its LM head weights
+            # with the main model. Before `load_weights()` runs, some parameter
+            # names may therefore lack the expected prefix and appear only with
+            # the ".head" suffix, which can raise a KeyError at load time. Fall
+            # back to the shared "lm_head.weight" key in that case.
+            key = prefix + '.weight'
+            if key not in self.quant_description and ".head" in prefix:
+                key = 'lm_head.weight'
+            is_skipped = self.quant_description[key] == "FLOAT"
         assert is_skipped is not None
         return is_skipped
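
Reviewer note (not part of the patch): a minimal sketch for sanity-checking the
new --compilation-config value outside the e2e suite. It assumes vLLM's
CompilationConfig JSON schema uses the field names "cudagraph_capture_sizes"
and "cudagraph_mode", and that FULL_DECODE_ONLY is a valid CUDAGraphMode in the
pinned vLLM version; the string must at least parse as JSON before the server
will accept it.

import json

# Mirror of the value passed to --compilation-config in the test above.
# Field names assume the pinned vLLM's CompilationConfig schema.
compilation_config = (
    '{"cudagraph_capture_sizes": [1, 2, 4, 8, 16], '
    '"cudagraph_mode": "FULL_DECODE_ONLY"}')

cfg = json.loads(compilation_config)  # fails fast on malformed JSON
assert cfg["cudagraph_mode"] == "FULL_DECODE_ONLY"
assert cfg["cudagraph_capture_sizes"] == [1, 2, 4, 8, 16]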
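
Reviewer note (not part of the patch): the quant_config fallback is easier to
reason about in isolation. Below is a minimal standalone sketch of the same
lookup; the "model.mtp.head" prefix and the quant_description contents are
hypothetical stand-ins for the checkpoint's quantization metadata.

def is_layer_skipped(prefix: str, quant_description: dict[str, str]) -> bool:
    # Same lookup as AscendQuantConfig above: prefer the exact key, but a
    # GLM4.6 MTP draft head tied to the main model may only be visible as
    # "<...>.head" before load_weights() runs, so fall back to the shared
    # lm_head entry instead of raising KeyError.
    key = prefix + '.weight'
    if key not in quant_description and ".head" in prefix:
        key = 'lm_head.weight'
    return quant_description[key] == "FLOAT"

# Hypothetical metadata: the shared head is unquantized, one MLP shard is W8A8.
quant_description = {
    "lm_head.weight": "FLOAT",
    "model.layers.0.mlp.gate_proj.weight": "W8A8",
}
assert is_layer_skipped("model.mtp.head", quant_description)  # falls back
assert not is_layer_skipped("model.layers.0.mlp.gate_proj", quant_description)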