Files
xc-llm-ascend/tests/ut/test_ascend_config.py
ChenCangtao 6c30f8bf87 [Feature]refactor the npugraph_ex config, support online-infer with static kernel (#5775)
### What this PR does / why we need it?
This is a part of
https://github.com/vllm-project/vllm-ascend/issues/4715#issue-3694310762
1. refactor the npugraph_ex config and change the default configuration of
the static kernel; the new default value of the static kernel is false
2. support online-infer with static kernel
3. fixed the issue where manually modifying FX graphs caused an abnormal
model return type, and removed the related redundant code.

### Does this PR introduce _any_ user-facing change?
Yes, the new config of npugraph_ex is as follows:
```
additional_config={
            "npugraph_ex_config": {
                "enable": True,
                "enable_static_kernel": False
            }
        }
```
### How was this patch tested?
```
vllm serve /data/DeepSeek-V3.1-Terminus-w4a8 \
    --host 0.0.0.0 \
    --port 8004 \
    --data-parallel-size 4 \
    --tensor-parallel-size 4 \
    --quantization ascend \
    --seed 1024 \
    --served-model-name deepseek_v3 \
    --enable-expert-parallel \
    --max-num-seqs 48 \
    --max-model-len 40000 \
    --async-scheduling \
    --max-num-batched-tokens 9000 \
    --trust-remote-code \
    --no-enable-prefix-caching \
    --speculative-config '{"num_speculative_tokens": 3, "method":"deepseek_mtp","disable_padded_drafter_batch": false}' \
    --gpu-memory-utilization 0.9 \
    --compilation-config '{"cudagraph_capture_sizes":[4,32,64,112,160,176,192], "cudagraph_mode": "FULL_DECODE_ONLY"}' \
    --additional-config \
    '{"enable_shared_expert_dp": true,"multistream_overlap_shared_expert": true,"npugraph_ex_config":{"enable":true}}'
```

- vLLM version: v0.13.0
- vLLM main:
2f4e6548ef

---------

Signed-off-by: chencangtao <chencangtao@huawei.com>
Signed-off-by: ChenCangtao <50493711+ChenCangtao@users.noreply.github.com>
Co-authored-by: chencangtao <chencangtao@huawei.com>
2026-01-20 21:31:38 +08:00

117 lines
4.7 KiB
Python

#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
import functools
from unittest.mock import patch

from vllm.config import VllmConfig

from tests.ut.base import TestBase
from vllm_ascend.ascend_config import (clear_ascend_config,
                                       get_ascend_config,
                                       init_ascend_config)
class TestAscendConfig(TestBase):
    """Unit tests for the global ascend config lifecycle
    (init_ascend_config / get_ascend_config / clear_ascend_config)."""

    @staticmethod
    def _clean_up_ascend_config(func):
        """Decorator: clear the global ascend config before and after the
        wrapped test so tests never observe each other's state.

        The trailing cleanup runs in a ``finally`` block so it executes even
        when the test raises (e.g. a failing assertion) — otherwise a failed
        test would leak its config into subsequently-run tests.
        """

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            clear_ascend_config()
            try:
                # Propagate the wrapped function's return value (unittest
                # test methods return None, but don't silently drop it).
                return func(*args, **kwargs)
            finally:
                clear_ascend_config()

        return wrapper

    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_init_ascend_config_without_additional_config(
            self, mock_fix_incompatible_config):
        """No additional config given: check the documented default values."""
        test_vllm_config = VllmConfig()
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertFalse(ascend_config.multistream_overlap_shared_expert)
        self.assertFalse(ascend_config.enable_kv_nz)
        ascend_compilation_config = ascend_config.ascend_compilation_config
        self.assertTrue(ascend_compilation_config.fuse_norm_quant)
        ascend_fusion_config = ascend_config.ascend_fusion_config
        self.assertTrue(ascend_fusion_config.fusion_ops_gmmswigluquant)

    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_init_ascend_config_with_additional_config(
            self, mock_fix_incompatible_config):
        """Values supplied via additional_config override the defaults;
        npugraph_ex_config stays at its defaults when not supplied."""
        test_vllm_config = VllmConfig()
        test_vllm_config.additional_config = {
            "ascend_compilation_config": {
                "fuse_norm_quant": False,
            },
            "ascend_fusion_config": {
                "fusion_ops_gmmswigluquant": False,
            },
            "multistream_overlap_shared_expert": True,
            "eplb_config": {
                "num_redundant_experts": 2
            },
            "refresh": True,
            "enable_kv_nz": False,
        }
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertEqual(ascend_config.eplb_config.num_redundant_experts, 2)
        self.assertTrue(ascend_config.multistream_overlap_shared_expert)
        # npugraph_ex_config was not set above, so both flags keep their
        # defaults (disabled).
        npugraph_ex_config = ascend_config.npugraph_ex_config
        self.assertFalse(npugraph_ex_config.enable)
        self.assertFalse(npugraph_ex_config.enable_static_kernel)
        ascend_compilation_config = ascend_config.ascend_compilation_config
        self.assertFalse(ascend_compilation_config.fuse_norm_quant)
        self.assertFalse(ascend_config.enable_kv_nz)
        ascend_fusion_config = ascend_config.ascend_fusion_config
        self.assertFalse(ascend_fusion_config.fusion_ops_gmmswigluquant)

    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_init_ascend_config_enable_npugraph_ex(
            self, mock_fix_incompatible_config):
        """npugraph_ex_config flags can both be switched on explicitly."""
        test_vllm_config = VllmConfig()
        test_vllm_config.additional_config = {
            "npugraph_ex_config": {
                "enable": True,
                "enable_static_kernel": True
            },
            "refresh": True
        }
        npugraph_ex_config = init_ascend_config(
            test_vllm_config).npugraph_ex_config
        self.assertTrue(npugraph_ex_config.enable)
        self.assertTrue(npugraph_ex_config.enable_static_kernel)

    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_get_ascend_config(self, mock_fix_incompatible_config):
        """get_ascend_config returns the object produced by init."""
        test_vllm_config = VllmConfig()
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertEqual(get_ascend_config(), ascend_config)

    @_clean_up_ascend_config
    def test_get_ascend_config_without_init(self):
        """Accessing the config before init is a RuntimeError."""
        with self.assertRaises(RuntimeError):
            get_ascend_config()

    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_clear_ascend_config(self, mock_fix_incompatible_config):
        """clear_ascend_config resets the global state so a subsequent
        get_ascend_config raises again."""
        test_vllm_config = VllmConfig()
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertEqual(get_ascend_config(), ascend_config)
        clear_ascend_config()
        with self.assertRaises(RuntimeError):
            get_ascend_config()