### What this PR does / why we need it?
This PR introduces compatibility fixes for running vLLM on Ascend NPU
hardware. The changes ensure that GPU-specific parameters are
automatically detected and reset to Ascend-compatible values with
appropriate warnings logged.
| Module | Parameter | Default Value |
|--------|-----------|---------------|
| Model Config | `disable_cascade_attn` | `False` |
| Parallel Config | `all2all_backend` | `"allgather_reducescatter"` |
| Cache Config | `cpu_kvcache_space_bytes` | `None` |
| MultiModal Config | `mm_encoder_attn_backend` | `None` |
| Observability Config | `enable_layerwise_nvtx_tracing` | `False` |
| Scheduler Config | `max_num_partial_prefills` | `1` |
| Speculative Config | `quantization` | `None` |
| KV Transfer Config | `kv_buffer_size` | `1e9` |
| KV Transfer Config | `enable_permute_local_kv` | `False` |
| Attention Config | `use_prefill_decode_attention` | `False` |
| Attention Config | `use_cudnn_prefill` | `False` |
| Attention Config | `use_trtllm_ragged_deepseek_prefill` | `False` |
| Attention Config | `use_trtllm_attention` | `False` |
| Attention Config | `disable_flashinfer_prefill` | `False` |
| Attention Config | `disable_flashinfer_q_quantization` | `False` |
| Attention Config | `flash_attn_version` | `None` |
| Attention Config | `backend` | `None` |
| Attention Config | `flash_attn_max_num_splits_for_cuda_graph` | `32` |
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main:
2c24bc6996
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
109 lines · 4.4 KiB · Python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
|
|
import functools
from unittest.mock import patch

from vllm.config import VllmConfig

from tests.ut.base import TestBase
from vllm_ascend.ascend_config import (clear_ascend_config,
                                       get_ascend_config,
                                       init_ascend_config)
class TestAscendConfig(TestBase):
    """Unit tests for the global ascend-config lifecycle helpers
    (``init_ascend_config`` / ``get_ascend_config`` / ``clear_ascend_config``).

    ``NPUPlatform._fix_incompatible_config`` is patched out in tests that
    build a ``VllmConfig`` so that no real NPU-compatibility fixing runs.
    """

    @staticmethod
    def _clean_up_ascend_config(func):
        """Decorator that resets the global ascend config around *func*.

        The trailing ``clear_ascend_config()`` runs in a ``finally`` block so
        that a failing test cannot leak initialized config state into the
        tests that run after it (the previous version skipped the trailing
        cleanup whenever *func* raised).
        """

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            clear_ascend_config()
            try:
                func(*args, **kwargs)
            finally:
                clear_ascend_config()

        return wrapper

    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_init_ascend_config_without_additional_config(
            self, mock_fix_incompatible_config):
        test_vllm_config = VllmConfig()
        # No additional config given, check the default values here.
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertFalse(ascend_config.multistream_overlap_shared_expert)
        self.assertFalse(ascend_config.enable_kv_nz)

        ascend_compilation_config = ascend_config.ascend_compilation_config
        self.assertTrue(ascend_compilation_config.fuse_norm_quant)

        ascend_fusion_config = ascend_config.ascend_fusion_config
        self.assertTrue(ascend_fusion_config.fusion_ops_gmmswigluquant)

    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_init_ascend_config_with_additional_config(
            self, mock_fix_incompatible_config):
        # Values passed via additional_config must override the defaults.
        test_vllm_config = VllmConfig()
        test_vllm_config.additional_config = {
            "ascend_compilation_config": {
                "fuse_norm_quant": False,
            },
            "ascend_fusion_config": {
                "fusion_ops_gmmswigluquant": False,
            },
            "multistream_overlap_shared_expert": True,
            "eplb_config": {
                "num_redundant_experts": 2
            },
            "refresh": True,
            "enable_kv_nz": False,
        }
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertEqual(ascend_config.eplb_config.num_redundant_experts, 2)
        self.assertTrue(ascend_config.multistream_overlap_shared_expert)
        # Not set in additional_config, so it keeps its default.
        self.assertFalse(ascend_config.enable_npugraph_ex)

        ascend_compilation_config = ascend_config.ascend_compilation_config
        self.assertFalse(ascend_compilation_config.fuse_norm_quant)
        self.assertFalse(ascend_config.enable_kv_nz)

        ascend_fusion_config = ascend_config.ascend_fusion_config
        self.assertFalse(ascend_fusion_config.fusion_ops_gmmswigluquant)

    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_init_ascend_config_enable_npugraph_ex(
            self, mock_fix_incompatible_config):
        test_vllm_config = VllmConfig()
        test_vllm_config.additional_config = {
            "enable_npugraph_ex": True,
            "refresh": True,
        }
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertTrue(ascend_config.enable_npugraph_ex)

    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_get_ascend_config(self, mock_fix_incompatible_config):
        # get_ascend_config() returns the object created by init_ascend_config().
        test_vllm_config = VllmConfig()
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertEqual(get_ascend_config(), ascend_config)

    @_clean_up_ascend_config
    def test_get_ascend_config_without_init(self):
        # Accessing the config before initialization must raise RuntimeError.
        with self.assertRaises(RuntimeError):
            get_ascend_config()

    @_clean_up_ascend_config
    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
    def test_clear_ascend_config(self, mock_fix_incompatible_config):
        # After clearing, the global config is gone and access raises again.
        test_vllm_config = VllmConfig()
        ascend_config = init_ascend_config(test_vllm_config)
        self.assertEqual(get_ascend_config(), ascend_config)
        clear_ascend_config()
        with self.assertRaises(RuntimeError):
            get_ascend_config()