[Bugfix] Reset incompatible config (#6005)

### What this PR does / why we need it?
This PR introduces compatibility fixes for running vLLM on Ascend NPU
hardware. GPU-specific parameters are now detected automatically and
reset to Ascend-compatible values, with a warning logged for each reset.
The affected parameters and the values they are reset to are listed
below; a sketch of the reset pattern follows the table.

| Module | Parameter | Reset Value |
|--------|-----------|---------------|
| Model Config | `disable_cascade_attn` | `False` |
| Parallel Config | `all2all_backend` | `"allgather_reducescatter"` |
| Cache Config | `cpu_kvcache_space_bytes` | `None` |
| MultiModal Config | `mm_encoder_attn_backend` | `None` |
| Observability Config | `enable_layerwise_nvtx_tracing` | `False` |
| Scheduler Config | `max_num_partial_prefills` | `1` |
| Speculative Config | `quantization` | `None` |
| KV Transfer Config | `kv_buffer_size` | `1e9` |
| KV Transfer Config | `enable_permute_local_kv` | `False` |
| Attention Config | `use_prefill_decode_attention` | `False` |
| Attention Config | `use_cudnn_prefill` | `False` |
| Attention Config | `use_trtllm_ragged_deepseek_prefill` | `False` |
| Attention Config | `use_trtllm_attention` | `False` |
| Attention Config | `disable_flashinfer_prefill` | `False` |
| Attention Config | `disable_flashinfer_q_quantization` | `False` |
| Attention Config | `flash_attn_version` | `None` |
| Attention Config | `backend` | `None` |
| Attention Config | `flash_attn_max_num_splits_for_cuda_graph` | `32` |
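
For illustration, here is a minimal sketch of the reset pattern described
above. The real logic lives in `NPUPlatform._fix_incompatible_config`
(patched out in the unit tests below); the table name `_ASCEND_RESETS` and
the helper `reset_incompatible_config` are hypothetical stand-ins:

```python
import logging

logger = logging.getLogger(__name__)

# (config section, attribute, Ascend-compatible value) -- entries taken from
# the table above; `_ASCEND_RESETS` is a hypothetical name for illustration.
_ASCEND_RESETS = [
    ("model_config", "disable_cascade_attn", False),
    ("parallel_config", "all2all_backend", "allgather_reducescatter"),
    ("cache_config", "cpu_kvcache_space_bytes", None),
    ("scheduler_config", "max_num_partial_prefills", 1),
    ("speculative_config", "quantization", None),
    ("kv_transfer_config", "kv_buffer_size", 1e9),
    # ... remaining multimodal/observability/attention entries from the table
]


def reset_incompatible_config(vllm_config) -> None:
    """Reset GPU-specific options to Ascend-compatible values, one warning per reset."""
    for section_name, attr, ascend_value in _ASCEND_RESETS:
        section = getattr(vllm_config, section_name, None)
        if section is None:  # optional sections (e.g. speculative) may be unset
            continue
        current = getattr(section, attr, ascend_value)
        if current != ascend_value:
            logger.warning(
                "%s.%s=%r is not supported on Ascend NPU; resetting to %r",
                section_name, attr, current, ascend_value,
            )
            setattr(section, attr, ascend_value)
```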

### Does this PR introduce _any_ user-facing change?

Yes. GPU-specific options that are incompatible with Ascend NPU are now
reset automatically to the values listed above, with a warning logged for
each reset.

### How was this patch tested?

Covered by the updated unit tests in the diff below.

- vLLM version: v0.13.0
- vLLM main: 2c24bc6996

Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Diff of the updated unit tests:

```diff
@@ -5,9 +5,7 @@ from unittest.mock import patch
 # isort: off
 import torch
 from vllm.config import VllmConfig
-from vllm.model_executor.layers.fused_moe.config import (FusedMoEConfig,
-                                                         FusedMoEParallelConfig
-                                                         )
+from vllm.model_executor.layers.fused_moe.config import FusedMoEConfig, FusedMoEParallelConfig
 from vllm_ascend.ascend_config import init_ascend_config
 from vllm_ascend.eplb.core.eplb_utils import init_eplb_config
@@ -15,30 +13,23 @@ from vllm_ascend.eplb.core.eplb_utils import init_eplb_config
 class TestAscendConfig(unittest.TestCase):
 
-    def setUp(self):
+    @patch("vllm_ascend.platform.NPUPlatform._fix_incompatible_config")
+    def setUp(self, mock_fix_incompatible_config):
         vllm_config = VllmConfig()
         vllm_config.additional_config = {
             "refresh": True,
-            "eplb_config": {
-                "dynamic_eplb": True,
-                "num_redundant_experts": 2
-            }
+            "eplb_config": {"dynamic_eplb": True, "num_redundant_experts": 2},
         }
-        moe_parallel_config = FusedMoEParallelConfig(2, 0, 1, 2, 1, 1, 1, 1,
-                                                     True, "hccl")
-        moe_config = FusedMoEConfig(8, 8, 8192, 5, moe_parallel_config,
-                                    torch.float16)
+        moe_parallel_config = FusedMoEParallelConfig(2, 0, 1, 2, 1, 1, 1, 1, True, "hccl")
+        moe_config = FusedMoEConfig(8, 8, 8192, 5, moe_parallel_config, torch.float16)
         moe_config.supports_eplb = True
         self.vllm_config = vllm_config
         self.moe_config = moe_config
-        self.mock_npu = patch("torch.Tensor.npu",
-                              new=lambda self: self).start()
+        self.mock_npu = patch("torch.Tensor.npu", new=lambda self: self).start()
 
     def test_init_eplb_config_with_eplb(self):
         eplb_config = init_ascend_config(self.vllm_config).eplb_config
-        _, expert_map, log2phy, redundant_experts = init_eplb_config(
-            eplb_config, 0, self.moe_config)
+        _, expert_map, log2phy, redundant_experts = init_eplb_config(eplb_config, 0, self.moe_config)
         gt_expert_map = torch.tensor([4, -1, -1, -1, 0, 1, 2, 3])
         gt_log2phy = torch.tensor([9, 1, 2, 3, 5, 6, 7, 8])
         self.assertTrue(torch.equal(expert_map, gt_expert_map))
@@ -47,11 +38,9 @@ class TestAscendConfig(unittest.TestCase):
     def test_init_eplb_config_with_eplb_withmap(self):
         _TEST_DIR = os.path.dirname(__file__)
-        self.vllm_config.additional_config["eplb_config"][
-            "expert_map_path"] = _TEST_DIR + "/expert_map.json"
+        self.vllm_config.additional_config["eplb_config"]["expert_map_path"] = _TEST_DIR + "/expert_map.json"
         eplb_config = init_ascend_config(self.vllm_config).eplb_config
-        _, expert_map, log2phy, redundant_experts = init_eplb_config(
-            eplb_config, 0, self.moe_config)
+        _, expert_map, log2phy, redundant_experts = init_eplb_config(eplb_config, 0, self.moe_config)
         gt_expert_map = torch.tensor([-1, 1, 4, -1, 2, -1, 0, 3])
         gt_log2phy = torch.tensor([2, 6, 9, 3, 7, 4, 5, 8])
         self.assertTrue(torch.equal(expert_map, gt_expert_map))
@@ -61,8 +50,7 @@ class TestAscendConfig(unittest.TestCase):
     def test_init_eplb_config_without_eplb(self):
         self.vllm_config.additional_config = {"refresh": True}
         eplb_config = init_ascend_config(self.vllm_config).eplb_config
-        _, expert_map, log2phy, redundant_experts = init_eplb_config(
-            eplb_config, 0, self.moe_config)
+        _, expert_map, log2phy, redundant_experts = init_eplb_config(eplb_config, 0, self.moe_config)
         gt_expert_map = torch.tensor([-1, -1, -1, -1, 0, 1, 2, 3])
         print(expert_map, log2phy, redundant_experts)
         self.assertTrue(torch.equal(expert_map, gt_expert_map))
```
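
Note how the diff decorates `setUp` itself with `@patch(...)`: the new
`_fix_incompatible_config` hook is mocked out while each test's `VllmConfig`
fixture is built, so the hook does not run against the default test config.
Below is a self-contained sketch of that pattern, with hypothetical stand-in
names (`Platform`, `fix_incompatible_config`):

```python
import unittest
from unittest.mock import patch


class Platform:
    """Stand-in for a platform class whose config hook tests want disabled."""

    @staticmethod
    def fix_incompatible_config(config):
        # In real code this would rewrite the config; tests patch it out.
        config["backend"] = None


class TestWithPatchedHook(unittest.TestCase):
    # Decorating setUp keeps the hook patched while the fixture is built;
    # unittest.mock passes the mock to setUp as an extra argument.
    @patch.object(Platform, "fix_incompatible_config")
    def setUp(self, mock_hook):
        self.config = {"backend": "flash_attn"}
        Platform.fix_incompatible_config(self.config)  # intercepted by the mock
        self.mock_hook = mock_hook

    def test_fixture_not_mutated(self):
        # The real hook never ran during setUp, so the fixture keeps its value.
        self.assertEqual(self.config["backend"], "flash_attn")
        self.mock_hook.assert_called_once()


if __name__ == "__main__":
    unittest.main()
```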