[EPLB] EPLB Config Renaming (#5533)

### What this PR does / why we need it?
1. Rename `num_iterations_eplb_update` to `expert_heat_collection_interval`.
2. Rename `num_wait_worker_iterations` to `algorithm_execution_interval`.
3. Rename `init_redundancy_expert` to `num_redundant_experts`, matching the
name of the equivalent option in vLLM.
4. Remove `gate_eplb`, since the feature is no longer needed.
5. Move all EPLB options into an `eplb_config` dict inside
`additional_config` (see the migration sketch after this list).
6. Depends on pr5817.
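
As a quick reference, the renames can be expressed as a small migration sketch in Python. It only restates the mapping above; the helper `migrate_eplb_config` is illustrative and is not part of this PR or of vllm-ascend:

```python
# Illustrative only: rewrite a pre-PR flat additional_config into the
# nested eplb_config layout introduced by this PR.
_EPLB_KEY_RENAMES = {
    "dynamic_eplb": "dynamic_eplb",
    "num_iterations_eplb_update": "expert_heat_collection_interval",
    "num_wait_worker_iterations": "algorithm_execution_interval",
    "init_redundancy_expert": "num_redundant_experts",
    "expert_map_path": "expert_map_path",
}


def migrate_eplb_config(old: dict) -> dict:
    # gate_eplb is dropped entirely; the option was removed by this PR.
    kept = {
        k: v
        for k, v in old.items()
        if k not in _EPLB_KEY_RENAMES and k != "gate_eplb"
    }
    eplb = {
        _EPLB_KEY_RENAMES[k]: v
        for k, v in old.items() if k in _EPLB_KEY_RENAMES
    }
    if eplb:
        kept["eplb_config"] = eplb
    return kept
```

For example, `migrate_eplb_config({"dynamic_eplb": True, "init_redundancy_expert": 16})` yields `{"eplb_config": {"dynamic_eplb": True, "num_redundant_experts": 16}}`.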

### Does this PR introduce _any_ user-facing change?

Yes. Before this PR:

`--additional-config '{"dynamic_eplb": true, "num_iterations_eplb_update": 4000, "num_wait_worker_iterations": 150, "init_redundancy_expert": 16, "expert_map_path": "xxx.json"}'`

After this PR:

`--additional-config '{"eplb_config": {"dynamic_eplb": true, "expert_heat_collection_interval": 4000, "algorithm_execution_interval": 150, "num_redundant_experts": 16, "expert_map_path": "xxx.json"}}'`

### How was this patch tested?

#### Test: Qwen3-235B EPLB with num_redundant_experts=16

Without pr5817:
| dataset | version | metric | mode | vllm-api-general-chat |
|----- | ----- | ----- | ----- | -----|
| aime2024 | 604a78 | accuracy | gen | 83.33 |

With pr5817:
| dataset | version | metric | mode | vllm-api-general-chat |
|----- | ----- | ----- | ----- | -----|
| aime2024 | 604a78 | accuracy | gen | 86.67 |

- vLLM version: v0.13.0
- vLLM main: 45c1ca1ca1

Signed-off-by: shenchuxiaofugui <1311027364@qq.com>
Author: LI SHENGYONG
Committed by: GitHub
Date: 2026-01-15 10:26:44 +08:00
Parent: ea01aeaab7
Commit: da958ee386
21 changed files with 174 additions and 349 deletions

@@ -105,10 +105,12 @@ async def test_qwen3_moe_w8a8_distributed_tp2_ep_dynamic_eplb():
     # during initialization in offline mode, so the online mode is used instead.
     env_dict.update({"DYNAMIC_EPLB": "true"})
     additional_config = {
-        "dynamic_eplb": True,
-        "num_iterations_eplb_update": 100,
-        "num_wait_worker_iterations": 20,
-        "num_redundant_experts": 2
+        "eplb_config": {
+            "dynamic_eplb": True,
+            "expert_heat_collection_interval": 100,
+            "algorithm_execution_interval": 20,
+            "num_redundant_experts": 2
+        }
     }
     server_args.extend(["--additional-config", json.dumps(additional_config)])
     with RemoteOpenAIServer(model,

@@ -55,7 +55,7 @@ deployment:
         }
       }'
       --additional-config
-      '{"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
+      '{"enable_prefill_optimizations":true,"enable_weight_nz_layout":true,"eplb_config": {"dynamic_eplb":true,"expert_heat_collection_interval":2048,"algorithm_execution_interval":200}}'
   -
     server_cmd: >
@@ -92,7 +92,7 @@ deployment:
         }
       }'
       --additional-config
-      '{"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
+      '{"enable_prefill_optimizations":true,"enable_weight_nz_layout":true,"eplb_config": {"dynamic_eplb":true,"expert_heat_collection_interval":2048,"algorithm_execution_interval":200}}'
   -
     server_cmd: >
       vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
@@ -130,7 +130,7 @@ deployment:
         }
       }'
       --additional-config
-      '{"multistream_overlap_shared_expert":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
+      '{"multistream_overlap_shared_expert":true,"dynamic_eplb":true,"expert_heat_collection_interval":2048,"algorithm_execution_interval":200}'
   -
     server_cmd: >
       vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
@@ -167,7 +167,7 @@ deployment:
         }
       }'
       --additional-config
-      '{"multistream_overlap_shared_expert":true,"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
+      '{"multistream_overlap_shared_expert":true,"eplb_config": {"dynamic_eplb":true,"expert_heat_collection_interval":2048,"algorithm_execution_interval":200}}'
 benchmarks:
   perf:
     case_type: performance

@@ -51,7 +51,7 @@ deployment:
         }
       }'
       --additional-config
-      '{"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
+      '{"eplb_config": {"dynamic_eplb":true,"expert_heat_collection_interval":2048,"algorithm_execution_interval":200}}'
   -
     server_cmd: >
@@ -87,5 +87,5 @@ deployment:
         }
       }'
       --additional-config
-      '{"dynamic_eplb":true,"num_iterations_eplb_update":2048,"num_wait_worker_iterations":200}'
+      '{"eplb_config": {"dynamic_eplb":true,"expert_heat_collection_interval":2048,"algorithm_execution_interval":200}}'
 benchmarks:

@@ -70,11 +70,12 @@ async def test_models(model: str) -> None:
     additional_config: dict[str, Any] = {
         "enable_shared_expert_dp": False,
         "multistream_overlap_shared_expert": False,
-        "dynamic_eplb": True,
-        "num_iterations_eplb_update": 14000,
-        "num_wait_worker_iterations": 30,
-        "init_redundancy_expert": 0,
-        "gate_eplb": False
+        "eplb_config": {
+            "dynamic_eplb": True,
+            "expert_heat_collection_interval": 512,
+            "algorithm_execution_interval": 100,
+            "num_redundant_experts": 0
+        }
     }
     server_args = [
         "--quantization", "ascend", "--seed", "1024",

@@ -70,13 +70,13 @@ async def test_models(model: str) -> None:
         "8192", "--max-num-seqs", "12", "--trust-remote-code",
         "--gpu-memory-utilization", "0.9"
     ]
     env_dict["EXPERT_MAP_RECORD"] = "true"
     env_dict["DYNAMIC_EPLB"] = "true"
-    additional_config["dynamic_eplb"] = True
-    additional_config["num_iterations_eplb_update"] = 14000
-    additional_config["num_wait_worker_iterations"] = 30
-    additional_config["init_redundancy_expert"] = 0
-    additional_config["gate_eplb"] = False
+    additional_config["eplb_config"] = {
+        "dynamic_eplb": True,
+        "expert_heat_collection_interval": 512,
+        "algorithm_execution_interval": 100,
+        "num_redundant_experts": 0
+    }
     server_args.extend(
         ["--compilation-config",
          json.dumps(compilation_config)])

@@ -1,10 +1,8 @@
 import os
-import sys
 import unittest
 from unittest.mock import patch

 # isort: off
-import pytest
 import torch
 from vllm.config import VllmConfig
 from vllm.model_executor.layers.fused_moe.config import (FusedMoEConfig,
@@ -12,7 +10,7 @@ from vllm.model_executor.layers.fused_moe.config import (FusedMoEConfig,
                                                          )

 from vllm_ascend.ascend_config import init_ascend_config
-from vllm_ascend.eplb.core.eplb_utils import EPLBParamUtils, init_eplb_config
+from vllm_ascend.eplb.core.eplb_utils import init_eplb_config
 # isort: on
@@ -20,23 +18,28 @@ class TestAscendConfig(unittest.TestCase):

     def setUp(self):
         vllm_config = VllmConfig()
-        ascend_config = init_ascend_config(vllm_config)
-        ascend_config.dynamic_eplb = True
-        ascend_config.init_redundancy_expert = 2
+        vllm_config.additional_config = {
+            "refresh": True,
+            "eplb_config": {
+                "dynamic_eplb": True,
+                "num_redundant_experts": 2
+            }
+        }
         moe_parallel_config = FusedMoEParallelConfig(2, 0, 1, 2, 1, 1, 1, 1,
                                                      True, "hccl")
         moe_config = FusedMoEConfig(8, 8, 8192, 5, moe_parallel_config,
                                     torch.float16)
         moe_config.supports_eplb = True
-        self.ascend_config = ascend_config
+        self.vllm_config = vllm_config
         self.moe_config = moe_config
         self.mock_npu = patch("torch.Tensor.npu",
                               new=lambda self: self).start()
         self.rank = 1

     def test_init_eplb_config_with_eplb(self):
+        eplb_config = init_ascend_config(self.vllm_config).eplb_config
         expert_map, log2phy, redundant_experts = init_eplb_config(
-            self.ascend_config, 0, self.moe_config)
+            eplb_config, 0, self.moe_config)
         gt_expert_map = torch.tensor([4, -1, -1, -1, 0, 1, 2, 3])
         gt_log2phy = torch.tensor([9, 1, 2, 3, 5, 6, 7, 8])
         self.assertTrue(torch.equal(expert_map[self.rank], gt_expert_map))
@@ -45,9 +48,11 @@ class TestAscendConfig(unittest.TestCase):

     def test_init_eplb_config_with_eplb_withmap(self):
         _TEST_DIR = os.path.dirname(__file__)
-        self.ascend_config.expert_map_path = _TEST_DIR + "/expert_map.json"
+        self.vllm_config.additional_config["eplb_config"][
+            "expert_map_path"] = _TEST_DIR + "/expert_map.json"
+        eplb_config = init_ascend_config(self.vllm_config).eplb_config
         expert_map, log2phy, redundant_experts = init_eplb_config(
-            self.ascend_config, 0, self.moe_config)
+            eplb_config, 0, self.moe_config)
         gt_expert_map = torch.tensor([-1, 1, 4, -1, 2, -1, 0, 3])
         gt_log2phy = torch.tensor([2, 6, 9, 3, 7, 4, 5, 8])
         self.assertTrue(torch.equal(expert_map[self.rank], gt_expert_map))
@@ -55,159 +60,11 @@ class TestAscendConfig(unittest.TestCase):
         self.assertEqual(redundant_experts, 2)

     def test_init_eplb_config_without_eplb(self):
-        self.ascend_config.dynamic_eplb = False
-        self.ascend_config.expert_map_path = None
+        self.vllm_config.additional_config = {"refresh": True}
+        eplb_config = init_ascend_config(self.vllm_config).eplb_config
         expert_map, log2phy, redundant_experts = init_eplb_config(
-            self.ascend_config, 0, self.moe_config)
+            eplb_config, 0, self.moe_config)
         gt_expert_map = torch.tensor([-1, -1, -1, -1, 0, 1, 2, 3])
         print(expert_map, log2phy, redundant_experts)
         self.assertTrue(torch.equal(expert_map[self.rank], gt_expert_map))
         self.assertEqual(redundant_experts, 0)
-
-
-class TestEPLBParamUtils:
-
-    def test_check_iterations_valid(self):
-        EPLBParamUtils.check_iterations(1)
-        EPLBParamUtils.check_iterations(100)
-
-    def test_check_iterations_type_error(self):
-        with pytest.raises(TypeError, match="is not int"):
-            EPLBParamUtils.check_iterations("abc")
-        with pytest.raises(TypeError, match="is not int"):
-            EPLBParamUtils.check_iterations(1.5)
-        with pytest.raises(TypeError, match="is not int"):
-            EPLBParamUtils.check_iterations(None)
-
-    def test_check_iterations_value_error_less_than_or_equal_zero(self):
-        with pytest.raises(ValueError,
-                           match="can not less than or equal to 0"):
-            EPLBParamUtils.check_iterations(0)
-        with pytest.raises(ValueError,
-                           match="can not less than or equal to 0"):
-            EPLBParamUtils.check_iterations(-1)
-
-    def test_check_iterations_value_error_large_than_sys_maxsize(self):
-        large_value = sys.maxsize + 1
-        with pytest.raises(ValueError,
-                           match=f"can not large than {sys.maxsize}"):
-            EPLBParamUtils.check_iterations(large_value)
-
-    def test_check_dynamic_eplb_none(self):
-        EPLBParamUtils.check_dynamic_eplb(None)
-
-    def test_check_dynamic_eplb_valid_bool(self):
-        EPLBParamUtils.check_dynamic_eplb(False)
-
-    def test_check_dynamic_eplb_type_error(self):
-        with pytest.raises(TypeError, match="The dynamic_eplb is not bool."):
-            EPLBParamUtils.check_dynamic_eplb("true")
-        with pytest.raises(TypeError, match="The dynamic_eplb is not bool."):
-            EPLBParamUtils.check_dynamic_eplb(1)
-
-    def test_check_dynamic_eplb_value_error_env_not_set(self, monkeypatch):
-        monkeypatch.delenv("DYNAMIC_EPLB", raising=False)
-        with pytest.raises(
-                ValueError,
-                match=
-                'Can not enable dynamic_eplb when DYNAMIC_EPLB is not set to "true" or "1".'
-        ):
-            EPLBParamUtils.check_dynamic_eplb(True)
-        monkeypatch.setenv("DYNAMIC_EPLB", "false")
-        with pytest.raises(
-                ValueError,
-                match=
-                'Can not enable dynamic_eplb when DYNAMIC_EPLB is not set to "true" or "1".'
-        ):
-            EPLBParamUtils.check_dynamic_eplb(True)
-        monkeypatch.setenv("DYNAMIC_EPLB", "any_other_value")
-        with pytest.raises(
-                ValueError,
-                match=
-                'Can not enable dynamic_eplb when DYNAMIC_EPLB is not set to "true" or "1".'
-        ):
-            EPLBParamUtils.check_dynamic_eplb(True)
-
-    def test_check_dynamic_eplb_valid_with_env_set(self, monkeypatch):
-        monkeypatch.setenv("DYNAMIC_EPLB", "true")
-        EPLBParamUtils.check_dynamic_eplb(True)
-        monkeypatch.setenv("DYNAMIC_EPLB", "True")
-        EPLBParamUtils.check_dynamic_eplb(True)
-        monkeypatch.setenv("DYNAMIC_EPLB", "1")
-        EPLBParamUtils.check_dynamic_eplb(True)
-
-    def test_check_expert_map_path_none(self):
-        EPLBParamUtils.check_expert_map_path(None)
-
-    def test_check_expert_map_path_type_error_not_string(self):
-        with pytest.raises(TypeError, match="The expert_map is not str."):
-            EPLBParamUtils.check_expert_map_path(123)
-        with pytest.raises(TypeError, match="The expert_map is not str."):
-            EPLBParamUtils.check_expert_map_path(True)
-
-    def test_check_expert_map_path_value_error_empty_string(self):
-        with pytest.raises(ValueError, match="The expert_map is not empty."):
-            EPLBParamUtils.check_expert_map_path("")
-        with pytest.raises(ValueError, match="The expert_map is not empty."):
-            EPLBParamUtils.check_expert_map_path(" ")
-
-    def test_check_expert_map_path_type_error_incorrect_extension(self):
-        with pytest.raises(TypeError, match="The expert_map is not json."):
-            EPLBParamUtils.check_expert_map_path("path/to/map.txt")
-        with pytest.raises(TypeError, match="The expert_map is not json."):
-            EPLBParamUtils.check_expert_map_path("path/to/map.JSON_")
-
-    @patch('os.path.exists', return_value=False)
-    def test_check_expert_map_path_value_error_not_exist(self, mock_exists):
-        with pytest.raises(ValueError, match="The expert_map is not exist."):
-            EPLBParamUtils.check_expert_map_path("non_existent_map.json")
-        mock_exists.assert_called_once_with("non_existent_map.json")
-
-    def test_check_expert_map_record_path_none(self):
-        EPLBParamUtils.check_expert_map_record_path(None)
-
-    def test_check_expert_map_record_path_type_error_not_string(self):
-        with pytest.raises(TypeError,
-                           match="The expert_map_record_path is not str."):
-            EPLBParamUtils.check_expert_map_record_path(123)
-        with pytest.raises(TypeError,
-                           match="The expert_map_record_path is not str."):
-            EPLBParamUtils.check_expert_map_record_path(False)
-
-    def test_check_expert_map_record_path_value_error_empty_string(self):
-        with pytest.raises(ValueError,
-                           match="The expert_map_record_path is empty."):
-            EPLBParamUtils.check_expert_map_record_path("")
-        with pytest.raises(ValueError,
-                           match="The expert_map_record_path is empty."):
-            EPLBParamUtils.check_expert_map_record_path(" ")
-
-    def test_check_expert_map_record_path_type_error_incorrect_extension(self):
-        with pytest.raises(TypeError,
-                           match="The expert_map_record_path is not json."):
-            EPLBParamUtils.check_expert_map_record_path("path/to/record.txt")
-        with pytest.raises(TypeError,
-                           match="The expert_map_record_path is not json."):
-            EPLBParamUtils.check_expert_map_record_path("path/to/record.XML")
-
-    def test_check_expert_map_record_path_value_error_env_not_set(
-            self, monkeypatch):
-        monkeypatch.delenv("EXPERT_MAP_RECORD", raising=False)
-        with pytest.raises(
-                ValueError,
-                match=
-                'Can not enable expert_map_record_path when not export EXPERT_MAP_RECORD="true".'
-        ):
-            EPLBParamUtils.check_expert_map_record_path("path/to/record.json")
-        monkeypatch.setenv("EXPERT_MAP_RECORD", "false")
-        with pytest.raises(
-                ValueError,
-                match=
-                'Can not enable expert_map_record_path when not export EXPERT_MAP_RECORD="true".'
-        ):
-            EPLBParamUtils.check_expert_map_record_path("path/to/record.json")

@@ -101,8 +101,8 @@ class TestAscendW4A16FusedMoEMethod(TestBase):
     @patch("vllm_ascend.quantization.w4a16.get_current_vllm_config")
     def setUp(self, mock_get_current_vllm_config, mock_get_ascend_config):
         mock_ascend_config = Mock()
-        mock_ascend_config.dynamic_eplb = False
-        mock_ascend_config.expert_map_record_path = None
+        mock_ascend_config.eplb_config.dynamic_eplb = False
+        mock_ascend_config.eplb_config.expert_map_record_path = None
         mock_get_ascend_config.return_value = mock_ascend_config
         mock_vllm_config = Mock()

@@ -136,7 +136,7 @@ class TestAscendW4A8DynamicFusedMoEMethod(TestBase):
                     get_current_vllm_config, mock_get_ascend_config):
         # Mock ascend config
         mock_ascend_config = Mock()
-        mock_ascend_config.dynamic_eplb = False
+        mock_ascend_config.eplb_config.dynamic_eplb = False
         mock_get_ascend_config.return_value = mock_ascend_config
         mock_vllm_config = Mock()

@@ -37,7 +37,6 @@ class TestAscendConfig(TestBase):
         test_vllm_config = VllmConfig()
         # No additional config given, check the default value here.
         ascend_config = init_ascend_config(test_vllm_config)
-        self.assertIsNone(ascend_config.expert_map_path)
         self.assertFalse(ascend_config.multistream_overlap_shared_expert)
         self.assertFalse(ascend_config.enable_kv_nz)
@@ -52,12 +51,14 @@ class TestAscendConfig(TestBase):
                 "fuse_norm_quant": False,
             },
             "multistream_overlap_shared_expert": True,
-            "expert_map_path": "test_expert_map_path",
+            "eplb_config": {
+                "num_redundant_experts": 2
+            },
             "refresh": True,
             "enable_kv_nz": False
         }
         ascend_config = init_ascend_config(test_vllm_config)
-        self.assertEqual(ascend_config.expert_map_path, "test_expert_map_path")
+        self.assertEqual(ascend_config.eplb_config.num_redundant_experts, 2)
         self.assertTrue(ascend_config.multistream_overlap_shared_expert)
         self.assertFalse(ascend_config.enable_npugraph_ex)