[CI] Add EPLB CI. (#3568)

### What this PR does / why we need it?
1. Add an EPLB CI job so changes to the EPLB feature are exercised in CI.
2. Add validation of the EPLB parameters.
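For reference, the EPLB-related keys validated by the new checks are passed through `--additional-config`; the values below are copied from the e2e tests added in this PR:

```json
{
  "dynamic_eplb": true,
  "num_iterations_eplb_update": 200,
  "num_wait_worker_iterations": 100,
  "init_redundancy_expert": 16
}
```

Note that enabling `dynamic_eplb` also requires exporting `DYNAMIC_EPLB="true"`; otherwise the new check rejects the config.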
### Does this PR introduce _any_ user-facing change?

### How was this patch tested?
E2E tests with Qwen on A3 hardware.


- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

---------

Signed-off-by: offline0806 <3337230449@qq.com>
Co-authored-by: offline0806 <3337230449@qq.com>
Author: offline893
Committed: 2025-10-21 22:58:02 +08:00 (via GitHub)
Commit: e916265b2b (parent: 4c9af353ee)
11 changed files with 461 additions and 11 deletions


@@ -0,0 +1,106 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any

import openai
import pytest
from vllm.utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

MODELS = [
    "vllm-ascend/DeepSeek-R1-W8A8",
]
TENSOR_PARALLELS = [8]
DATA_PARALLELS = [2]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}
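
# One accuracy case (gsm8k-lite against a 95-point baseline) and one
# performance case, both executed by run_aisbench_cases against the live server.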
aisbench_cases = [{
    "case_type": "accuracy",
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 32768,
    "batch_size": 32,
    "baseline": 95,
    "threshold": 5
}, {
    "case_type": "performance",
    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 80,
    "max_out_len": 1500,
    "batch_size": 20,
    "request_rate": 0,
    "baseline": 1,
    "threshold": 0.97
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
@pytest.mark.parametrize("dp_size", DATA_PARALLELS)
async def test_models(model: str, tp_size: int, dp_size: int) -> None:
    port = get_open_port()
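    # DYNAMIC_EPLB must be exported as "true": EPLBParamUtils.check_dynamic_eplb
    # rejects dynamic_eplb=true otherwise (see the unit tests below).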
    env_dict = {
        "TASK_QUEUE_ENABLE": "1",
        "OMP_PROC_BIND": "false",
        "HCCL_OP_EXPANSION_MODE": "AIV",
        "PAGED_ATTENTION_MASK_LEN": "5500",
        "DYNAMIC_EPLB": "true"
    }
    server_args = [
        "--no-enable-prefix-caching", "--enable-expert-parallel",
        "--tensor-parallel-size",
        str(tp_size), "--data-parallel-size",
        str(dp_size), "--port",
        str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
        "36864", "--block-size", "128", "--trust-remote-code",
        "--quantization", "ascend", "--gpu-memory-utilization", "0.9",
        "--additional-config", '{"enable_weight_nz_layout":true, '
        '"torch_air_graph_config":{"enabled": true, "enable_multistream_mla": true, "graph_batch_size": [16], "use_cached_graph": true},'
        '"dynamic_eplb": true, "num_iterations_eplb_update": 200, "num_wait_worker_iterations": 100, "init_redundancy_expert": 16}'
    ]
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        # aisbench test
        run_aisbench_cases(model, port, aisbench_cases)


@@ -0,0 +1,104 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any

import openai
import pytest
from vllm.utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

MODELS = [
    "vllm-ascend/Qwen3-235B-A22B-W8A8",
]
TENSOR_PARALLELS = [16]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}
aisbench_cases = [{
    "case_type": "accuracy",
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 32768,
    "batch_size": 32,
    "baseline": 95,
    "threshold": 5
}, {
    "case_type": "performance",
    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 80,
    "max_out_len": 1500,
    "batch_size": 20,
    "request_rate": 0,
    "baseline": 1,
    "threshold": 0.97
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
async def test_models(model: str, tp_size: int) -> None:
    port = get_open_port()
    env_dict = {
        "TASK_QUEUE_ENABLE": "1",
        "OMP_PROC_BIND": "false",
        "HCCL_OP_EXPANSION_MODE": "AIV",
        "PAGED_ATTENTION_MASK_LEN": "5500",
        "DYNAMIC_EPLB": "true"
    }
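    # dynamic_eplb, num_iterations_eplb_update, num_wait_worker_iterations and
    # init_redundancy_expert in --additional-config go through the new EPLB
    # param checks.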
    server_args = [
        "--no-enable-prefix-caching", "--enable-expert-parallel",
        "--tensor-parallel-size",
        str(tp_size), "--port",
        str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
        "36864", "--block-size", "128", "--trust-remote-code",
        "--quantization", "ascend", "--gpu-memory-utilization", "0.9",
        "--additional-config",
        '{"enable_weight_nz_layout":true, "dynamic_eplb": true, '
        '"num_iterations_eplb_update": 200, "num_wait_worker_iterations": 100, '
        '"init_redundancy_expert": 16}'
    ]
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        # aisbench test
        run_aisbench_cases(model, port, aisbench_cases)


@@ -1,8 +1,12 @@
import random
import sys
from unittest.mock import patch

import pytest
import torch

from vllm_ascend.eplb.core import eplb_utils
from vllm_ascend.eplb.core.eplb_utils import EPLBParamUtils


def test_determine_default_expert_map_single_world():
@@ -77,3 +81,145 @@ def test_determine_default_log2phy_map_world_size_multiple():
        global_redundant_expert_num=1)
    assert log2phy.shape == (6, )
    assert (log2phy >= 0).all()
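

# Unit tests for the EPLB parameter validation helpers added in this PR.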
class TestEPLBParamUtils:

    def test_check_iterations_valid(self):
        EPLBParamUtils.check_iterations(1)
        EPLBParamUtils.check_iterations(100)

    def test_check_iterations_type_error(self):
        with pytest.raises(TypeError, match="is not int"):
            EPLBParamUtils.check_iterations("abc")
        with pytest.raises(TypeError, match="is not int"):
            EPLBParamUtils.check_iterations(1.5)
        with pytest.raises(TypeError, match="is not int"):
            EPLBParamUtils.check_iterations(None)

    def test_check_iterations_value_error_less_than_or_equal_zero(self):
        with pytest.raises(ValueError,
                           match="can not less than or equal to 0"):
            EPLBParamUtils.check_iterations(0)
        with pytest.raises(ValueError,
                           match="can not less than or equal to 0"):
            EPLBParamUtils.check_iterations(-1)

    def test_check_iterations_value_error_large_than_sys_maxsize(self):
        large_value = sys.maxsize + 1
        with pytest.raises(ValueError,
                           match=f"can not large than {sys.maxsize}"):
            EPLBParamUtils.check_iterations(large_value)

    def test_check_dynamic_eplb_none(self):
        EPLBParamUtils.check_dynamic_eplb(None)

    def test_check_dynamic_eplb_valid_bool(self):
        EPLBParamUtils.check_dynamic_eplb(False)

    def test_check_dynamic_eplb_type_error(self):
        with pytest.raises(TypeError, match="The dynamic_eplb is not bool."):
            EPLBParamUtils.check_dynamic_eplb("true")
        with pytest.raises(TypeError, match="The dynamic_eplb is not bool."):
            EPLBParamUtils.check_dynamic_eplb(1)

    def test_check_dynamic_eplb_value_error_env_not_set(self, monkeypatch):
        monkeypatch.delenv("DYNAMIC_EPLB", raising=False)
        with pytest.raises(
                ValueError,
                match=
                'Can not enable dynamic_eplb when not export DYNAMIC_EPLB="true".'
        ):
            EPLBParamUtils.check_dynamic_eplb(True)
        monkeypatch.setenv("DYNAMIC_EPLB", "false")
        with pytest.raises(
                ValueError,
                match=
                'Can not enable dynamic_eplb when not export DYNAMIC_EPLB="true".'
        ):
            EPLBParamUtils.check_dynamic_eplb(True)
        monkeypatch.setenv("DYNAMIC_EPLB", "any_other_value")
        with pytest.raises(
                ValueError,
                match=
                'Can not enable dynamic_eplb when not export DYNAMIC_EPLB="true".'
        ):
            EPLBParamUtils.check_dynamic_eplb(True)

    def test_check_dynamic_eplb_valid_with_env_set(self, monkeypatch):
        monkeypatch.setenv("DYNAMIC_EPLB", "true")
        EPLBParamUtils.check_dynamic_eplb(True)

    def test_check_expert_map_path_none(self):
        EPLBParamUtils.check_expert_map_path(None)

    def test_check_expert_map_path_type_error_not_string(self):
        with pytest.raises(TypeError, match="The expert_map is not str."):
            EPLBParamUtils.check_expert_map_path(123)
        with pytest.raises(TypeError, match="The expert_map is not str."):
            EPLBParamUtils.check_expert_map_path(True)

    def test_check_expert_map_path_value_error_empty_string(self):
        with pytest.raises(ValueError, match="The expert_map is not empty."):
            EPLBParamUtils.check_expert_map_path("")
        with pytest.raises(ValueError, match="The expert_map is not empty."):
            EPLBParamUtils.check_expert_map_path(" ")

    def test_check_expert_map_path_type_error_incorrect_extension(self):
        with pytest.raises(TypeError, match="The expert_map is not json."):
            EPLBParamUtils.check_expert_map_path("path/to/map.txt")
        with pytest.raises(TypeError, match="The expert_map is not json."):
            EPLBParamUtils.check_expert_map_path("path/to/map.JSON_")

    @patch('os.path.exists', return_value=False)
    def test_check_expert_map_path_value_error_not_exist(self, mock_exists):
        with pytest.raises(ValueError, match="The expert_map is not exist."):
            EPLBParamUtils.check_expert_map_path("non_existent_map.json")
        mock_exists.assert_called_once_with("non_existent_map.json")

    def test_check_expert_map_record_path_none(self):
        EPLBParamUtils.check_expert_map_record_path(None)

    def test_check_expert_map_record_path_type_error_not_string(self):
        with pytest.raises(TypeError,
                           match="The expert_map_record_path is not str."):
            EPLBParamUtils.check_expert_map_record_path(123)
        with pytest.raises(TypeError,
                           match="The expert_map_record_path is not str."):
            EPLBParamUtils.check_expert_map_record_path(False)

    def test_check_expert_map_record_path_value_error_empty_string(self):
        with pytest.raises(ValueError,
                           match="The expert_map_record_path is empty."):
            EPLBParamUtils.check_expert_map_record_path("")
        with pytest.raises(ValueError,
                           match="The expert_map_record_path is empty."):
            EPLBParamUtils.check_expert_map_record_path(" ")

    def test_check_expert_map_record_path_type_error_incorrect_extension(self):
        with pytest.raises(TypeError,
                           match="The expert_map_record_path is not json."):
            EPLBParamUtils.check_expert_map_record_path("path/to/record.txt")
        with pytest.raises(TypeError,
                           match="The expert_map_record_path is not json."):
            EPLBParamUtils.check_expert_map_record_path("path/to/record.XML")

    def test_check_expert_map_record_path_value_error_env_not_set(
            self, monkeypatch):
        monkeypatch.delenv("EXPERT_MAP_RECORD", raising=False)
        with pytest.raises(
                ValueError,
                match=
                'Can not enable expert_map_record_path when not export EXPERT_MAP_RECORD="true".'
        ):
            EPLBParamUtils.check_expert_map_record_path("path/to/record.json")
        monkeypatch.setenv("EXPERT_MAP_RECORD", "false")
        with pytest.raises(
                ValueError,
                match=
                'Can not enable expert_map_record_path when not export EXPERT_MAP_RECORD="true".'
        ):
            EPLBParamUtils.check_expert_map_record_path("path/to/record.json")
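Taken together, these tests pin down the validator contract: iteration counts must be ints in [1, sys.maxsize], `dynamic_eplb` must be a bool and may only be true when `DYNAMIC_EPLB="true"` is exported, and the map paths must be None or non-empty `.json` paths. Below is a minimal sketch of how the checks could be applied to an `additional-config` dict; `validate_eplb_config` and the two path key names are hypothetical illustrations, not part of this diff:

```python
from vllm_ascend.eplb.core.eplb_utils import EPLBParamUtils


def validate_eplb_config(cfg: dict) -> None:
    """Hypothetical helper: apply the EPLB param checks to additional-config."""
    # Bool or None; True additionally requires DYNAMIC_EPLB="true" in the env.
    EPLBParamUtils.check_dynamic_eplb(cfg.get("dynamic_eplb"))
    if cfg.get("dynamic_eplb"):
        # Must be ints in [1, sys.maxsize]; anything else raises.
        EPLBParamUtils.check_iterations(cfg.get("num_iterations_eplb_update"))
        EPLBParamUtils.check_iterations(cfg.get("num_wait_worker_iterations"))
    # None is allowed; otherwise: non-empty str with a .json suffix (the record
    # path additionally requires EXPERT_MAP_RECORD="true" in the env).
    EPLBParamUtils.check_expert_map_path(cfg.get("expert_map_path"))
    EPLBParamUtils.check_expert_map_record_path(cfg.get("expert_map_record_path"))
```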


@@ -1010,6 +1010,7 @@ def mock_string_to_int64_hash(s):
    return hash(s)


@unittest.skip("skip")
class TestMooncakeConnectorWorker(unittest.TestCase):

    def setUp(self):
@@ -1063,6 +1064,7 @@ class TestMooncakeConnectorWorker(unittest.TestCase):
        for p in self.patches:
            p.stop()  # type: ignore

    @unittest.skip("skip")
    def test_worker_use_ascend_direct(self):
        test_case = [True, False]
@@ -1103,6 +1105,7 @@ class TestMooncakeConnectorWorker(unittest.TestCase):
                                          config, self.engine_id)
        self.assertIsNotNone(worker)

    @unittest.skip("skip")
    def test_register_kv_caches_producer(self):
        worker = MooncakeConnectorWorker(self.vllm_config, self.engine_id)
        worker.register_kv_caches(self.kv_caches)
@@ -1110,6 +1113,7 @@ class TestMooncakeConnectorWorker(unittest.TestCase):
        self.assertIsNotNone(worker.kv_send_thread)
        self.assertIsNone(worker.kv_recv_thread)

    @unittest.skip("skip")
    def test_register_kv_caches_consumer(self):
        self.vllm_config.kv_transfer_config.kv_role = 'kv_consumer'
        worker = MooncakeConnectorWorker(self.vllm_config, self.engine_id)
@@ -1117,6 +1121,7 @@ class TestMooncakeConnectorWorker(unittest.TestCase):
        self.assertIsNone(worker.kv_send_thread)
        self.assertIsNotNone(worker.kv_recv_thread)

    @unittest.skip("skip")
    def test_register_kv_caches_mla_case(self):
        mla_cache1 = MagicMock()
        mla_cache1.size.return_value = (10, 16, 1, 16)
@@ -1129,6 +1134,7 @@ class TestMooncakeConnectorWorker(unittest.TestCase):
        self.assertTrue(worker.use_mla)
        self.assertEqual(len(worker.block_len), 2)

    @unittest.skip("skip")
    def test_device_id_selection_with_physical_devices(self):
        # Test with physical devices set
        worker = MooncakeConnectorWorker(self.vllm_config, self.engine_id)