[CI] Fix EPLB CI. (#4052)

### What this PR does / why we need it?
This PR fixes the EPLB CI. The EPLB e2e tests (DeepSeek-R1-W8A8 and Qwen3-235B-A22B-W8A8) are now parametrized by a `mode`, the EPLB env var and config entries are applied only when `mode == "eplb"`, and the `--additional-config` value is built from a dict with `json.dumps` instead of a hand-written JSON string.

- vLLM version: v0.11.0
- vLLM main: 83f478bb19
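
Both tests now assemble the launch configuration per mode. A minimal sketch of the pattern, condensed from the diffs below (`build_launch_config` is an illustrative helper, not a function added by this PR, and the values are abbreviated):

```python
import json
from typing import Any


def build_launch_config(mode: str, port: int) -> tuple[dict[str, str], list[str]]:
    """Illustrative only: mirrors how the tests assemble env vars and args."""
    env_dict = {"HCCL_BUFFSIZE": "1024"}  # always-on env vars (abbreviated)
    additional_config: dict[str, Any] = {
        "ascend_scheduler_config": {"enabled": False},
    }
    server_args = ["--quantization", "ascend", "--port", str(port)]
    if mode == "eplb":
        # EPLB knobs are applied only when the parametrized mode asks for
        # them, instead of being hard-coded into every run.
        env_dict["DYNAMIC_EPLB"] = "true"
        additional_config["dynamic_eplb"] = True
        additional_config["num_iterations_eplb_update"] = 2048
        additional_config["num_wait_worker_iterations"] = 200
    # json.dumps replaces the hand-maintained JSON string literal, so the
    # --additional-config value is always well-formed JSON.
    server_args.extend(["--additional-config", json.dumps(additional_config)])
    return env_dict, server_args
```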

---------

Signed-off-by: offline0806 <3337230449@qq.com>
Co-authored-by: offline0806 <3337230449@qq.com>
Authored by offline893 on 2025-11-07 23:53:35 +08:00, committed by GitHub
parent e687d6af85, commit f7ca3bc0fa
2 changed files with 74 additions and 66 deletions

File 1/2: EPLB e2e test for DeepSeek-R1-W8A8

@@ -14,6 +14,7 @@
 # limitations under the License.
 # This file is a part of the vllm-ascend project.
 #
+import json
 from typing import Any

 import openai
@@ -27,8 +28,7 @@ MODELS = [
     "vllm-ascend/DeepSeek-R1-W8A8",
 ]
-TENSOR_PARALLELS = [8]
-DATA_PARALLELS = [2]
+MODES = ["eplb"]

 prompts = [
     "San Francisco is a",
@@ -38,55 +38,52 @@ api_keyword_args = {
     "max_tokens": 10,
 }
-aisbench_cases = [{
+aisbench_gsm8k = [{
     "case_type": "accuracy",
     "dataset_path": "vllm-ascend/gsm8k-lite",
     "request_conf": "vllm_api_general_chat",
     "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
     "max_out_len": 32768,
     "batch_size": 32,
-    "baseline": 93,
+    "top_k": 20,
+    "baseline": 95,
     "threshold": 5
-}, {
-    "case_type": "performance",
-    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
-    "request_conf": "vllm_api_stream_chat",
-    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
-    "num_prompts": 80,
-    "max_out_len": 1500,
-    "batch_size": 20,
-    "request_rate": 0,
-    "baseline": 1,
-    "threshold": 0.97
 }]
+mode_aisbench = {"eplb": aisbench_gsm8k}

 @pytest.mark.asyncio
 @pytest.mark.parametrize("model", MODELS)
-@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
-@pytest.mark.parametrize("dp_size", DATA_PARALLELS)
-async def test_models(model: str, tp_size: int, dp_size: int) -> None:
+@pytest.mark.parametrize("mode", MODES)
+async def test_models(model: str, mode: str) -> None:
     port = get_open_port()
     env_dict = {
         "TASK_QUEUE_ENABLE": "1",
         "OMP_NUM_THREADS": "10",
         "OMP_PROC_BIND": "false",
         "HCCL_OP_EXPANSION_MODE": "AIV",
         "PAGED_ATTENTION_MASK_LEN": "5500",
-        "DYNAMIC_EPLB": "true",
-        "HCCL_BUFFSIZE": "1024"
+        "HCCL_BUFFSIZE": "1024",
+        "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
+        "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1"
     }
+    additional_config: dict[str, Any] = {
+        "ascend_scheduler_config": {
+            "enabled": False
+        },
+    }
     server_args = [
-        "--no-enable-prefix-caching", "--enable-expert-parallel",
-        "--tensor-parallel-size",
-        str(tp_size), "--data-parallel-size",
-        str(dp_size), "--port",
-        str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
-        "36864", "--block-size", "128", "--trust-remote-code",
-        "--quantization", "ascend", "--gpu-memory-utilization", "0.9",
-        "--additional-config", '{"enable_weight_nz_layout":true, '
-        '"torch_air_graph_config":{"enabled": true, "enable_multistream_mla": true, "graph_batch_size": [16], "use_cached_graph": true},'
-        '"dynamic_eplb": true, "num_iterations_eplb_update": 1000, "num_wait_worker_iterations": 200'
+        "--quantization", "ascend", "--async-scheduling",
+        "--data-parallel-size", "4", "--tensor-parallel-size", "4",
+        "--enable-expert-parallel", "--port",
+        str(port), "--max-model-len", "40960", "--max-num-batched-tokens",
+        "8192", "--max-num-seqs", "12", "--trust-remote-code",
+        "--gpu-memory-utilization", "0.9"
     ]
+    if mode == "eplb":
+        env_dict["DYNAMIC_EPLB"] = "true"
+        additional_config["dynamic_eplb"] = True
+        additional_config["num_iterations_eplb_update"] = 2048
+        additional_config["num_wait_worker_iterations"] = 200
+    server_args.extend(["--additional-config", json.dumps(additional_config)])
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,
     }
@@ -103,5 +100,10 @@ async def test_models(model: str, tp_size: int, dp_size: int) -> None:
     )
     choices: list[openai.types.CompletionChoice] = batch.choices
     assert choices[0].text, "empty response"
+    print(choices)
     # aisbench test
-    run_aisbench_cases(model, port, aisbench_cases)
+    aisbench_cases = mode_aisbench[mode]
+    run_aisbench_cases(model,
+                       port,
+                       aisbench_cases,
+                       server_args=server_args)
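
The switch from a hand-written `--additional-config` literal to `json.dumps` is the load-bearing part of this file: as rendered in the hunk above, the removed string never closes its outer brace (the Qwen3 test's literal below does close it). Assuming the flag value is parsed as standard JSON on the receiving side, a malformed literal fails at startup; a sketch of the difference:

```python
import json

# The removed literal from the hunk above, as it appears in the diff
# (note: no closing '}' for the outer object).
old_literal = ('{"enable_weight_nz_layout":true, '
               '"torch_air_graph_config":{"enabled": true, '
               '"enable_multistream_mla": true, '
               '"graph_batch_size": [16], "use_cached_graph": true},'
               '"dynamic_eplb": true, "num_iterations_eplb_update": 1000, '
               '"num_wait_worker_iterations": 200')
try:
    json.loads(old_literal)
except json.JSONDecodeError as err:
    print(f"hand-written literal rejected: {err}")

# Building a dict and serializing it cannot produce malformed JSON.
new_value = json.dumps({"dynamic_eplb": True,
                        "num_iterations_eplb_update": 2048,
                        "num_wait_worker_iterations": 200})
assert json.loads(new_value)["dynamic_eplb"] is True
```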

File 2/2: EPLB e2e test for Qwen3-235B-A22B-W8A8

@@ -14,6 +14,7 @@
 # limitations under the License.
 # This file is a part of the vllm-ascend project.
 #
+import json
 from typing import Any

 import openai
@@ -27,7 +28,7 @@ MODELS = [
     "vllm-ascend/Qwen3-235B-A22B-W8A8",
 ]
-TENSOR_PARALLELS = [16]
+MODES = ["eplb"]

 prompts = [
     "San Francisco is a",
@@ -37,53 +38,53 @@ api_keyword_args = {
     "max_tokens": 10,
 }
-aisbench_cases = [{
+aisbench_gsm8k = [{
     "case_type": "accuracy",
     "dataset_path": "vllm-ascend/gsm8k-lite",
     "request_conf": "vllm_api_general_chat",
     "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
     "max_out_len": 32768,
     "batch_size": 32,
-    "baseline": 93,
-    "threshold": 5
-}, {
-    "case_type": "performance",
-    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
-    "request_conf": "vllm_api_stream_chat",
-    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
-    "num_prompts": 80,
-    "max_out_len": 1500,
-    "batch_size": 20,
-    "request_rate": 0,
-    "baseline": 1,
-    "threshold": 0.97
+    "top_k": 20,
+    "baseline": 95,
+    "threshold": 5,
+    "topk": 20
 }]
+mode_aisbench = {"eplb": aisbench_gsm8k}

 @pytest.mark.asyncio
 @pytest.mark.parametrize("model", MODELS)
-@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
-async def test_models(model: str, tp_size: int) -> None:
+@pytest.mark.parametrize("mode", MODES)
+async def test_models(model: str, mode: str) -> None:
     port = get_open_port()
     env_dict = {
         "TASK_QUEUE_ENABLE": "1",
         "OMP_NUM_THREADS": "10",
         "OMP_PROC_BIND": "false",
         "HCCL_OP_EXPANSION_MODE": "AIV",
         "PAGED_ATTENTION_MASK_LEN": "5500",
-        "DYNAMIC_EPLB": "true",
-        "HCCL_BUFFSIZE": "1024"
+        "HCCL_BUFFSIZE": "1024",
+        "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
+        "VLLM_ASCEND_ENABLE_FLASHCOMM1": "1"
     }
+    additional_config: dict[str, Any] = {
+        "ascend_scheduler_config": {
+            "enabled": False
+        },
+    }
     server_args = [
-        "--no-enable-prefix-caching", "--enable-expert-parallel",
-        "--tensor-parallel-size",
-        str(tp_size), "--port",
-        str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
-        "36864", "--block-size", "128", "--trust-remote-code",
-        "--quantization", "ascend", "--gpu-memory-utilization", "0.9",
-        "--additional-config",
-        '{"enable_weight_nz_layout":true, "dynamic_eplb": true, '
-        '"num_iterations_eplb_update": 1000, "num_wait_worker_iterations": 200}'
+        "--quantization", "ascend", "--async-scheduling",
+        "--data-parallel-size", "4", "--tensor-parallel-size", "4",
+        "--enable-expert-parallel", "--port",
+        str(port), "--max-model-len", "40960", "--max-num-batched-tokens",
+        "8192", "--max-num-seqs", "12", "--trust-remote-code",
+        "--gpu-memory-utilization", "0.9"
     ]
+    if mode == "eplb":
+        env_dict["DYNAMIC_EPLB"] = "true"
+        additional_config["dynamic_eplb"] = True
+        additional_config["num_iterations_eplb_update"] = 2048
+        additional_config["num_wait_worker_iterations"] = 200
+    server_args.extend(["--additional-config", json.dumps(additional_config)])
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,
     }
@@ -100,5 +101,10 @@ async def test_models(model: str, tp_size: int) -> None:
     )
     choices: list[openai.types.CompletionChoice] = batch.choices
     assert choices[0].text, "empty response"
+    print(choices)
     # aisbench test
-    run_aisbench_cases(model, port, aisbench_cases)
+    aisbench_cases = mode_aisbench[mode]
+    run_aisbench_cases(model,
+                       port,
+                       aisbench_cases,
+                       server_args=server_args)
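
On the aisbench case fields: a plausible reading of `baseline`/`threshold` for the accuracy case is an accuracy floor of `baseline - threshold` (the actual comparison lives in `run_aisbench_cases` in the repo's test utilities, so treat this as an assumption):

```python
# Hypothetical restatement of the accuracy gate; run_aisbench_cases is the
# real judge, this only illustrates what baseline/threshold appear to mean.
def accuracy_ok(measured: float, baseline: float = 95, threshold: float = 5) -> bool:
    # baseline 95 with threshold 5 accepts any measured accuracy >= 90
    return measured >= baseline - threshold


assert accuracy_ok(93.4)
assert not accuracy_ok(88.0)
```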