[TEST] Update nightly cases and add mtpx (#4111)

### What this PR does / why we need it?
This PR updates several nightly test cases and adds mtpx cases so that they are exercised daily.
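
For context, the new mtpx case enables DeepSeek MTP speculative decoding together with full-decode-only graph capture. Below is a minimal sketch of the flags it passes, with values taken from the diff below; it only illustrates how the configs are serialized into server arguments:

```python
import json

# Values copied from the diff below: one speculative token via DeepSeek MTP,
# plus FULL_DECODE_ONLY graph capture, both passed to the server as JSON strings.
speculative_config = {
    "num_speculative_tokens": 1,
    "method": "deepseek_mtp",
}
compilation_config = {
    "cudagraph_capture_sizes": [24],
    "cudagraph_mode": "FULL_DECODE_ONLY",
}
server_args = [
    "--speculative-config", json.dumps(speculative_config),
    "--compilation-config", json.dumps(compilation_config),
]
```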
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
By running the updated nightly test cases.

- vLLM version: v0.11.0
- vLLM main: 83f478bb19

---------

Signed-off-by: jiangyunfan1 <jiangyunfan1@h-partners.com>
jiangyunfan1
2025-11-11 17:39:58 +08:00
committed by GitHub
parent 9cc42226d5
commit 0e6e08e939
8 changed files with 249 additions and 66 deletions

View File

@@ -28,8 +28,6 @@ MODELS = [
"vllm-ascend/DeepSeek-R1-W8A8",
]
MODES = ["eplb"]
prompts = [
"San Francisco is a",
]
@@ -38,51 +36,69 @@ api_keyword_args = {
"max_tokens": 10,
}
aisbench_gsm8k = [{
aisbench_cases = [{
"case_type": "accuracy",
"dataset_path": "vllm-ascend/gsm8k-lite",
"request_conf": "vllm_api_general_chat",
"dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
"max_out_len": 32768,
"batch_size": 32,
"top_k": 20,
"baseline": 95,
"threshold": 5
}]
mode_aisbench = {"eplb": aisbench_gsm8k}
@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("mode", MODES)
async def test_models(model: str, mode: str) -> None:
async def test_models(model: str) -> None:
port = get_open_port()
env_dict = {
"OMP_NUM_THREADS": "10",
"OMP_NUM_THREADS": "100",
"OMP_PROC_BIND": "false",
"HCCL_BUFFSIZE": "1024",
"PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True",
"VLLM_ASCEND_ENABLE_FLASHCOMM1": "1"
"HCCL_BUFFSIZE": "200",
"VLLM_ASCEND_ENABLE_MLAPO": "1",
"VLLM_RPC_TIMEOUT": "3600000",
"VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS": "3600000",
"DISABLE_L2_CACHE": "1",
"DYNAMIC_EPLB": "true",
}
speculative_config = {
"num_speculative_tokens": 1,
"method": "deepseek_mtp"
}
compilation_config = {
"cudagraph_capture_sizes": [24],
"cudagraph_mode": "FULL_DECODE_ONLY"
}
additional_config: dict[str, Any] = {
"ascend_scheduler_config": {
"enabled": False
},
"torchair_graph_config": {
"enabled": True
},
"enable_shared_expert_dp": False,
"multistream_overlap_shared_expert": False,
"dynamic_eplb": True,
"num_iterations_eplb_update": 14000,
"num_wait_worker_iterations": 30,
"init_redundancy_expert": 0,
"gate_eplb": False
}
server_args = [
"--quantization", "ascend", "--async-scheduling",
"--data-parallel-size", "4", "--tensor-parallel-size", "4",
"--enable-expert-parallel", "--port",
str(port), "--max-model-len", "40960", "--max-num-batched-tokens",
"8192", "--max-num-seqs", "12", "--trust-remote-code",
"--gpu-memory-utilization", "0.9"
"--quantization", "ascend", "--seed", "1024",
"--no-enable-prefix-caching", "--data-parallel-size", "4",
"--tensor-parallel-size", "4", "--enable-expert-parallel", "--port",
str(port), "--max-model-len", "40000", "--max-num-batched-tokens",
"4096", "--max-num-seqs", "12", "--trust-remote-code",
"--gpu-memory-utilization", "0.92"
]
if mode == "eplb":
env_dict["DYNAMIC_EPLB"] = "true"
additional_config["dynamic_eplb"] = True
additional_config["num_iterations_eplb_update"] = 2048
additional_config["num_wait_worker_iterations"] = 200
server_args.extend(
["--speculative-config",
json.dumps(speculative_config)])
server_args.extend(
["--compilation-config",
json.dumps(compilation_config)])
server_args.extend(["--additional-config", json.dumps(additional_config)])
request_keyword_args: dict[str, Any] = {
**api_keyword_args,
@@ -102,7 +118,6 @@ async def test_models(model: str, mode: str) -> None:
assert choices[0].text, "empty response"
print(choices)
# aisbench test
aisbench_cases = mode_aisbench[mode]
run_aisbench_cases(model,
port,
aisbench_cases,
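
For reference, the EPLB settings in this file are no longer selected through a `mode` branch; they are fixed in `additional_config` and serialized into a single `--additional-config` argument. A standalone sketch of that serialization, with values copied from the hunk above:

```python
import json

# Values copied from the hunk above; this only shows the JSON string that
# ends up in the --additional-config server argument.
additional_config = {
    "ascend_scheduler_config": {"enabled": False},
    "torchair_graph_config": {"enabled": True},
    "enable_shared_expert_dp": False,
    "multistream_overlap_shared_expert": False,
    "dynamic_eplb": True,
    "num_iterations_eplb_update": 14000,
    "num_wait_worker_iterations": 30,
    "init_redundancy_expert": 0,
    "gate_eplb": False,
}
server_args = ["--additional-config", json.dumps(additional_config)]
```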

View File

@@ -28,8 +28,6 @@ MODELS = [
"vllm-ascend/Qwen3-235B-A22B-W8A8",
]
MODES = ["eplb"]
prompts = [
"San Francisco is a",
]
@@ -38,7 +36,7 @@ api_keyword_args = {
"max_tokens": 10,
}
aisbench_gsm8k = [{
aisbench_cases = [{
"case_type": "accuracy",
"dataset_path": "vllm-ascend/gsm8k-lite",
"request_conf": "vllm_api_general_chat",
@@ -47,17 +45,13 @@ aisbench_gsm8k = [{
"batch_size": 32,
"top_k": 20,
"baseline": 95,
"threshold": 5,
"topk": 20
"threshold": 5
}]
mode_aisbench = {"eplb": aisbench_gsm8k}
@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("mode", MODES)
async def test_models(model: str, mode: str) -> None:
async def test_models(model: str) -> None:
port = get_open_port()
env_dict = {
"OMP_NUM_THREADS": "10",
@@ -71,6 +65,7 @@ async def test_models(model: str, mode: str) -> None:
"enabled": False
},
}
compilation_config = {"cudagraph_mode": "FULL_DECODE_ONLY"}
server_args = [
"--quantization", "ascend", "--async-scheduling",
"--data-parallel-size", "4", "--tensor-parallel-size", "4",
@@ -79,11 +74,16 @@ async def test_models(model: str, mode: str) -> None:
"8192", "--max-num-seqs", "12", "--trust-remote-code",
"--gpu-memory-utilization", "0.9"
]
if mode == "eplb":
env_dict["DYNAMIC_EPLB"] = "true"
additional_config["dynamic_eplb"] = True
additional_config["num_iterations_eplb_update"] = 2048
additional_config["num_wait_worker_iterations"] = 200
env_dict["EXPERT_MAP_RECORD"] = "true"
env_dict["DYNAMIC_EPLB"] = "true"
additional_config["dynamic_eplb"] = True
additional_config["num_iterations_eplb_update"] = 14000
additional_config["num_wait_worker_iterations"] = 30
additional_config["init_redundancy_expert"] = 0
additional_config["gate_eplb"] = False
server_args.extend(
["--compilation-config",
json.dumps(compilation_config)])
server_args.extend(["--additional-config", json.dumps(additional_config)])
request_keyword_args: dict[str, Any] = {
**api_keyword_args,
@@ -103,7 +103,6 @@ async def test_models(model: str, mode: str) -> None:
assert choices[0].text, "empty response"
print(choices)
# aisbench test
aisbench_cases = mode_aisbench[mode]
run_aisbench_cases(model,
port,
aisbench_cases,

View File

@@ -14,6 +14,7 @@
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
import json
import os
from typing import Any
@@ -44,8 +45,8 @@ api_keyword_args = {
}
batch_size_dict = {
"linux-aarch64-a2-4": 44,
"linux-aarch64-a3-4": 46,
"linux-aarch64-a2-4": 72,
"linux-aarch64-a3-4": 76,
}
VLLM_CI_RUNNER = os.getenv("VLLM_CI_RUNNER", "linux-aarch64-a2-4")
performance_batch_size = batch_size_dict.get(VLLM_CI_RUNNER, 1)
@@ -80,21 +81,32 @@ async def test_models(model: str, mode: str, tp_size: int) -> None:
port = get_open_port()
env_dict = {
"TASK_QUEUE_ENABLE": "1",
"OMP_PROC_BIND": "false",
"VLLM_ASCEND_ENABLE_DENSE_OPTIMIZE": "1",
"HCCL_OP_EXPANSION_MODE": "AIV",
"PAGED_ATTENTION_MASK_LEN": "5500"
"VLLM_ASCEND_ENABLE_FLASHCOMM": "1",
"VLLM_ASCEND_ENABLE_PREFETCH_MLP": "1"
}
compilation_config = {
"cudagraph_mode":
"FULL_DECODE_ONLY",
"cudagraph_capture_sizes":
[1, 12, 16, 20, 24, 32, 48, 60, 64, 68, 72, 76, 80]
}
server_args = [
"--quantization", "ascend", "--no-enable-prefix-caching",
"--tensor-parallel-size",
str(tp_size), "--port",
str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
"36864", "--block-size", "128", "--trust-remote-code",
"--gpu-memory-utilization", "0.9", "--additional-config",
'{"enable_weight_nz_layout":true}'
str(port), "--max-model-len", "40960", "--max-num-batched-tokens",
"40960", "--block-size", "128", "--trust-remote-code",
"--reasoning-parser", "qwen3", "--gpu-memory-utilization", "0.9",
"--async-scheduling"
]
if mode == "single":
server_args.append("--enforce-eager")
if mode == "aclgraph":
server_args.extend(
["--compilation-config",
json.dumps(compilation_config)])
request_keyword_args: dict[str, Any] = {
**api_keyword_args,
}
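
For context, this file now switches between eager execution and ACL graph capture through the `mode` parameter. A standalone sketch of that branch, with names and values taken from the hunk above (the `mode` value here is only illustrative):

```python
import json

# Names and values copied from the hunk above; "mode" is set here purely for illustration.
compilation_config = {
    "cudagraph_mode": "FULL_DECODE_ONLY",
    "cudagraph_capture_sizes": [1, 12, 16, 20, 24, 32, 48, 60, 64, 68, 72, 76, 80],
}

mode = "aclgraph"
server_args: list[str] = []
if mode == "single":
    server_args.append("--enforce-eager")
if mode == "aclgraph":
    server_args.extend(["--compilation-config", json.dumps(compilation_config)])
```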

View File

@@ -56,9 +56,9 @@ aisbench_cases = [{
"dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
"request_conf": "vllm_api_stream_chat",
"dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
"num_prompts": 176,
"num_prompts": 240,
"max_out_len": 1500,
"batch_size": 44,
"batch_size": 60,
"baseline": 1,
"threshold": 0.97
}]
@@ -75,9 +75,8 @@ async def test_models(model: str, mode: str, tp_size: int) -> None:
"OMP_PROC_BIND": "false",
"HCCL_OP_EXPANSION_MODE": "AIV",
"VLLM_ASCEND_ENABLE_FLASHCOMM": "1",
"VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE": "1",
"VLLM_ASCEND_ENABLE_DEBSE_OPTIMIZE": "1",
"VLLM_ASCEND_ENABLE_PREFETCH": "1"
"VLLM_ASCEND_ENABLE_PREFETCH_MLP": "1"
}
server_args = [
"--tensor-parallel-size",