[Nightly][Refactor] Migrate nightly single-node model tests from .py to .yaml (#6503)
### What this PR does / why we need it?
This PR refactors the nightly single-node model tests by migrating their test configurations from Python scripts to a more maintainable YAML-based format.

| Original PR | Python (`.py`) | YAML (`.yaml`) |
| :--- | :--- | :--- |
| [#3568](https://github.com/vllm-project/vllm-ascend/pull/3568) | `test_deepseek_r1_0528_w8a8_eplb.py` | `DeepSeek-R1-0528-W8A8.yaml` |
| [#3631](https://github.com/vllm-project/vllm-ascend/pull/3631) | `test_deepseek_r1_0528_w8a8.py` | `DeepSeek-R1-0528-W8A8.yaml` |
| [#5874](https://github.com/vllm-project/vllm-ascend/pull/5874) | `test_deepseek_r1_w8a8_hbm.py` | `DeepSeek-R1-W8A8-HBM.yaml` |
| [#3908](https://github.com/vllm-project/vllm-ascend/pull/3908) | `test_deepseek_v3_2_w8a8.py` | `DeepSeek-V3.2-W8A8.yaml` |
| [#5682](https://github.com/vllm-project/vllm-ascend/pull/5682) | `test_kimi_k2_thinking.py` | `Kimi-K2-Thinking.yaml` |
| [#4111](https://github.com/vllm-project/vllm-ascend/pull/4111) | `test_mtpx_deepseek_r1_0528_w8a8.py` | `MTPX-DeepSeek-R1-0528-W8A8.yaml` |
| [#3733](https://github.com/vllm-project/vllm-ascend/pull/3733) | `test_prefix_cache_deepseek_r1_0528_w8a8.py` | `Prefix-Cache-DeepSeek-R1-0528-W8A8.yaml` |
| [#6543](https://github.com/vllm-project/vllm-ascend/pull/6543) | `test_qwen3_235b_w8a8.py` | `Qwen3-235B-A22B-W8A8.yaml` |
| [#6543](https://github.com/vllm-project/vllm-ascend/pull/6543) | `test_qwen3_235b_a22b_w8a8_eplb.py` | `Qwen3-235B-A22B-W8A8.yaml` |
| [#3973](https://github.com/vllm-project/vllm-ascend/pull/3973) | `test_qwen3_30b_w8a8.py` | `Qwen3-30B-A3B-W8A8.yaml` |
| [#3541](https://github.com/vllm-project/vllm-ascend/pull/3541) | `test_qwen3_32b_int8.py` | `Qwen3-32B-Int8.yaml` |
| [#3757](https://github.com/vllm-project/vllm-ascend/pull/3757) | `test_qwq_32b.py` | `QwQ-32B.yaml` |
| [#5616](https://github.com/vllm-project/vllm-ascend/pull/5616) | `test_qwen3_next_w8a8.py` | `Qwen3-Next-80B-A3B-Instruct-W8A8.yaml` |
| [#3541](https://github.com/vllm-project/vllm-ascend/pull/3541) | `test_qwen2_5_vl_7b.py` | `Qwen2.5-VL-7B-Instruct.yaml` |
| [#5301](https://github.com/vllm-project/vllm-ascend/pull/5301) | `test_qwen2_5_vl_7b_epd.py` | `Qwen2.5-VL-7B-Instruct-EPD.yaml` |
| [#3707](https://github.com/vllm-project/vllm-ascend/pull/3707) | `test_qwen2_5_vl_32b.py` | `Qwen2.5-VL-32B-Instruct.yaml` |
| [#3676](https://github.com/vllm-project/vllm-ascend/pull/3676) | `test_qwen3_32b_int8_a3_feature_stack3.py` | `Qwen3-32B-Int8-A3-Feature-Stack3.yaml` |
| [#3709](https://github.com/vllm-project/vllm-ascend/pull/3709) | `test_prefix_cache_qwen3_32b_int8.py` | `Prefix-Cache-Qwen3-32B-Int8.yaml` |
| [#5395](https://github.com/vllm-project/vllm-ascend/pull/5395) | `test_qwen3_next.py` | `Qwen3-Next-80B-A3B-Instruct-A2.yaml` |
| [#3474](https://github.com/vllm-project/vllm-ascend/pull/3474) | `test_qwen3_32b.py` | `Qwen3-32B.yaml` |
| [#3541](https://github.com/vllm-project/vllm-ascend/pull/3541) | `test_qwen3_32b_int8.py` | `Qwen3-32B-Int8-A2.yaml` |

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
- vLLM version: v0.15.0
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.15.0

---------

Signed-off-by: MrZ20 <2609716663@qq.com>
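For orientation, here is a minimal sketch of how a test harness could consume one of these YAML case files. It is illustrative only: the file path, the schema handling, and the test body are assumptions based on the config shown in the diff below, not the runner code this PR actually adds.

```python
# Hypothetical sketch of a YAML-driven nightly runner; path, schema handling,
# and test body are assumptions, not the code added by this PR.
import yaml
import pytest


def load_cases(path: str) -> list:
    """Read a case file like the one in this diff and return its test_cases."""
    with open(path) as f:
        return yaml.safe_load(f).get("test_cases", [])


CASES = load_cases("Qwen3-32B.yaml")  # illustrative file name


@pytest.mark.parametrize("case", CASES, ids=lambda c: c["name"])
def test_nightly_model(case):
    # A real harness would export case["envs"], start the server with
    # case["server_cmd"] + case.get("server_cmd_extra", []), then run the
    # benchmarks in case["benchmarks"] against their baselines/thresholds.
    assert case["model"]
```

Kept declarative like this, adding a model variant becomes a YAML edit rather than a new Python test file.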
@@ -0,0 +1,78 @@
# ==========================================
# Shared Configurations
# ==========================================

_envs: &envs
  TASK_QUEUE_ENABLE: "1"
  HCCL_OP_EXPANSION_MODE: "AIV"
  VLLM_ASCEND_ENABLE_FLASHCOMM: "1"
  SERVER_PORT: "DEFAULT_PORT"

_server_cmd: &server_cmd
  - "--quantization"
  - "ascend"
  - "--no-enable-prefix-caching"
  - "--tensor-parallel-size"
  - "4"
  - "--port"
  - "$SERVER_PORT"
  - "--max-model-len"
  - "40960"
  - "--max-num-batched-tokens"
  - "40960"
  - "--block-size"
  - "128"
  - "--trust-remote-code"
  - "--reasoning-parser"
  - "qwen3"
  - "--gpu-memory-utilization"
  - "0.9"
  - "--async-scheduling"
  - "--additional-config"
  - '{"weight_prefetch_config":{"enabled":true}}'

_benchmarks: &benchmarks
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/aime2024
    request_conf: vllm_api_general_chat
    dataset_conf: aime2024/aime2024_gen_0_shot_chat_prompt
    max_out_len: 32768
    batch_size: 32
    baseline: 83.33
    threshold: 7
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs400
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 304
    max_out_len: 1500
    batch_size: 76
    baseline: 1
    threshold: 0.97

# ==========================================
# ACTUAL TEST CASES
# ==========================================

test_cases:
  - name: "Qwen3-32B-W8A8-aclgraph-a3"
    model: "vllm-ascend/Qwen3-32B-W8A8"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--compilation-config"
      - '{"cudagraph_mode":"FULL_DECODE_ONLY","cudagraph_capture_sizes":[1,12,16,20,24,32,48,60,64,68,72,76,80]}'
    benchmarks:
      <<: *benchmarks

  - name: "Qwen3-32B-W8A8-single-a3"
    model: "vllm-ascend/Qwen3-32B-W8A8"
    envs:
      <<: *envs
    server_cmd: *server_cmd
    server_cmd_extra:
      - "--enforce-eager"
    benchmarks:
      <<: *benchmarks
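A note on the file's structure for reviewers unfamiliar with the idiom: the `&envs` / `&server_cmd` / `&benchmarks` anchors and the `<<:` merge key are plain YAML, so each test case inherits the shared blocks and only states its deltas (`server_cmd_extra`, here). A minimal, self-contained demonstration with made-up values:

```python
# Demonstrates the YAML anchor + merge-key pattern the config file relies on.
import yaml

DOC = """
_base: &base
  a: "1"
  b: "2"
case:
  <<: *base   # merge all keys from _base
  b: "3"      # keys set locally win over merged ones
"""
print(yaml.safe_load(DOC)["case"])  # -> {'a': '1', 'b': '3'}
```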