### What this PR does / why we need it?

This PR refactors the nightly single-node model tests by migrating test configurations from Python scripts to a more maintainable YAML-based format.

| Original PR | Python (`.py`) | YAML (`.yaml`) |
| :--- | :--- | :--- |
| [#3568](https://github.com/vllm-project/vllm-ascend/pull/3568) | `test_deepseek_r1_0528_w8a8_eplb.py` | `DeepSeek-R1-0528-W8A8.yaml` |
| [#3631](https://github.com/vllm-project/vllm-ascend/pull/3631) | `test_deepseek_r1_0528_w8a8.py` | `DeepSeek-R1-0528-W8A8.yaml` |
| [#5874](https://github.com/vllm-project/vllm-ascend/pull/5874) | `test_deepseek_r1_w8a8_hbm.py` | `DeepSeek-R1-W8A8-HBM.yaml` |
| [#3908](https://github.com/vllm-project/vllm-ascend/pull/3908) | `test_deepseek_v3_2_w8a8.py` | `DeepSeek-V3.2-W8A8.yaml` |
| [#5682](https://github.com/vllm-project/vllm-ascend/pull/5682) | `test_kimi_k2_thinking.py` | `Kimi-K2-Thinking.yaml` |
| [#4111](https://github.com/vllm-project/vllm-ascend/pull/4111) | `test_mtpx_deepseek_r1_0528_w8a8.py` | `MTPX-DeepSeek-R1-0528-W8A8.yaml` |
| [#3733](https://github.com/vllm-project/vllm-ascend/pull/3733) | `test_prefix_cache_deepseek_r1_0528_w8a8.py` | `Prefix-Cache-DeepSeek-R1-0528-W8A8.yaml` |
| [#6543](https://github.com/vllm-project/vllm-ascend/pull/6543) | `test_qwen3_235b_w8a8.py` | `Qwen3-235B-A22B-W8A8.yaml` |
| [#6543](https://github.com/vllm-project/vllm-ascend/pull/6543) | `test_qwen3_235b_a22b_w8a8_eplb.py` | `Qwen3-235B-A22B-W8A8.yaml` |
| [#3973](https://github.com/vllm-project/vllm-ascend/pull/3973) | `test_qwen3_30b_w8a8.py` | `Qwen3-30B-A3B-W8A8.yaml` |
| [#3541](https://github.com/vllm-project/vllm-ascend/pull/3541) | `test_qwen3_32b_int8.py` | `Qwen3-32B-Int8.yaml` |
| [#3757](https://github.com/vllm-project/vllm-ascend/pull/3757) | `test_qwq_32b.py` | `QwQ-32B.yaml` |
| [#5616](https://github.com/vllm-project/vllm-ascend/pull/5616) | `test_qwen3_next_w8a8.py` | `Qwen3-Next-80B-A3B-Instruct-W8A8.yaml` |
| [#3541](https://github.com/vllm-project/vllm-ascend/pull/3541) | `test_qwen2_5_vl_7b.py` | `Qwen2.5-VL-7B-Instruct.yaml` |
| [#5301](https://github.com/vllm-project/vllm-ascend/pull/5301) | `test_qwen2_5_vl_7b_epd.py` | `Qwen2.5-VL-7B-Instruct-EPD.yaml` |
| [#3707](https://github.com/vllm-project/vllm-ascend/pull/3707) | `test_qwen2_5_vl_32b.py` | `Qwen2.5-VL-32B-Instruct.yaml` |
| [#3676](https://github.com/vllm-project/vllm-ascend/pull/3676) | `test_qwen3_32b_int8_a3_feature_stack3.py` | `Qwen3-32B-Int8-A3-Feature-Stack3.yaml` |
| [#3709](https://github.com/vllm-project/vllm-ascend/pull/3709) | `test_prefix_cache_qwen3_32b_int8.py` | `Prefix-Cache-Qwen3-32B-Int8.yaml` |
| [#5395](https://github.com/vllm-project/vllm-ascend/pull/5395) | `test_qwen3_next.py` | `Qwen3-Next-80B-A3B-Instruct-A2.yaml` |
| [#3474](https://github.com/vllm-project/vllm-ascend/pull/3474) | `test_qwen3_32b.py` | `Qwen3-32B.yaml` |
| [#3541](https://github.com/vllm-project/vllm-ascend/pull/3541) | `test_qwen3_32b_int8.py` | `Qwen3-32B-Int8-A2.yaml` |

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.15.0
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.15.0

---------

Signed-off-by: MrZ20 <2609716663@qq.com>
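To make the YAML-driven layout concrete, here is a minimal sketch of how a runner can discover these files and parametrize tests over them. The config directory and helper names are assumptions for illustration, not the actual harness code:

```python
# Hypothetical sketch: parametrize pytest over the YAML test-case files.
# CONFIG_DIR is an assumed location, not the repo's actual path.
from pathlib import Path

import pytest
import yaml

CONFIG_DIR = Path("tests/e2e/nightly/configs")  # assumed


def load_cases():
    """Yield one pytest param per test case found in every YAML file."""
    for cfg in sorted(CONFIG_DIR.glob("*.yaml")):
        data = yaml.safe_load(cfg.read_text())
        for case in data.get("test_cases", []):
            yield pytest.param(case, id=f"{cfg.stem}::{case['name']}")


@pytest.mark.parametrize("case", load_cases())
def test_nightly_model(case):
    # A real runner would launch the server(s) described by the case
    # and execute the benchmarks under case["test_content"].
    assert case["model"]
```

With this shape, adding a new nightly model means adding a YAML file rather than a new Python test module.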
As an example of the new format, here is the full `Qwen2.5-VL-7B-Instruct-EPD.yaml` (93 lines):

```yaml
# ==========================================
# ACTUAL TEST CASES
# ==========================================

test_cases:
  - name: "Qwen2.5-VL-7B-Instruct-epd"
    model: "Qwen/Qwen2.5-VL-7B-Instruct"
    service_mode: "epd"
    envs:
      ENCODE_PORT: "DEFAULT_PORT"
      PD_PORT: "DEFAULT_PORT"
      PROXY_PORT: "DEFAULT_PORT"
    epd_server_cmds:
      - - "--port"
        - "$ENCODE_PORT"
        - "--model"
        - "Qwen/Qwen2.5-VL-7B-Instruct"
        - "--gpu-memory-utilization"
        - "0.01"
        - "--tensor-parallel-size"
        - "1"
        - "--enforce-eager"
        - "--no-enable-prefix-caching"
        - "--max-model-len"
        - "10000"
        - "--max-num-batched-tokens"
        - "10000"
        - "--max-num-seqs"
        - "1"
        - "--ec-transfer-config"
        - '{"ec_connector_extra_config":{"shared_storage_path":"/dev/shm/epd/storage"},"ec_connector":"ECExampleConnector","ec_role": "ec_producer"}'
      - - "--port"
        - "$PD_PORT"
        - "--model"
        - "Qwen/Qwen2.5-VL-7B-Instruct"
        - "--gpu-memory-utilization"
        - "0.95"
        - "--tensor-parallel-size"
        - "1"
        - "--enforce-eager"
        - "--max-model-len"
        - "10000"
        - "--max-num-batched-tokens"
        - "10000"
        - "--max-num-seqs"
        - "128"
        - "--ec-transfer-config"
        - '{"ec_connector_extra_config":{"shared_storage_path":"/dev/shm/epd/storage"},"ec_connector":"ECExampleConnector","ec_role": "ec_consumer"}'
    epd_proxy_args:
      - "--host"
      - "127.0.0.1"
      - "--port"
      - "$PROXY_PORT"
      - "--encode-servers-urls"
      - "http://localhost:$ENCODE_PORT"
      - "--decode-servers-urls"
      - "http://localhost:$PD_PORT"
      - "--prefill-servers-urls"
      - "disable"
    test_content:
      benchmarks:
        warm_up:
          case_type: performance
          dataset_path: vllm-ascend/textvqa-perf-1080p
          request_conf: vllm_api_stream_chat
          dataset_conf: textvqa/textvqa_gen_base64
          num_prompts: 50
          max_out_len: 20
          batch_size: 32
          request_rate: 0
          baseline: 1
          threshold: 0.97
        acc:
          case_type: accuracy
          dataset_path: vllm-ascend/textvqa-lite
          request_conf: vllm_api_stream_chat
          dataset_conf: textvqa/textvqa_gen_base64
          max_out_len: 2048
          batch_size: 128
          baseline: 82.05
          threshold: 5
        perf:
          case_type: performance
          dataset_path: vllm-ascend/textvqa-perf-1080p
          request_conf: vllm_api_stream_chat
          dataset_conf: textvqa/textvqa_gen_base64
          num_prompts: 512
          max_out_len: 256
          batch_size: 128
          request_rate: 0
          baseline: 1
          threshold: 0.97
```
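One detail worth noting in the config above: the `envs` entries set to `"DEFAULT_PORT"` and the `$VAR` references in the command lists suggest the runner allocates ports at launch time and substitutes them into the argument lists. A minimal sketch of that resolution step, assuming `"DEFAULT_PORT"` means "pick a free port" and `$NAME` refers back to the `envs` mapping (both are inferences from the config, not confirmed harness behavior):

```python
# Hypothetical sketch of placeholder resolution in the test runner.
import socket
from string import Template


def _free_port() -> int:
    """Ask the OS for an unused TCP port."""
    with socket.socket() as s:
        s.bind(("127.0.0.1", 0))
        return s.getsockname()[1]


def resolve(envs: dict[str, str], args: list[str]) -> tuple[dict, list]:
    """Replace DEFAULT_PORT sentinels, then expand $VAR references in args."""
    resolved = {k: str(_free_port()) if v == "DEFAULT_PORT" else v
                for k, v in envs.items()}
    return resolved, [Template(a).safe_substitute(resolved) for a in args]


# Example: the proxy args from the YAML above.
envs, args = resolve(
    {"ENCODE_PORT": "DEFAULT_PORT", "PROXY_PORT": "DEFAULT_PORT"},
    ["--port", "$PROXY_PORT",
     "--encode-servers-urls", "http://localhost:$ENCODE_PORT"],
)
print(args)
```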