xc-llm-ascend/tests/e2e/nightly/single_node/models/configs/GLM-5.yaml
yangjiuhua b717dc17a3 [v0.18.0][Test][Misc] Update CI for GLM-5 configuration on vllm-ascend/releases/v0.18.0 branch (#8322)
### What this PR does / why we need it?
Update CI for GLM-5 configuration on vllm-ascend/releases/v0.18.0 branch
Test glm5-w4a8 on the 0.18.0 release.

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

---------

Signed-off-by: yangjiuhua <y00845194@china.huawei.com>
Co-authored-by: yangjiuhua <y00845194@china.huawei.com>
2026-04-21 14:10:11 +08:00


# ==========================================
# Shared Configurations
# ==========================================
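# Environment variables merged into each test case's envs via the <<: *envs merge key.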
_envs: &envs
  HCCL_BUFFSIZE: "1024"
  SERVER_PORT: "DEFAULT_PORT"
  HCCL_OP_EXPANSION_MODE: "AIV"
  OMP_PROC_BIND: "false"
  OMP_NUM_THREADS: "1"
  PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
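
# Server launch arguments aliased into each test case as server_cmd.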
_server_cmd: &server_cmd
- "--enable-expert-parallel"
- "--tensor-parallel-size"
- "16"
- "--data-parallel-size"
- "1"
- "--port"
- "$SERVER_PORT"
- "--max-model-len"
- "16384"
- "--max-num-batched-tokens"
- "4096"
- "--trust-remote-code"
- "--gpu-memory-utilization"
- "0.95"
- "--max-num-seqs"
- "8"
- "--quantization"
- "ascend"
- "--async-scheduling"
- "--additional-config"
- '{"fuse_muls_add": true, "multistream_overlap_shared_expert": false, "ascend_compilation_config": {"enable_npugraph_ex": true}}'
- "--speculative-config"
- '{"num_speculative_tokens": 3, "method": "deepseek_mtp"}'
_special_dependencies: &special_dependencies
  transformers: "5.2.0"
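
# Accuracy and performance benchmark definitions merged into each test case.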
_benchmarks: &benchmarks
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/gsm8k-lite
    request_conf: vllm_api_general_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
    max_out_len: 8192
    batch_size: 8
    baseline: 95
    threshold: 5
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs400
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 16
    max_out_len: 1500
    batch_size: 8
    request_rate: 0
    baseline: 1
    threshold: 0.97

# ==========================================
# ACTUAL TEST CASES
# ==========================================
test_cases:
- name: "GLM-5-TP16-DP1-decodegraph"
model: "Eco-Tech/GLM-5-w4a8"
special_dependencies: *special_dependencies
envs:
<<: *envs
server_cmd: *server_cmd
server_cmd_extra:
- "--compilation-config"
- '{"cudagraph_capture_sizes": [4,8,16,32,64,128,256,512], "cudagraph_mode": "FULL_DECODE_ONLY"}'
benchmarks:
<<: *benchmarks
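
The `_envs`, `_server_cmd`, `_special_dependencies`, and `_benchmarks` blocks are ordinary YAML anchors, and the test case pulls them in through aliases and merge keys (`<<:`). As a minimal sketch (not part of this change), a hypothetical additional entry appended under the existing `test_cases` list could reuse the shared blocks and override individual fields; the case name and the overridden buffer size below are illustrative only:

```yaml
# Hypothetical extra entry under test_cases; reuses the shared anchors defined above.
  - name: "GLM-5-TP16-DP1-decodegraph-smallbuf"   # illustrative name, not in the committed file
    model: "Eco-Tech/GLM-5-w4a8"
    special_dependencies: *special_dependencies
    envs:
      <<: *envs
      HCCL_BUFFSIZE: "512"   # an explicit key overrides the value pulled in by <<: *envs
    server_cmd: *server_cmd
    benchmarks:
      <<: *benchmarks
```

Because merge keys only supply defaults, any key written out explicitly in a case wins over the shared anchor, so per-case variations can be expressed without duplicating the full block.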