[TEST]Add aisbench log and A2 cases (#3841)
### What this PR does / why we need it? This PR adds 2 more A2 cases which we need to test daily. It also enhances the logging for aisbench test failures to improve issue identification. ### Does this PR introduce _any_ user-facing change? No ### How was this patch tested? By running the test - vLLM version: v0.11.0rc3 - vLLM main: https://github.com/vllm-project/vllm/commit/releases/v0.11.1 --------- Signed-off-by: jiangyunfan1 <jiangyunfan1@h-partners.com>
This commit is contained in:
@@ -73,6 +73,9 @@ jobs:
|
|||||||
- name: multi-node-deepseek-dp
|
- name: multi-node-deepseek-dp
|
||||||
config_file_path: tests/e2e/nightly/multi_node/config/models/DeepSeek-R1-W8A8-A2.yaml
|
config_file_path: tests/e2e/nightly/multi_node/config/models/DeepSeek-R1-W8A8-A2.yaml
|
||||||
size: 2
|
size: 2
|
||||||
|
- name: multi-node-deepseek-dp-torchair
|
||||||
|
config_file_path: tests/e2e/nightly/multi_node/config/models/DeepSeek-R1-W8A8-A2-torchair.yaml
|
||||||
|
size: 2
|
||||||
uses: ./.github/workflows/_e2e_nightly_multi_node.yaml
|
uses: ./.github/workflows/_e2e_nightly_multi_node.yaml
|
||||||
with:
|
with:
|
||||||
soc_version: a2
|
soc_version: a2
|
||||||
|
|||||||
@@ -133,4 +133,7 @@ async def test_models(model: str, mode: str) -> None:
|
|||||||
if mode in ["single", "no_chunkprefill"]:
|
if mode in ["single", "no_chunkprefill"]:
|
||||||
return
|
return
|
||||||
# aisbench test
|
# aisbench test
|
||||||
run_aisbench_cases(model, port, aisbench_cases)
|
run_aisbench_cases(model,
|
||||||
|
port,
|
||||||
|
aisbench_cases,
|
||||||
|
server_args=server_args)
|
||||||
|
|||||||
@@ -0,0 +1,64 @@
|
|||||||
|
test_name: "test DeepSeek-R1-W8A8 torchair on A2"
|
||||||
|
model: "vllm-ascend/DeepSeek-R1-0528-W8A8"
|
||||||
|
num_nodes: 2
|
||||||
|
npu_per_node: 8
|
||||||
|
env_common:
|
||||||
|
VLLM_USE_MODELSCOPE: true
|
||||||
|
HCCL_BUFFSIZE: 1024
|
||||||
|
SERVER_PORT: 8080
|
||||||
|
OMP_PROC_BIND: false
|
||||||
|
OMP_NUM_THREADS: 10
|
||||||
|
|
||||||
|
|
||||||
|
deployment:
|
||||||
|
-
|
||||||
|
server_cmd: >
|
||||||
|
vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
|
||||||
|
--host 0.0.0.0
|
||||||
|
--port $SERVER_PORT
|
||||||
|
--data-parallel-size 4
|
||||||
|
--data-parallel-size-local 2
|
||||||
|
--data-parallel-address $LOCAL_IP
|
||||||
|
--data-parallel-rpc-port 13399
|
||||||
|
--no-enable-prefix-caching
|
||||||
|
--max-num-seqs 16
|
||||||
|
--tensor-parallel-size 4
|
||||||
|
--max-model-len 36864
|
||||||
|
--max-num-batched-tokens 6000
|
||||||
|
--enable-expert-parallel
|
||||||
|
--trust-remote-code
|
||||||
|
--quantization ascend
|
||||||
|
--gpu-memory-utilization 0.9
|
||||||
|
--speculative-config '{"num_speculative_tokens": 1, "method":"deepseek_mtp"}'
|
||||||
|
--additional-config '{"ascend_scheduler_config":{"enabled":false},"torchair_graph_config":{"enabled":true,"enable_multistream_moe":true},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
|
||||||
|
|
||||||
|
-
|
||||||
|
server_cmd: >
|
||||||
|
vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
|
||||||
|
--headless
|
||||||
|
--data-parallel-size 4
|
||||||
|
--data-parallel-rpc-port 13399
|
||||||
|
--data-parallel-size-local 2
|
||||||
|
--data-parallel-start-rank 2
|
||||||
|
--data-parallel-address $MASTER_IP
|
||||||
|
--no-enable-prefix-caching
|
||||||
|
--max-num-seqs 16
|
||||||
|
--tensor-parallel-size 4
|
||||||
|
--max-model-len 36864
|
||||||
|
--max-num-batched-tokens 6000
|
||||||
|
--enable-expert-parallel
|
||||||
|
--trust-remote-code
|
||||||
|
--quantization ascend
|
||||||
|
--gpu-memory-utilization 0.9
|
||||||
|
--speculative-config '{"num_speculative_tokens": 1, "method":"deepseek_mtp"}'
|
||||||
|
--additional-config '{"ascend_scheduler_config":{"enabled":false},"torchair_graph_config":{"enabled":true,"enable_multistream_moe":true},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
|
||||||
|
benchmarks:
|
||||||
|
acc:
|
||||||
|
case_type: accuracy
|
||||||
|
dataset_path: vllm-ascend/gsm8k
|
||||||
|
request_conf: vllm_api_general_chat
|
||||||
|
dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
|
||||||
|
max_out_len: 32768
|
||||||
|
batch_size: 512
|
||||||
|
baseline: 95
|
||||||
|
threshold: 5
|
||||||
@@ -30,7 +30,7 @@ deployment:
|
|||||||
--quantization ascend
|
--quantization ascend
|
||||||
--gpu-memory-utilization 0.9
|
--gpu-memory-utilization 0.9
|
||||||
--enforce-eager
|
--enforce-eager
|
||||||
--speculative-config '{"num_speculative_tokens": 1, "method":"deepseek_mtp"}' \
|
--speculative-config '{"num_speculative_tokens": 1, "method":"deepseek_mtp"}'
|
||||||
--additional-config '{"ascend_scheduler_config":{"enabled":false},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
|
--additional-config '{"ascend_scheduler_config":{"enabled":false},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
|
||||||
|
|
||||||
-
|
-
|
||||||
@@ -52,6 +52,6 @@ deployment:
|
|||||||
--quantization ascend
|
--quantization ascend
|
||||||
--gpu-memory-utilization 0.9
|
--gpu-memory-utilization 0.9
|
||||||
--enforce-eager
|
--enforce-eager
|
||||||
--speculative-config '{"num_speculative_tokens": 1, "method":"deepseek_mtp"}' \
|
--speculative-config '{"num_speculative_tokens": 1, "method":"deepseek_mtp"}'
|
||||||
--additional-config '{"ascend_scheduler_config":{"enabled":false},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
|
--additional-config '{"ascend_scheduler_config":{"enabled":false},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
|
||||||
benchmarks:
|
benchmarks:
|
||||||
|
|||||||
@@ -117,9 +117,7 @@ async def test_multi_node() -> None:
|
|||||||
if config.is_master:
|
if config.is_master:
|
||||||
port = proxy_port if disaggregated_prefill else server_port
|
port = proxy_port if disaggregated_prefill else server_port
|
||||||
# aisbench test
|
# aisbench test
|
||||||
if acc_cmd:
|
aisbench_cases = [acc_cmd, perf_cmd]
|
||||||
run_aisbench_cases(local_model_path, port, acc_cmd)
|
run_aisbench_cases(local_model_path, port, aisbench_cases)
|
||||||
if perf_cmd:
|
|
||||||
run_aisbench_cases(local_model_path, port, perf_cmd)
|
|
||||||
else:
|
else:
|
||||||
remote_server.hang_until_terminated()
|
remote_server.hang_until_terminated()
|
||||||
|
|||||||
@@ -16,6 +16,7 @@
|
|||||||
#
|
#
|
||||||
import hashlib
|
import hashlib
|
||||||
import json
|
import json
|
||||||
|
import logging
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import subprocess
|
import subprocess
|
||||||
@@ -188,8 +189,8 @@ class AisbenchRunner:
|
|||||||
line).group(1)
|
line).group(1)
|
||||||
return
|
return
|
||||||
if "ERROR" in line:
|
if "ERROR" in line:
|
||||||
raise RuntimeError(
|
error_msg = f"Some errors happened to Aisbench runtime, the first error is {line}"
|
||||||
"Some errors happen to Aisbench task.") from None
|
raise RuntimeError(error_msg) from None
|
||||||
|
|
||||||
def _wait_for_task(self):
|
def _wait_for_task(self):
|
||||||
self._wait_for_exp_folder()
|
self._wait_for_exp_folder()
|
||||||
@@ -201,8 +202,8 @@ class AisbenchRunner:
|
|||||||
self.result_line = line
|
self.result_line = line
|
||||||
return
|
return
|
||||||
if "ERROR" in line:
|
if "ERROR" in line:
|
||||||
raise RuntimeError(
|
error_msg = f"Some errors happened to Aisbench runtime, the first error is {line}"
|
||||||
"Some errors happen to Aisbench task.") from None
|
raise RuntimeError(error_msg) from None
|
||||||
|
|
||||||
def _get_result_performance(self):
|
def _get_result_performance(self):
|
||||||
result_dir = re.search(r'Performance Result files locate in (.*)',
|
result_dir = re.search(r'Performance Result files locate in (.*)',
|
||||||
@@ -237,12 +238,12 @@ class AisbenchRunner:
|
|||||||
assert self.baseline - self.threshold <= acc_value <= self.baseline + self.threshold, f"Accuracy verification failed. The accuracy of {self.dataset_path} is {acc_value}, which is not within {self.threshold} relative to baseline {self.baseline}."
|
assert self.baseline - self.threshold <= acc_value <= self.baseline + self.threshold, f"Accuracy verification failed. The accuracy of {self.dataset_path} is {acc_value}, which is not within {self.threshold} relative to baseline {self.baseline}."
|
||||||
|
|
||||||
|
|
||||||
def run_aisbench_cases(model, port, aisbench_cases):
|
def run_aisbench_cases(model, port, aisbench_cases, server_args=""):
|
||||||
if isinstance(aisbench_cases, dict):
|
|
||||||
aisbench_cases = [aisbench_cases]
|
|
||||||
aisbench_results = []
|
aisbench_results = []
|
||||||
aisbench_errors = []
|
aisbench_errors = []
|
||||||
for aisbench_case in aisbench_cases:
|
for aisbench_case in aisbench_cases:
|
||||||
|
if not aisbench_case:
|
||||||
|
continue
|
||||||
try:
|
try:
|
||||||
with AisbenchRunner(model, port, aisbench_case) as aisbench:
|
with AisbenchRunner(model, port, aisbench_case) as aisbench:
|
||||||
aisbench_results.append(aisbench.result)
|
aisbench_results.append(aisbench.result)
|
||||||
@@ -251,9 +252,10 @@ def run_aisbench_cases(model, port, aisbench_cases):
|
|||||||
aisbench_errors.append([aisbench_case, e])
|
aisbench_errors.append([aisbench_case, e])
|
||||||
print(e)
|
print(e)
|
||||||
for failed_case, error_info in aisbench_errors:
|
for failed_case, error_info in aisbench_errors:
|
||||||
print(
|
error_msg = f"The following aisbench case failed: {failed_case}, reason is {error_info}"
|
||||||
f"The following aisbench case failed: {failed_case}, reason is {error_info}."
|
if server_args:
|
||||||
)
|
error_msg += f"\nserver_args are {server_args}"
|
||||||
|
logging.error(error_msg)
|
||||||
assert not aisbench_errors, "some aisbench cases failed, info were shown above."
|
assert not aisbench_errors, "some aisbench cases failed, info were shown above."
|
||||||
return aisbench_results
|
return aisbench_results
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user