Fix accuracy test config and add DeepSeek-V2-Lite test (#2261)

### What this PR does / why we need it?
This PR fixes the accuracy test configuration introduced in
https://github.com/vllm-project/vllm-ascend/pull/2073. Users can now run
accuracy tests for multiple models simultaneously and generate a separate
report file per model by running:

```bash
cd ~/vllm-ascend
pytest -sv ./tests/e2e/models/test_lm_eval_correctness.py \
          --config-list-file ./tests/e2e/models/configs/accuracy.txt
```
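
For reference, the config list file is plain text: one YAML config filename per line, resolved relative to the list file's directory, with blank lines and `#` comments skipped (see the conftest.py changes below). Illustrative contents only; the actual entries may differ:

```text
# tests/e2e/models/configs/accuracy.txt (illustrative)
Qwen3-8B-Base.yaml
DeepSeek-V2-Lite.yaml
```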

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
<img width="1648" height="511" alt="image"
src="https://github.com/user-attachments/assets/1757e3b8-a6b7-44e5-b701-80940dc756cd"
/>


- vLLM version: v0.10.0
- vLLM main: 766bc8162c

---------

Signed-off-by: Icey <1790571317@qq.com>
Icey committed 2025-08-08 11:09:16 +08:00 (committed by GitHub)
parent ad1083761f · commit 0bd5ff5299
13 changed files with 46 additions and 418 deletions


@@ -1,167 +0,0 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm-project/blob/main/tests/entrypoints/llm/test_accuracy.py
#
import gc
import multiprocessing
import sys
from multiprocessing import Queue

import lm_eval
import pytest
import torch

SERVER_HOST = "127.0.0.1"
SERVER_PORT = 8000
HEALTH_URL = f"http://{SERVER_HOST}:{SERVER_PORT}/health"
COMPLETIONS_URL = f"http://{SERVER_HOST}:{SERVER_PORT}/v1/completions"

# Pre-trained model identifiers on Hugging Face.
# Qwen/Qwen2.5-0.5B-Instruct: accuracy test for DP (used by the DP tests below).
# Qwen/Qwen3-30B-A3B: accuracy test for EP and DP.
# deepseek-ai/DeepSeek-V2-Lite: accuracy test for TP.
MODEL_NAME = ["Qwen/Qwen3-30B-A3B", "deepseek-ai/DeepSeek-V2-Lite"]
# Benchmark configuration mapping each model to its evaluation task
# (GSM8K: grade school math reasoning).
TASK = {
    "Qwen/Qwen2.5-0.5B-Instruct": "gsm8k",
    "Qwen/Qwen3-30B-A3B": "gsm8k",
    "deepseek-ai/DeepSeek-V2-Lite": "gsm8k"
}
# Answer validation requiring format consistency.
FILTER = {
    "Qwen/Qwen2.5-0.5B-Instruct": "exact_match,strict-match",
    "Qwen/Qwen3-30B-A3B": "exact_match,strict-match",
    "deepseek-ai/DeepSeek-V2-Lite": "exact_match,strict-match"
}
# Tolerance band of ±0.03 (absolute) around the expected accuracy.
RTOL = 0.03
# Baseline accuracy after vLLM optimization.
EXPECTED_VALUE = {
    "Qwen/Qwen2.5-0.5B-Instruct": 0.316,
    "Qwen/Qwen3-30B-A3B": 0.888,
    "deepseek-ai/DeepSeek-V2-Lite": 0.375
}
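# e.g. deepseek-ai/DeepSeek-V2-Lite passes when 0.345 < measured < 0.405.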
# Maximum context length configuration for each model.
MAX_MODEL_LEN = {
    "Qwen/Qwen2.5-0.5B-Instruct": 4096,
    "Qwen/Qwen3-30B-A3B": 4096,
    "deepseek-ai/DeepSeek-V2-Lite": 4096
}
# Model types distinguishing text-only and vision-language models.
MODEL_TYPE = {
    "Qwen/Qwen2.5-0.5B-Instruct": "vllm",
    "Qwen/Qwen3-30B-A3B": "vllm",
    "deepseek-ai/DeepSeek-V2-Lite": "vllm"
}
# Wrap prompts in a chat-style template.
APPLY_CHAT_TEMPLATE = {
    "Qwen/Qwen2.5-0.5B-Instruct": False,
    "Qwen/Qwen3-30B-A3B": False,
    "deepseek-ai/DeepSeek-V2-Lite": False
}
# Few-shot examples handled as multi-turn dialogues.
FEWSHOT_AS_MULTITURN = {
    "Qwen/Qwen2.5-0.5B-Instruct": False,
    "Qwen/Qwen3-30B-A3B": False,
    "deepseek-ai/DeepSeek-V2-Lite": False
}
# Extra engine arguments per model, appended to model_args.
MORE_ARGS = {
    "Qwen/Qwen2.5-0.5B-Instruct":
    None,
    "Qwen/Qwen3-30B-A3B":
    "tensor_parallel_size=2,enable_expert_parallel=True,enforce_eager=True",
    "deepseek-ai/DeepSeek-V2-Lite":
    "tensor_parallel_size=2,trust_remote_code=True,enforce_eager=True"
}
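# Illustration (derived from run_test below): for deepseek-ai/DeepSeek-V2-Lite
# the assembled model_args becomes
# "pretrained=deepseek-ai/DeepSeek-V2-Lite,max_model_len=4096,dtype=auto,
#  tensor_parallel_size=2,trust_remote_code=True,enforce_eager=True".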
multiprocessing.set_start_method("spawn", force=True)


def run_test(queue, model, max_model_len, model_type, more_args):
    try:
        if model_type == "vllm-vlm":
            model_args = (f"pretrained={model},max_model_len={max_model_len},"
                          "dtype=auto,max_images=2")
        else:
            model_args = (f"pretrained={model},max_model_len={max_model_len},"
                          "dtype=auto")
        if more_args is not None:
            model_args = f"{model_args},{more_args}"
        results = lm_eval.simple_evaluate(
            model=model_type,
            model_args=model_args,
            tasks=TASK[model],
            batch_size="auto",
            apply_chat_template=APPLY_CHAT_TEMPLATE[model],
            fewshot_as_multiturn=FEWSHOT_AS_MULTITURN[model],
        )
        result = results["results"][TASK[model]][FILTER[model]]
        print("result:", result)
        queue.put(result)
    except Exception as e:
        error_msg = f"{type(e).__name__}: {str(e)}"
        queue.put(error_msg)
        sys.exit(1)
    finally:
        gc.collect()
        torch.npu.empty_cache()
@pytest.mark.parametrize("model", MODEL_NAME)
def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model):
    with monkeypatch.context():
        result_queue: Queue[float] = multiprocessing.Queue()
        p = multiprocessing.Process(target=run_test,
                                    args=(result_queue, model,
                                          MAX_MODEL_LEN[model],
                                          MODEL_TYPE[model], MORE_ARGS[model]))
        p.start()
        p.join()
        result = result_queue.get()
        # run_test puts an error string on the queue if the subprocess failed.
        if isinstance(result, str):
            pytest.fail(f"Subprocess failed with error: {result}")
        print(result)
        assert (EXPECTED_VALUE[model] - RTOL < result < EXPECTED_VALUE[model] + RTOL), \
            f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}"


DP_DENSE_MODEL = ["Qwen/Qwen2.5-0.5B-Instruct"]
DP_MOE_MODEL = ["Qwen/Qwen3-30B-A3B"]
DP_MORE_ARGS = {
    "Qwen/Qwen2.5-0.5B-Instruct":
    "tensor_parallel_size=2,data_parallel_size=2",
    "Qwen/Qwen3-30B-A3B":
    "tensor_parallel_size=2,data_parallel_size=2,enable_expert_parallel=True,max_model_len=1024,enforce_eager=True",
}


@pytest.mark.parametrize("model", DP_DENSE_MODEL)
def test_lm_eval_accuracy_dp(model):
    result_queue: Queue[float] = multiprocessing.Queue()
    p = multiprocessing.Process(target=run_test,
                                args=(result_queue, model,
                                      MAX_MODEL_LEN[model], MODEL_TYPE[model],
                                      DP_MORE_ARGS[model]))
    p.start()
    p.join()
    result = result_queue.get()
    # run_test puts an error string on the queue if the subprocess failed.
    if isinstance(result, str):
        pytest.fail(f"Subprocess failed with error: {result}")
    print(result)
    assert (EXPECTED_VALUE[model] - RTOL < result < EXPECTED_VALUE[model] + RTOL), \
        f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}"


@@ -1,115 +0,0 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm-project/blob/main/tests/entrypoints/llm/test_accuracy.py
#
import gc
import multiprocessing
import sys
from multiprocessing import Queue

import lm_eval
import pytest
import torch

# Pre-trained model identifiers on Hugging Face.
MODEL_NAME = ["Qwen/Qwen2.5-0.5B-Instruct", "Qwen/Qwen2.5-VL-3B-Instruct"]
# Benchmark configuration mapping models to evaluation tasks:
# - Text model: GSM8K (grade school math reasoning)
# - Vision-language model: MMMU Art & Design validation (multimodal understanding)
TASK = {
    "Qwen/Qwen2.5-0.5B-Instruct": "gsm8k",
    "Qwen/Qwen2.5-VL-3B-Instruct": "mmmu_val_art_and_design"
}
# Answer validation requiring format consistency.
FILTER = {
    "Qwen/Qwen2.5-0.5B-Instruct": "exact_match,strict-match",
    "Qwen/Qwen2.5-VL-3B-Instruct": "acc,none"
}
# Tolerance band of ±0.03 (absolute) around the expected accuracy.
RTOL = 0.03
# Baseline accuracy after vLLM optimization.
EXPECTED_VALUE = {
    "Qwen/Qwen2.5-0.5B-Instruct": 0.316,
    "Qwen/Qwen2.5-VL-3B-Instruct": 0.566
}
# Maximum context length configuration for each model.
MAX_MODEL_LEN = {
    "Qwen/Qwen2.5-0.5B-Instruct": 4096,
    "Qwen/Qwen2.5-VL-3B-Instruct": 8192
}
# Model types distinguishing text-only and vision-language models.
MODEL_TYPE = {
    "Qwen/Qwen2.5-0.5B-Instruct": "vllm",
    "Qwen/Qwen2.5-VL-3B-Instruct": "vllm-vlm"
}
# Wrap prompts in a chat-style template.
APPLY_CHAT_TEMPLATE = {"vllm": False, "vllm-vlm": True}
# Few-shot examples handled as multi-turn dialogues.
FEWSHOT_AS_MULTITURN = {"vllm": False, "vllm-vlm": True}
# Evaluation batch size per model.
BATCH_SIZE = {
    "Qwen/Qwen2.5-0.5B-Instruct": "auto",
    "Qwen/Qwen2.5-VL-3B-Instruct": 1
}
multiprocessing.set_start_method("spawn", force=True)


def run_test(queue, model, max_model_len, model_type):
    try:
        if model_type == "vllm-vlm":
            model_args = (f"pretrained={model},max_model_len={max_model_len},"
                          "tensor_parallel_size=1,dtype=auto,max_images=2")
        else:
            model_args = (f"pretrained={model},max_model_len={max_model_len},"
                          "tensor_parallel_size=1,dtype=auto")
        results = lm_eval.simple_evaluate(
            model=model_type,
            model_args=model_args,
            tasks=TASK[model],
            batch_size=BATCH_SIZE[model],
            apply_chat_template=APPLY_CHAT_TEMPLATE[model_type],
            fewshot_as_multiturn=FEWSHOT_AS_MULTITURN[model_type],
        )
        result = results["results"][TASK[model]][FILTER[model]]
        print("result:", result)
        queue.put(result)
    except Exception as e:
        queue.put(e)
        sys.exit(1)
    finally:
        gc.collect()
        torch.npu.empty_cache()
@pytest.mark.parametrize("model", MODEL_NAME)
def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model):
    with monkeypatch.context():
        result_queue: Queue[float] = multiprocessing.Queue()
        p = multiprocessing.Process(target=run_test,
                                    args=(result_queue, model,
                                          MAX_MODEL_LEN[model],
                                          MODEL_TYPE[model]))
        p.start()
        p.join()
        result = result_queue.get()
        if isinstance(result, Exception):
            pytest.fail(f"Subprocess failed with exception: {str(result)}")
        print(result)
        assert (EXPECTED_VALUE[model] - RTOL < result < EXPECTED_VALUE[model] + RTOL), \
            f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}"


@@ -0,0 +1,13 @@
model_name: "deepseek-ai/DeepSeek-V2-Lite"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.375
  - name: "exact_match,flexible-extract"
    value: 0.375
tensor_parallel_size: 2
apply_chat_template: False
fewshot_as_multiturn: False
trust_remote_code: True
enforce_eager: True
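
The diff to `build_model_args` below shows how fields like these reach lm_eval: selected keys are appended to the `model_args` string when present, and this PR adds `enforce_eager` to that list. A minimal sketch of the mapping under those assumptions, not the repo's literal implementation (the handling of `trust_remote_code` here is an assumption):

```python
# Minimal sketch: fold selected YAML fields into an lm_eval model_args string.
# Key list mirrors the build_model_args diff below; trust_remote_code is an
# assumed addition for illustration.
from pathlib import Path

import yaml

OPTIONAL_KEYS = [
    "max_images", "gpu_memory_utilization", "enable_expert_parallel",
    "tensor_parallel_size", "enforce_eager", "trust_remote_code"
]


def sketch_model_args(config_path: str) -> str:
    eval_config = yaml.safe_load(Path(config_path).read_text(encoding="utf-8"))
    # Start from the mandatory fields, then append any optional key that is set.
    parts = [f"pretrained={eval_config['model_name']}", "dtype=auto"]
    for key in OPTIONAL_KEYS:
        val = eval_config.get(key)
        if val is not None:
            parts.append(f"{key}={val}")
    return ",".join(parts)


# For the DeepSeek-V2-Lite config above this yields roughly:
#   pretrained=deepseek-ai/DeepSeek-V2-Lite,dtype=auto,tensor_parallel_size=2,
#   enforce_eager=True,trust_remote_code=True
```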


@@ -21,14 +21,14 @@ def pytest_addoption(parser):
     parser.addoption(
         "--config",
         action="store",
-        default="./tests/e2e/singlecard/models/configs/Qwen3-8B-Base.yaml",
+        default="./tests/e2e/models/configs/Qwen3-8B-Base.yaml",
         help="Path to the model config YAML file",
     )
     parser.addoption(
-        "--report_output",
+        "--report-dir",
         action="store",
-        default="./benchmarks/accuracy/Qwen3-8B-Base.md",
-        help="Path to the report output file",
+        default="./benchmarks/accuracy",
+        help="Directory to store report files",
     )
@@ -49,25 +49,24 @@ def config(pytestconfig):
 @pytest.fixture(scope="session")
-def report_output(pytestconfig):
-    return pytestconfig.getoption("--report_output")
+def report_dir(pytestconfig):
+    return pytestconfig.getoption("report_dir")


 def pytest_generate_tests(metafunc):
     if "config_filename" in metafunc.fixturenames:
-        # If config specified, use the --config directly
-        single_config = metafunc.config.getoption("--config")
-        if single_config:
-            metafunc.parametrize("config_filename",
-                                 [Path(single_config).resolve()])
-            return
-        # Otherwise, check --config-list-file
-        rel_path = metafunc.config.getoption("--config-list-file")
-        config_list_file = Path(rel_path).resolve()
-        config_dir = config_list_file.parent
-        with open(config_list_file, encoding="utf-8") as f:
-            configs = [
-                config_dir / line.strip() for line in f
-                if line.strip() and not line.startswith("#")
-            ]
-        metafunc.parametrize("config_filename", configs)
+        if metafunc.config.getoption("--config-list-file"):
+            rel_path = metafunc.config.getoption("--config-list-file")
+            config_list_file = Path(rel_path).resolve()
+            config_dir = config_list_file.parent
+            with open(config_list_file, encoding="utf-8") as f:
+                configs = [
+                    config_dir / line.strip() for line in f
+                    if line.strip() and not line.startswith("#")
+                ]
+            metafunc.parametrize("config_filename", configs)
+        else:
+            single_config = metafunc.config.getoption("--config")
+            config_path = Path(single_config).resolve()
+            metafunc.parametrize("config_filename", [config_path])
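
With the reworked `pytest_generate_tests`, `--config-list-file` takes precedence when given; otherwise the single `--config` path is used. For example, to test one model only (a sketch using the default config path from the diff above):

```bash
pytest -sv ./tests/e2e/models/test_lm_eval_correctness.py \
          --config ./tests/e2e/models/configs/Qwen3-8B-Base.yaml
```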


@@ -48,7 +48,7 @@ def build_model_args(eval_config, tp_size):
     }
     for s in [
             "max_images", "gpu_memory_utilization", "enable_expert_parallel",
-            "tensor_parallel_size"
+            "tensor_parallel_size", "enforce_eager"
     ]:
         val = eval_config.get(s, None)
         if val is not None:
@@ -60,8 +60,7 @@ def build_model_args(eval_config, tp_size):
     return model_args


-def generate_report(tp_size, eval_config, report_data, report_output,
-                    env_config):
+def generate_report(tp_size, eval_config, report_data, report_dir, env_config):
     env = Environment(loader=FileSystemLoader(TEST_DIR))
     template = env.get_template("report_template.md")
     model_args = build_model_args(eval_config, tp_size)
@@ -85,12 +84,14 @@ def generate_report(tp_size, eval_config, report_data, report_output,
         num_fewshot=eval_config.get("num_fewshot", "N/A"),
         rows=report_data["rows"])

+    report_output = os.path.join(
+        report_dir, f"{os.path.basename(eval_config['model_name'])}.md")
     os.makedirs(os.path.dirname(report_output), exist_ok=True)
     with open(report_output, 'w', encoding='utf-8') as f:
         f.write(report_content)


-def test_lm_eval_correctness_param(config_filename, tp_size, report_output,
+def test_lm_eval_correctness_param(config_filename, tp_size, report_dir,
                                    env_config):
     eval_config = yaml.safe_load(config_filename.read_text(encoding="utf-8"))
     model_args = build_model_args(eval_config, tp_size)
@@ -143,6 +144,5 @@ def test_lm_eval_correctness_param(config_filename, tp_size, report_output,
             metric_name.replace(',', '_stderr,') if metric_name ==
             "acc,none" else metric_name.replace(',', '_stderr,')]
     })
-    generate_report(tp_size, eval_config, report_data, report_output,
-                    env_config)
+    generate_report(tp_size, eval_config, report_data, report_dir, env_config)
     assert success
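
Since `--report_output` was replaced by `--report-dir`, each model's report now lands at `<report-dir>/<model basename>.md` (see `generate_report` above). A usage sketch assuming the defaults shown in the diff:

```bash
pytest -sv ./tests/e2e/models/test_lm_eval_correctness.py \
          --config-list-file ./tests/e2e/models/configs/accuracy.txt \
          --report-dir ./benchmarks/accuracy
# e.g. the DeepSeek-V2-Lite config above produces
# ./benchmarks/accuracy/DeepSeek-V2-Lite.md
```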