xc-llm-ascend/tests/e2e/long_term/test_accuracy.py
wangxiyuan 69b817ed65 [CI] Add unit test framework (#1201)
This PR adds the unit test framework to enable unit tests for vLLM Ascend.
Unit tests run on CPU machines and are triggered once the lint check
passes, the same as the e2e tests.

For unit tests, this PR creates a new folder called `ut` under the `tests`
module. The test files in `ut` should mirror the code structure in
`vllm-ascend`, and file names should start with the `test_` prefix. For
example, in this PR, `test_ascend_config.py` is added to test
`ascend_config.py`.
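
As a rough illustration of this convention (the `AscendConfig` name and its
no-argument constructor are assumptions here, not the PR's actual contents),
a minimal `tests/ut/test_ascend_config.py` could look like:

# Hypothetical sketch only; the real test in the PR may differ.
from vllm_ascend.ascend_config import AscendConfig  # assumed import path


def test_ascend_config_defaults():
    # Unit tests exercise pure-Python logic, so they can run on CPU-only CI.
    config = AscendConfig()  # assumed no-argument constructor
    assert config is not None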

A new file, `worker/test_worker_v1.py`, is also added as a placeholder.
This file should hold the unit tests for `vllm-ascend/worker/worker_v1.py`.

Additionally, a new `fake_weight` folder is added. It contains the
`config.json` from `facebook/opt-125m`, so that the tests do not need to
reach Hugging Face on every run.
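
As a sketch of how the fake weights can be used (the relative path below is
an assumption for illustration), a test can point Transformers at the local
copy instead of downloading from the Hub:

# Hypothetical usage; the fake_weight path is assumed for illustration.
from transformers import AutoConfig

# Loads config.json from the local folder instead of querying Hugging Face.
config = AutoConfig.from_pretrained("tests/ut/fake_weight")
assert config.model_type == "opt"  # facebook/opt-125m is an OPT model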

TODO:
We should add all the unit test files one by one in the future.

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
2025-06-16 18:32:28 +08:00


#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm-project/blob/main/tests/entrypoints/llm/test_accuracy.py
#
import gc
import multiprocessing
import sys
from multiprocessing import Queue

import lm_eval
import pytest
import torch

# Pre-trained model identifiers on Hugging Face.
MODEL_NAME = ["Qwen/Qwen2.5-0.5B-Instruct", "Qwen/Qwen2.5-VL-3B-Instruct"]
# Benchmark configuration mapping models to evaluation tasks:
# - Text model: GSM8K (grade school math reasoning)
# - Vision-language model: MMMU Art & Design validation (multimodal understanding)
TASK = {
"Qwen/Qwen2.5-0.5B-Instruct": "gsm8k",
"Qwen/Qwen2.5-VL-3B-Instruct": "mmmu_val_art_and_design"
}
# Answer validation requiring format consistency.
FILTER = {
"Qwen/Qwen2.5-0.5B-Instruct": "exact_match,strict-match",
"Qwen/Qwen2.5-VL-3B-Instruct": "acc,none"
}
# Tolerance for the accuracy check; applied as an absolute ±0.03 band
# around the expected value in the assertion below.
RTOL = 0.03
# Expected baseline accuracy for each model under vLLM.
EXPECTED_VALUE = {
"Qwen/Qwen2.5-0.5B-Instruct": 0.316,
"Qwen/Qwen2.5-VL-3B-Instruct": 0.541
}
# Maximum context length configuration for each model.
MAX_MODEL_LEN = {
"Qwen/Qwen2.5-0.5B-Instruct": 4096,
"Qwen/Qwen2.5-VL-3B-Instruct": 8192
}
# Model types distinguishing text-only and vision-language models.
MODEL_TYPE = {
"Qwen/Qwen2.5-0.5B-Instruct": "vllm",
"Qwen/Qwen2.5-VL-3B-Instruct": "vllm-vlm"
}
# wrap prompts in a chat-style template.
APPLY_CHAT_TEMPLATE = {"vllm": False, "vllm-vlm": True}
# Few-shot examples handling as multi-turn dialogues.
FEWSHOT_AS_MULTITURN = {"vllm": False, "vllm-vlm": True}


def run_test(queue, model, max_model_len, model_type):
    """Run lm_eval in a subprocess and report the score (or error) via queue."""
    try:
        if model_type == "vllm-vlm":
            model_args = (f"pretrained={model},max_model_len={max_model_len},"
                          "dtype=auto,max_images=2")
        else:
            model_args = (f"pretrained={model},max_model_len={max_model_len},"
                          "dtype=auto")
        results = lm_eval.simple_evaluate(
            model=model_type,
            model_args=model_args,
            tasks=TASK[model],
            batch_size="auto",
            apply_chat_template=APPLY_CHAT_TEMPLATE[model_type],
            fewshot_as_multiturn=FEWSHOT_AS_MULTITURN[model_type],
        )
        result = results["results"][TASK[model]][FILTER[model]]
        print("result:", result)
        queue.put(result)
    except Exception as e:
        queue.put(e)
        sys.exit(1)
    finally:
        gc.collect()
        torch.npu.empty_cache()


@pytest.mark.parametrize("model", MODEL_NAME)
@pytest.mark.parametrize("VLLM_USE_V1", ["0", "1"])
def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch, model, VLLM_USE_V1):
    if model == "Qwen/Qwen2.5-VL-3B-Instruct" and VLLM_USE_V1 == "1":
        pytest.skip(
            "Qwen2.5-VL-3B-Instruct is not supported when VLLM_USE_V1=1")
    with monkeypatch.context() as m:
        m.setenv("VLLM_USE_V1", VLLM_USE_V1)
        result_queue: Queue[float] = multiprocessing.Queue()
        p = multiprocessing.Process(target=run_test,
                                    args=(result_queue, model,
                                          MAX_MODEL_LEN[model],
                                          MODEL_TYPE[model]))
        p.start()
        p.join()
        result = result_queue.get()
        print(result)
        assert (EXPECTED_VALUE[model] - RTOL < result < EXPECTED_VALUE[model] + RTOL), \
            f"Expected: {EXPECTED_VALUE[model]}±{RTOL} | Measured: {result}"