[Bugfix] Fix DeepSeek precision issue and add accuracy CI for it (#905)

### What this PR does / why we need it?
Fix the DeepSeek precision issue on V0 and add an accuracy CI test for it.
Fixes https://github.com/vllm-project/vllm-ascend/issues/1062
### How was this patch tested?
CI passed with the newly added test.
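
For local reproduction, the same evaluation the new CI test performs can be run directly through lm_eval's vLLM backend. The sketch below mirrors the model arguments, task, and filter from the new test file; running it standalone (outside pytest) is illustrative and not part of this PR.

```python
# Minimal sketch, assuming an Ascend node with vllm-ascend and lm_eval installed;
# it mirrors the evaluation the new accuracy test performs.
import lm_eval

results = lm_eval.simple_evaluate(
    model="vllm",
    model_args=("pretrained=deepseek-ai/DeepSeek-V2-Lite,max_model_len=4096,"
                "trust_remote_code=True,tensor_parallel_size=4"),
    tasks="gsm8k",
    batch_size="auto",
)
# "exact_match,strict-match" is the filter the test checks against its baseline.
print(results["results"]["gsm8k"]["exact_match,strict-match"])
```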

Signed-off-by: MengqingCao <cmq0113@163.com>
Commit: afc4c0cd03 (parent: da9acfca60)
Author: Mengqing Cao
Date: 2025-06-04 20:26:44 +08:00
Committed by: GitHub
9 changed files with 121 additions and 43 deletions


@@ -354,4 +354,4 @@ def prompt_template(request):
 @pytest.fixture(scope="session")
 def ilama_lora_files():
-    return snapshot_download(repo_id="jeeejeee/ilama-text2sql-spider")
+    return snapshot_download(repo_id="jeeejeee/ilama-text2sql-spider")


@@ -0,0 +1,72 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm-project/blob/main/tests/entrypoints/llm/test_accuracy.py
#
import gc
import multiprocessing
from multiprocessing import Queue

import lm_eval
import pytest
import torch

# pre-trained model path on Hugging Face.
MODELS = ["deepseek-ai/DeepSeek-V2-Lite"]
# Math reasoning benchmark (Grade School Math 8K).
TASK = "gsm8k"
# Answer validation requiring format consistency.
FILTER = "exact_match,strict-match"
# 3% relative tolerance for numerical accuracy.
RTOL = 0.03
# Baseline accuracy after VLLM optimization.
# FIXME: fix the accuracy issue
EXPECTED_VALUE = 0.000758150113722517


def run_test(model_name, queue, more_args=None):
    model_args = f"pretrained={model_name},max_model_len=4096,trust_remote_code=True,tensor_parallel_size=4"
    if more_args is not None:
        model_args = f"{model_args},{more_args}"
    results = lm_eval.simple_evaluate(
        model="vllm",
        model_args=model_args,
        tasks=TASK,
        batch_size="auto",
    )
    result = results["results"][TASK][FILTER]
    print(100 * "*", "\nThe accuracy test result:", result)
    queue.put(result)
    del results
    torch.npu.empty_cache()
    gc.collect()


@pytest.mark.parametrize("model", MODELS)
def test_lm_eval_accuracy(model, monkeypatch: pytest.MonkeyPatch):
    with monkeypatch.context():
        result_queue: Queue[float] = multiprocessing.Queue()
        p = multiprocessing.Process(target=run_test,
                                    args=(
                                        model,
                                        result_queue,
                                    ))
        p.start()
        p.join()
        result = result_queue.get()
        assert (EXPECTED_VALUE - RTOL < result < EXPECTED_VALUE + RTOL), \
            f"Expected: {EXPECTED_VALUE}±{RTOL} | Measured: {result}"


@@ -22,7 +22,6 @@ Run `pytest tests/test_offline_inference.py`.
"""
import os
import pytest
import vllm # noqa: F401
from tests.conftest import VllmRunner
@@ -47,7 +46,6 @@ def test_models_distributed_QwQ():
        vllm_model.generate_greedy(example_prompts, max_tokens)


@pytest.mark.skipif(True, reason="wait for mla issue fixed on v1")
def test_models_distributed_DeepSeek():
    example_prompts = [
        "vLLM is a high-throughput and memory-efficient inference and serving engine for LLMs.",