### What this PR does / why we need it?
Refactor the e2e CI tests (a sketch of what removing the eager parameter looks like follows this list):
1. tests/e2e/singlecard/pooling/test_embedding.py: remove the eager parameter and rename the test case
2. tests/e2e/singlecard/pooling/test_scoring.py: rename test cases
3. tests/e2e/singlecard/pooling/test_classification.py: rename the test case
4. tests/e2e/singlecard/test_quantization.py: remove the eager parameter, change the model to vllm-ascend/Qwen2.5-0.6B-W8A8, and rename the test case
5. tests/e2e/multicard/test_shared_expert_dp.py: rename test cases
6. tests/e2e/singlecard/test_sampler.py: rename test cases
7. tests/e2e/singlecard/test_aclgraph_accuracy.py: rename test cases
8. tests/e2e/multicard/test_offline_inference_distributed.py: rename test cases and remove the eager parameter
9. tests/e2e/multicard/long_sequence/test_accuracy.py: rename test cases and remove the eager parameter
10. tests/e2e/multicard/long_sequence/test_basic.py: rename test cases and remove the eager parameter
11. tests/e2e/multicard/test_expert_parallel.py: remove the eager parameter
12. tests/e2e/multicard/test_full_graph_mode.py: remove the eager parameter
13. tests/e2e/multicard/test_ilama_lora_tp2.py: remove the eager parameter
14. tests/e2e/singlecard/spec_decode_v1/test_v1_mtp_correctness.py: remove the eager parameter
15. tests/e2e/singlecard/spec_decode_v1/test_v1_spec_decode.py: remove the eager parameter
16. tests/e2e/singlecard/test_aclgraph_accuracy.py: remove the eager parameter
17. tests/e2e/singlecard/test_camem.py: remove the eager parameter
18. tests/e2e/singlecard/test_ilama_lora.py: remove the eager parameter
19. tests/e2e/singlecard/test_multistream_overlap_shared_expert.py: remove the eager parameter
20. tests/e2e/singlecard/test_vlm.py: remove the eager parameter
21. tests/e2e/singlecard/test_xli: remove the eager parameter
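For illustration, here is a minimal before/after sketch of the eager-parameter removal, assuming the common pattern where a case parametrizes `enforce_eager` and forwards it to `VllmRunner`; the test name, model, and prompts below are placeholders, not the actual diff:

```python
from tests.e2e.conftest import VllmRunner

# Hypothetical "before": the case was parametrized over the eager flag and
# forwarded it to the runner.
#
#   @pytest.mark.parametrize("enforce_eager", [True, False])
#   def test_model_with_eager_param(enforce_eager):
#       with VllmRunner("Qwen/Qwen2.5-0.5B-Instruct",
#                       enforce_eager=enforce_eager) as runner:
#           ...


# "After": the eager parameter is dropped, so the case runs once with the
# runner's default graph mode (model name and prompts are placeholders).
def test_model():
    prompts = ["Hello, my name is"]
    with VllmRunner("Qwen/Qwen2.5-0.5B-Instruct") as runner:
        outputs = runner.generate_greedy(prompts=prompts, max_tokens=8)
    assert len(outputs) == len(prompts)
```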
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: release/v0.13.0
- vLLM main: ad32e3e19c
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
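For context, the attached test file, which appears to be the tests/e2e/singlecard/test_vlm.py touched in item 20 of the change list: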
```python
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
"""Compare the short outputs of HF and vLLM when using greedy sampling.

Run `pytest tests/test_offline_inference.py`.
"""
import os
from unittest.mock import patch

from vllm import SamplingParams
from vllm.assets.audio import AudioAsset
from vllm.assets.image import ImageAsset

from tests.e2e.conftest import VllmRunner


@patch.dict(os.environ, {"VLLM_WORKER_MULTIPROC_METHOD": "spawn"})
def test_multimodal_vl(vl_config):
    image = ImageAsset("cherry_blossom").pil_image.convert("RGB")

    img_questions = [
        "What is the content of this image?",
        "Describe the content of this image in detail.",
        "What's in the image?",
        "Where is this image taken?",
    ]

    images = [image] * len(img_questions)
    prompts = vl_config["prompt_fn"](img_questions)

    with VllmRunner(vl_config["model"],
                    mm_processor_kwargs=vl_config["mm_processor_kwargs"],
                    max_model_len=8192,
                    limit_mm_per_prompt={"image": 1}) as vllm_model:
        outputs = vllm_model.generate_greedy(
            prompts=prompts,
            images=images,
            max_tokens=64,
        )

    assert len(outputs) == len(prompts)

    for _, output_str in outputs:
        assert output_str, "Generated output should not be empty."


@patch.dict(os.environ, {"VLLM_WORKER_MULTIPROC_METHOD": "spawn"})
def test_multimodal_audio():
    audio_prompt = "".join([
        f"Audio {idx+1}: <|audio_bos|><|AUDIO|><|audio_eos|>\n"
        for idx in range(2)
    ])
    question = "What sport and what nursery rhyme are referenced?"
    prompt = ("<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
              "<|im_start|>user\n"
              f"{audio_prompt}{question}<|im_end|>\n"
              "<|im_start|>assistant\n")
    mm_data = {
        "audio": [
            asset.audio_and_sample_rate for asset in
            [AudioAsset("mary_had_lamb"),
             AudioAsset("winning_call")]
        ]
    }
    inputs = {"prompt": prompt, "multi_modal_data": mm_data}

    sampling_params = SamplingParams(temperature=0.2,
                                     max_tokens=10,
                                     stop_token_ids=None)

    with VllmRunner("Qwen/Qwen2-Audio-7B-Instruct",
                    max_model_len=4096,
                    max_num_seqs=5,
                    dtype="bfloat16",
                    limit_mm_per_prompt={"audio": 2},
                    gpu_memory_utilization=0.9) as runner:
        outputs = runner.generate(inputs, sampling_params=sampling_params)

    assert outputs is not None, "Generated outputs should not be None."
    assert len(outputs) > 0, "Generated outputs should not be empty."
```
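As a usage note, these cases can typically be run on a single card with `pytest -sv tests/e2e/singlecard/test_vlm.py`; the path comes from the change list above, and the `-sv` flags are only a convenience, not a requirement of the suite.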