[CI] Add qwen2.5-vl test (#643)
### What this PR does / why we need it?
Part of #499. Adds a qwen2.5-vl test on a single NPU. The v1 engine is excluded because qwen2.5-vl currently has problems with v1; this test also lends more credibility to #639.

Signed-off-by: wangli <wangli858794774@gmail.com>
```diff
@@ -31,7 +31,7 @@ from vllm.outputs import RequestOutput
 from vllm.sampling_params import BeamSearchParams
 from vllm.utils import is_list_of
 
-from tests.model_utils import (TokensTextLogprobs,
+from tests.model_utils import (PROMPT_TEMPLATES, TokensTextLogprobs,
                                TokensTextLogprobsPromptLogprobs)
 # TODO: remove this part after the patch merged into vllm, if
 # we not explicitly patch here, some of them might be effectiveless
@@ -344,3 +344,8 @@ class VllmRunner:
 @pytest.fixture(scope="session")
 def vllm_runner():
     return VllmRunner
+
+
+@pytest.fixture(params=list(PROMPT_TEMPLATES.keys()))
+def prompt_template(request):
+    return PROMPT_TEMPLATES[request.param]
```
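For context on the new fixture: `params=` on a pytest fixture fans out every test that requests it, one run per entry, with `request.param` holding the current key. A minimal, self-contained sketch of the same mechanism (the `TEMPLATES`/`template` names below are illustrative, not part of this PR):

```python
import pytest

# Toy stand-in for PROMPT_TEMPLATES: one named template function.
TEMPLATES = {"qwen2.5vl": lambda qs: [f"<q>{q}</q>" for q in qs]}


@pytest.fixture(params=list(TEMPLATES.keys()))
def template(request):
    # pytest runs each dependent test once per key in TEMPLATES;
    # request.param is the key for the current run.
    return TEMPLATES[request.param]


def test_template_renders(template):
    assert template(["hi"]) == ["<q>hi</q>"]
```

With only one key registered today, each dependent test runs once, but templates added to `PROMPT_TEMPLATES` later will be picked up automatically.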
```diff
@@ -18,7 +18,7 @@
 #
 
 import warnings
-from typing import Dict, List, Optional, Sequence, Tuple, Union
+from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
 
 import torch
 from vllm.config import ModelConfig, TaskOption
@@ -301,3 +301,16 @@ def build_model_context(model_name: str,
         limit_mm_per_prompt=limit_mm_per_prompt,
     )
     return InputContext(model_config)
+
+
+def qwen_prompt(questions: List[str]) -> List[str]:
+    placeholder = "<|image_pad|>"
+    return [("<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
+             f"<|im_start|>user\n<|vision_start|>{placeholder}<|vision_end|>"
+             f"{q}<|im_end|>\n<|im_start|>assistant\n") for q in questions]
+
+
+# Map of prompt templates for different models.
+PROMPT_TEMPLATES: dict[str, Callable] = {
+    "qwen2.5vl": qwen_prompt,
+}
```
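To make the template concrete, here is the exact string `qwen_prompt` renders for one question. The comment on `<|image_pad|>` expansion reflects how Qwen2-VL-family processors generally behave and is background, not something the diff states:

```python
from typing import List


def qwen_prompt(questions: List[str]) -> List[str]:
    placeholder = "<|image_pad|>"
    return [("<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
             f"<|im_start|>user\n<|vision_start|>{placeholder}<|vision_end|>"
             f"{q}<|im_end|>\n<|im_start|>assistant\n") for q in questions]


# <|vision_start|>/<|vision_end|> bracket the single <|image_pad|> token,
# which the multimodal processor later expands to one placeholder per
# image patch before the patch embeddings are spliced in.
assert qwen_prompt(["What's in the image?"])[0] == (
    "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
    "<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>"
    "What's in the image?<|im_end|>\n<|im_start|>assistant\n")
```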
```diff
@@ -24,6 +24,7 @@ import os
 
 import pytest
 import vllm  # noqa: F401
+from vllm.assets.image import ImageAsset
 
 import vllm_ascend  # noqa: F401
 from tests.conftest import VllmRunner
@@ -32,6 +33,7 @@ MODELS = [
     "Qwen/Qwen2.5-0.5B-Instruct",
     "vllm-ascend/Qwen2.5-0.5B-Instruct-w8a8",
 ]
+MULTIMODALITY_MODELS = ["Qwen/Qwen2.5-VL-3B-Instruct"]
 os.environ["VLLM_USE_MODELSCOPE"] = "True"
 os.environ["PYTORCH_NPU_ALLOC_CONF"] = "max_split_size_mb:256"
 
@@ -55,6 +57,32 @@ def test_models(model: str, dtype: str, max_tokens: int) -> None:
         vllm_model.generate_greedy(example_prompts, max_tokens)
 
 
+@pytest.mark.parametrize("model", MULTIMODALITY_MODELS)
+@pytest.mark.skipif(os.getenv("VLLM_USE_V1") == "1",
+                    reason="qwen2.5_vl is not supported on v1")
+def test_multimodal(model, prompt_template, vllm_runner):
+    image = ImageAsset("cherry_blossom") \
+        .pil_image.convert("RGB")
+    img_questions = [
+        "What is the content of this image?",
+        "Describe the content of this image in detail.",
+        "What's in the image?",
+        "Where is this image taken?",
+    ]
+    images = [image] * len(img_questions)
+    prompts = prompt_template(img_questions)
+    with vllm_runner(model,
+                     max_model_len=4096,
+                     mm_processor_kwargs={
+                         "min_pixels": 28 * 28,
+                         "max_pixels": 1280 * 28 * 28,
+                         "fps": 1,
+                     }) as vllm_model:
+        vllm_model.generate_greedy(prompts=prompts,
+                                   images=images,
+                                   max_tokens=64)
+
+
 if __name__ == "__main__":
     import pytest
     pytest.main([__file__])
```
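A note on the `mm_processor_kwargs` bounds: for the Qwen2-VL family, the processor resizes each image so its pixel count stays within `[min_pixels, max_pixels]`, and (assuming the model's usual 2x2 patch merging) each 28x28-pixel block becomes one visual token. Under that assumption, the chosen `max_pixels` caps an image at roughly 1280 visual tokens, well within `max_model_len=4096`:

```python
# Back-of-the-envelope check of the test's pixel bounds. Assumes the
# Qwen2-VL-family convention of one visual token per 28x28-pixel block
# (14x14 ViT patches merged 2x2); this is background, not from the diff.
TOKEN_EDGE = 28                      # pixels per visual-token edge
min_pixels = 28 * 28                 # at least one visual token per image
max_pixels = 1280 * 28 * 28          # the value used in the test
max_visual_tokens = max_pixels // (TOKEN_EDGE * TOKEN_EDGE)
assert max_visual_tokens == 1280     # comfortably under max_model_len=4096
```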