add qwen3
@@ -0,0 +1,88 @@
"""Compare the outputs of HF and vLLM when using greedy sampling.

Run `pytest tests/models/test_models.py`.
"""
import pytest

from ...utils import check_logprobs_close
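# A minimal sketch of the closeness criterion that check_logprobs_close
# applies, for readers of this diff. This is an assumption about its
# behavior, not the actual implementation (which lives in tests/utils.py):
# when the two runs first disagree on a sampled token, the disagreement is
# tolerated only if each run's token still appears among the other run's
# top-k candidate logprobs.
def _logprobs_close_sketch(tokens_0, tokens_1, logprobs_0, logprobs_1):
    # Hypothetical helper for illustration only; not used by the test below.
    for i, (t0, t1) in enumerate(zip(tokens_0, tokens_1)):
        if t0 != t1:
            # Greedy outputs diverged: accept only if each token is still a
            # plausible (top-k) candidate under the other model here.
            return t0 in logprobs_1[i] and t1 in logprobs_0[i]
    return True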
'''
=============================
Modified by vllm_mlu
=============================
@brief(MODELS): Only test gpt2, Llama-3.2-1B-Instruct, and opt-125m.
'''
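# Note: the stacked pytest.mark.parametrize decorators below form a cartesian
# product, so with three active models, one dtype, one max_tokens value and
# one num_logprobs value, this file generates three test cases.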
@pytest.mark.parametrize(
    "model",
    [
        # pytest.param(
        #     "bigscience/bloom-560m",  # bloom - testing alibi slopes
        #     marks=[pytest.mark.core_model, pytest.mark.cpu_model],
        # ),
        pytest.param(
            "openai-community/gpt2",  # gpt2
            marks=[pytest.mark.core_model, pytest.mark.cpu_model],
        ),
        # pytest.param("Milos/slovak-gpt-j-405M"),  # gptj
        # pytest.param("bigcode/tiny_starcoder_py"),  # gpt_bigcode
        # pytest.param("EleutherAI/pythia-70m"),  # gpt_neox
        # pytest.param(
        #     "google/gemma-1.1-2b-it",  # gemma
        #     marks=[pytest.mark.core_model, pytest.mark.cpu_model],
        # ),
        pytest.param(
            "meta-llama/Llama-3.2-1B-Instruct",  # llama
            marks=[pytest.mark.core_model, pytest.mark.cpu_model],
        ),
        # pytest.param(
        #     "openbmb/MiniCPM3-4B",
        #     # fused_moe not supported on CPU
        #     marks=[pytest.mark.core_model],
        # ),
        pytest.param(
            "facebook/opt-125m",  # opt
            marks=[pytest.mark.core_model, pytest.mark.cpu_model],
        ),
        # pytest.param(
        #     "microsoft/phi-2",  # phi
        #     marks=[pytest.mark.core_model],
        # ),
        # pytest.param(
        #     "Qwen/Qwen2.5-0.5B-Instruct",  # qwen2
        #     marks=[pytest.mark.core_model],
        # ),
        # pytest.param("stabilityai/stablelm-3b-4e1t"),  # stablelm
        # pytest.param("bigcode/starcoder2-3b"),  # starcoder2
    ])
@pytest.mark.parametrize("dtype", ["half"])
|
||||
@pytest.mark.parametrize("max_tokens", [32])
|
||||
@pytest.mark.parametrize("num_logprobs", [5])
|
||||
def test_models(
|
||||
hf_runner,
|
||||
vllm_runner,
|
||||
example_prompts,
|
||||
model: str,
|
||||
dtype: str,
|
||||
max_tokens: int,
|
||||
num_logprobs: int,
|
||||
) -> None:
|
||||
|
||||
with hf_runner(model, dtype=dtype) as hf_model:
|
||||
hf_outputs = hf_model.generate_greedy_logprobs_limit(
|
||||
example_prompts, max_tokens, num_logprobs)
|
||||
|
||||
with vllm_runner(model, dtype=dtype) as vllm_model:
|
||||
vllm_outputs = vllm_model.generate_greedy_logprobs(
|
||||
example_prompts, max_tokens, num_logprobs)
|
||||
# This test is for verifying whether the model's extra_repr
|
||||
# can be printed correctly.
|
||||
print(vllm_model.model.llm_engine.model_executor.driver_worker.
|
||||
model_runner.model)
|
||||
|
||||
check_logprobs_close(
|
||||
outputs_0_lst=hf_outputs,
|
||||
outputs_1_lst=vllm_outputs,
|
||||
name_0="hf",
|
||||
name_1="vllm",
|
||||
)
|
||||
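# To exercise a single model from this matrix, filter with pytest's -k flag,
# e.g. `pytest tests/models/test_models.py -k gpt2`; the path is the one
# given in the module docstring above.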