### What this PR does / why we need it?
Clean up the e2e CI tests:
1. tests/e2e/singlecard/pooling/test_embedding.py: remove the eager
parameter (see the sketch after this list) and rename the test case
2. tests/e2e/singlecard/pooling/test_scoring.py: Rename test cases
3. tests/e2e/singlecard/pooling/test_classification.py: Rename test case
4. tests/e2e/singlecard/test_quantization.py: remove the eager parameter,
change the model to vllm-ascend/Qwen2.5-0.6B-W8A8, and rename the test case
5. tests/e2e/multicard/test_shared_expert_dp.py: Rename test cases
6. tests/e2e/singlecard/test_sampler.py: Rename test cases
7. tests/e2e/singlecard/test_aclgraph_accuracy.py: Rename test cases
8. tests/e2e/multicard/test_offline_inference_distributed.py: Rename
test cases and remove the eager parameter
9. tests/e2e/multicard/long_sequence/test_accuracy.py: Rename test cases
and remove the eager parameter
10. tests/e2e/multicard/long_sequence/test_basic.py: Rename test cases
and remove the eager parameter
11. tests/e2e/multicard/test_expert_parallel.py: remove the eager parameter
12. tests/e2e/multicard/test_full_graph_mode.py: remove the eager parameter
13. tests/e2e/multicard/test_ilama_lora_tp2.py: remove the eager parameter
14. tests/e2e/singlecard/spec_decode_v1/test_v1_mtp_correctness.py: remove the eager parameter
15. tests/e2e/singlecard/spec_decode_v1/test_v1_spec_decode.py: remove the eager parameter
16. tests/e2e/singlecard/test_aclgraph_accuracy.py: remove the eager parameter
17. tests/e2e/singlecard/test_camem.py: remove the eager parameter
18. tests/e2e/singlecard/test_ilama_lora.py: remove the eager parameter
19. tests/e2e/singlecard/test_multistream_overlap_shared_expert.py: remove the eager parameter
20. tests/e2e/singlecard/test_vlm.py: remove the eager parameter
21. tests/e2e/singlecard/test_xli: remove the eager parameter
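
For context, "remove the eager parameter" means the affected tests no longer take an `enforce_eager` knob and simply run on the default execution path. A minimal before/after sketch, with the caveat that the model name, prompts, and test name below are placeholders, the "before" shape (a `pytest.mark.parametrize` over `enforce_eager`) is an assumption about how the knob was exposed, and the runner helpers live in `tests/e2e/conftest.py`:

```python
# Illustrative sketch only; MODEL, PROMPTS, and test_models are placeholders.
from tests.e2e.conftest import VllmRunner

MODEL = "Qwen/Qwen2.5-0.5B-Instruct"  # placeholder model
PROMPTS = ["Hello, my name is"]

# Before (assumed shape, removed by this PR): eager mode was a test parameter.
# @pytest.mark.parametrize("enforce_eager", [True, False])
# def test_models(enforce_eager: bool) -> None:
#     with VllmRunner(MODEL, enforce_eager=enforce_eager) as runner:
#         runner.generate_greedy(PROMPTS, max_tokens=32)


# After: the eager parameter is dropped and the default execution path is used.
def test_models() -> None:
    with VllmRunner(MODEL) as runner:
        runner.generate_greedy(PROMPTS, max_tokens=32)
```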
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: release/v0.13.0
- vLLM main: ad32e3e19c
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
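For reference, the reworked pooling classification test reads as follows: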
```python
import torch
from modelscope import snapshot_download  # type: ignore[import-untyped]
from transformers import AutoModelForSequenceClassification

from tests.e2e.conftest import HfRunner, VllmRunner


def test_qwen_pooling_classify_correctness() -> None:
    model_name = snapshot_download("Howeee/Qwen2.5-1.5B-apeach")
    prompts = [
        "Hello, my name is",
        "The president of the United States is",
        "The capital of France is",
        "The future of AI is what",
    ]

    # Classify the prompts with vLLM's pooling runner, pinning the graph
    # capture size to 4.
    with VllmRunner(
            model_name,
            runner="pooling",
            max_model_len=None,
            cudagraph_capture_sizes=[4],
    ) as vllm_runner:
        vllm_outputs = vllm_runner.classify(prompts)

    # Classify the same prompts with a float32 Hugging Face reference model.
    with HfRunner(model_name,
                  dtype="float32",
                  auto_cls=AutoModelForSequenceClassification) as hf_runner:
        hf_outputs = hf_runner.classify(prompts)

    # Each pair of outputs must agree within a 1e-2 relative tolerance.
    for hf_output, vllm_output in zip(hf_outputs, vllm_outputs):
        hf_output = torch.tensor(hf_output)
        vllm_output = torch.tensor(vllm_output)
        assert torch.allclose(hf_output, vllm_output, 1e-2)
```