### What this PR does / why we need it?
**Scope of Changes**:
| File Path |
| :--- |
| `tests/e2e/310p/multicard/test_vl_model_multicard.py` |
| `tests/e2e/310p/singlecard/test_vl_model_singlecard.py` |
| `tests/e2e/310p/test_utils.py` |
| `tests/e2e/conftest.py` |
| `tests/e2e/model_utils.py` |
| `tests/e2e/models/conftest.py` |
| `tests/e2e/models/test_lm_eval_correctness.py` |
| `tests/e2e/multicard/2-cards/spec_decode/test_spec_decode.py` |
| `tests/e2e/multicard/2-cards/test_aclgraph_capture_replay.py` |
| `tests/e2e/multicard/2-cards/test_data_parallel.py` |
| `tests/e2e/multicard/2-cards/test_disaggregated_encoder.py` |
| `tests/e2e/multicard/2-cards/test_expert_parallel.py` |
| `tests/e2e/multicard/2-cards/test_external_launcher.py` |
| `tests/e2e/multicard/2-cards/test_full_graph_mode.py` |
| `tests/e2e/multicard/2-cards/test_ilama_lora_tp2.py` |
| `tests/e2e/multicard/2-cards/test_offline_inference_distributed.py` |
| `tests/e2e/multicard/2-cards/test_offline_weight_load.py` |
| `tests/e2e/multicard/2-cards/test_pipeline_parallel.py` |
| `tests/e2e/multicard/2-cards/test_prefix_caching.py` |
| `tests/e2e/multicard/2-cards/test_quantization.py` |
| `tests/e2e/multicard/2-cards/test_qwen3_moe.py` |
| `tests/e2e/multicard/2-cards/test_qwen3_moe_routing_replay.py` |
| `tests/e2e/multicard/2-cards/test_qwen3_performance.py` |
| `tests/e2e/multicard/2-cards/test_shared_expert_dp.py` |
| `tests/e2e/multicard/2-cards/test_single_request_aclgraph.py` |
| `tests/e2e/multicard/2-cards/test_sp_pass.py` |
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.15.0
- vLLM main: 9562912cea
Signed-off-by: MrZ20 <2609716663@qq.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
@@ -15,28 +15,23 @@
|
||||
# limitations under the License.
|
||||
# This file is a part of the vllm-ascend project.
|
||||
|
||||
import sys
|
||||
import os
|
||||
import sys
|
||||
|
||||
# Add 310p directory to sys.path
|
||||
current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
parent_dir = os.path.dirname(current_dir) # 310p directory
|
||||
sys.path.insert(0, parent_dir)
|
||||
|
||||
# ruff: noqa: E402
|
||||
from test_utils import run_vl_model_test
|
||||
|
||||
|
||||
def test_qwen3_vl_8b_tp2_fp16():
    """Run Qwen3-VL-8B-Instruct on two cards (TP=2) with FP16 weights.

    Delegates the whole flow (model load, image prompt, greedy decode)
    to the shared ``run_vl_model_test`` helper; generation is capped at
    5 tokens to keep this e2e smoke test fast.
    """
    run_vl_model_test(
        model_name="Qwen/Qwen3-VL-8B-Instruct",
        tensor_parallel_size=2,
        max_tokens=5,
    )
|
||||
|
||||
|
||||
def test_qwen3_vl_32b_tp1_fp16():
    """Run Qwen3-VL-32B-Instruct across four cards (TP=4) with FP16 weights.

    Uses the shared ``run_vl_model_test`` helper with a 5-token cap so
    the e2e smoke test stays fast.
    """
    # NOTE(review): the function name says "tp1" but the test actually runs
    # with tensor_parallel_size=4 (and the original docstring says "4-card").
    # The name looks stale — consider renaming to test_qwen3_vl_32b_tp4_fp16
    # in a follow-up (kept as-is here to avoid changing the test's identifier).
    run_vl_model_test(
        model_name="Qwen/Qwen3-VL-32B-Instruct",
        tensor_parallel_size=4,
        max_tokens=5,
    )
|
||||
|
||||
@@ -15,20 +15,18 @@
|
||||
# limitations under the License.
|
||||
# This file is a part of the vllm-ascend project.
|
||||
|
||||
import sys
|
||||
import os
|
||||
import sys
|
||||
|
||||
# Add 310p directory to sys.path
|
||||
current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
parent_dir = os.path.dirname(current_dir) # 310p directory
|
||||
sys.path.insert(0, parent_dir)
|
||||
|
||||
# ruff: noqa: E402
|
||||
from test_utils import run_vl_model_test
|
||||
|
||||
|
||||
def test_qwen3_vl_8b_tp1_fp16():
    """Run Qwen3-VL-8B-Instruct on a single card (TP=1) with FP16 weights.

    Delegates to the shared ``run_vl_model_test`` helper; generation is
    capped at 5 tokens to keep this e2e smoke test fast.
    """
    run_vl_model_test(
        model_name="Qwen/Qwen3-VL-8B-Instruct",
        tensor_parallel_size=1,
        max_tokens=5,
    )
|
||||
|
||||
@@ -15,10 +15,12 @@
|
||||
# limitations under the License.
|
||||
# This file is a part of the vllm-ascend project.
|
||||
|
||||
from tests.e2e.conftest import VllmRunner
|
||||
from PIL import Image
|
||||
import os
|
||||
|
||||
from PIL import Image
|
||||
|
||||
from tests.e2e.conftest import VllmRunner
|
||||
|
||||
|
||||
def get_test_image():
|
||||
"""Get the image object for testing"""
|
||||
@@ -32,14 +34,12 @@ def get_test_prompts():
|
||||
return ["<|image_pad|>Describe this image in detail."]
|
||||
|
||||
|
||||
def run_vl_model_test(model_name: str,
|
||||
tensor_parallel_size: int,
|
||||
max_tokens: int,
|
||||
dtype: str = "float16",
|
||||
enforce_eager: bool = True):
|
||||
def run_vl_model_test(
|
||||
model_name: str, tensor_parallel_size: int, max_tokens: int, dtype: str = "float16", enforce_eager: bool = True
|
||||
):
|
||||
"""
|
||||
Generic visual language model test function
|
||||
|
||||
|
||||
Args:
|
||||
model_name: Model name, e.g., "Qwen/Qwen3-VL-4B"
|
||||
tensor_parallel_size: Tensor parallel size
|
||||
@@ -52,9 +52,6 @@ def run_vl_model_test(model_name: str,
|
||||
prompts = get_test_prompts()
|
||||
|
||||
with VllmRunner(
|
||||
model_name,
|
||||
tensor_parallel_size=tensor_parallel_size,
|
||||
enforce_eager=enforce_eager,
|
||||
dtype=dtype
|
||||
model_name, tensor_parallel_size=tensor_parallel_size, enforce_eager=enforce_eager, dtype=dtype
|
||||
) as vllm_model:
|
||||
vllm_model.generate_greedy(prompts, max_tokens, images=images)
|
||||
vllm_model.generate_greedy(prompts, max_tokens, images=images)
|
||||
|
||||
Reference in New Issue
Block a user