From d5609e2c489cefbe8469409e119fcaa5e6754b1a Mon Sep 17 00:00:00 2001 From: whx <56632993+whx-sjtu@users.noreply.github.com> Date: Sat, 25 Oct 2025 10:34:59 +0800 Subject: [PATCH] [BugFix] Comment out newly added vlm e2e. (#3736) This PR comments out the newly added vlm e2e test of the ascend scheduler scenario because I found that when running in multi-batch this will get stuck. This needs to be added back after the issue is resolved. - vLLM version: v0.11.0rc3 - vLLM main: https://github.com/vllm-project/vllm/commit/17c540a993af88204ad1b78345c8a865cf58ce44 Signed-off-by: whx-sjtu <2952154980@qq.com> --- tests/e2e/singlecard/test_vlm.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/e2e/singlecard/test_vlm.py b/tests/e2e/singlecard/test_vlm.py index 8808d1e0..cc3d50f8 100644 --- a/tests/e2e/singlecard/test_vlm.py +++ b/tests/e2e/singlecard/test_vlm.py @@ -20,7 +20,7 @@ Run `pytest tests/test_offline_inference.py`. """ - +import pytest from vllm import SamplingParams from vllm.assets.audio import AudioAsset from vllm.assets.image import ImageAsset @@ -55,6 +55,8 @@ def test_multimodal_vl(prompt_template): assert output_str, "Generated output should not be empty." +@pytest.mark.skip(reason="This e2e test will stuck in multi-batch scenario. " + "Add this back after fixing the issue.") def test_multimodal_ascend_scheduler(prompt_template): image = ImageAsset("cherry_blossom") \ .pil_image.convert("RGB")