[Bugfix] Fix qwen2.5-vl-without-padding (#2623)

### What this PR does / why we need it?
Correct the overridden methods of
`AscendQwen2_5_VLForConditionalGeneration_Without_Padding`
### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.10.1.1
- vLLM main:
42dc59dbac

---------

Signed-off-by: wangli <wangli858794774@gmail.com>
This commit is contained in:
Li Wang
2025-09-03 14:38:55 +08:00
committed by GitHub
parent bece793be6
commit 3584306387
2 changed files with 43 additions and 19 deletions

View File

@@ -2,6 +2,8 @@ import pytest
import torch
import torch.nn.functional as F
from pytest_mock import MockerFixture
from vllm.model_executor.models.qwen2_5_vl import \
Qwen2_5_VLForConditionalGeneration
from tests.ut.base import PytestBase
from vllm_ascend.models.qwen2_5_vl_without_padding import (
@@ -396,3 +398,25 @@ class TestAscendQwen2_5_VLForConditionalGeneration_Without_Padding(PytestBase):
vl_for_conditional_generation,
AscendQwen2_5_VLForConditionalGeneration_Without_Padding,
)
def test_overridden_methods(self):
    """Verify the Ascend subclass re-implements both multimodal processors."""
    # Both image and video processing must be overridden by the Ascend variant.
    for overridden in ("_process_image_input", "_process_video_input"):
        self.assert_method_overridden(
            AscendQwen2_5_VLForConditionalGeneration_Without_Padding,
            Qwen2_5_VLForConditionalGeneration,
            overridden,
        )
@staticmethod
def assert_method_overridden(subclass, parent, method_name: str):
"""assert subclass override parent method"""
parent_func = parent.__dict__.get(method_name)
child_func = subclass.__dict__.get(method_name)
assert child_func is not None, f"{subclass.__name__} should defined {method_name}"
assert child_func is not parent_func, f"{method_name} should override in {subclass.__name__}"

View File

@@ -320,6 +320,25 @@ class AscendQwen2_5_VisionTransformer_Without_Padding(Qwen2_5_VisionTransformer
x = x[reverse_indices, :]
return x
@MULTIMODAL_REGISTRY.register_processor(
Qwen2_5_VLMultiModalProcessor,
info=Qwen2_5_VLProcessingInfo,
dummy_inputs=Qwen2_5_VLDummyInputsBuilder)
class AscendQwen2_5_VLForConditionalGeneration_Without_Padding(
Qwen2_5_VLForConditionalGeneration):
def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
    """Build the base Qwen2.5-VL model, then swap in the Ascend vision tower.

    Args:
        vllm_config: Engine-wide configuration; ``model_config.hf_config``
            carries the HuggingFace ``Qwen2_5_VLConfig``.
        prefix: Weight-name prefix for this module within the parent model.
    """
    # Parent constructor builds the full model, including its own `visual`
    # module; it is replaced below with the no-padding Ascend variant.
    super().__init__(vllm_config=vllm_config, prefix=prefix)
    config: Qwen2_5_VLConfig = vllm_config.model_config.hf_config
    quant_config = vllm_config.quant_config
    self.visual = AscendQwen2_5_VisionTransformer_Without_Padding(
        vision_config=config.vision_config,
        # Fall back to 1e-6 when the HF config lacks `rms_norm_eps`.
        norm_eps=getattr(config, "rms_norm_eps", 1e-6),
        # NOTE(review): presumably drops quantization for the vision tower
        # when unsupported — confirm against _maybe_ignore_quant_config.
        quant_config=self._maybe_ignore_quant_config(quant_config),
        prefix=maybe_prefix(prefix, "visual"),
    )
def _process_image_input(self, image_input) -> tuple[torch.Tensor, ...]:
grid_thw = image_input["image_grid_thw"]
@@ -352,22 +371,3 @@ class AscendQwen2_5_VisionTransformer_Without_Padding(Qwen2_5_VisionTransformer
merge_size = self.visual.spatial_merge_size
sizes = grid_thw.prod(-1) // merge_size // merge_size
return video_embeds.split(sizes.tolist())
@MULTIMODAL_REGISTRY.register_processor(
Qwen2_5_VLMultiModalProcessor,
info=Qwen2_5_VLProcessingInfo,
dummy_inputs=Qwen2_5_VLDummyInputsBuilder)
class AscendQwen2_5_VLForConditionalGeneration_Without_Padding(
Qwen2_5_VLForConditionalGeneration):
def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
    """Build the base Qwen2.5-VL model, then swap in the Ascend vision tower.

    Args:
        vllm_config: Engine-wide configuration; ``model_config.hf_config``
            carries the HuggingFace ``Qwen2_5_VLConfig``.
        prefix: Weight-name prefix for this module within the parent model.
    """
    # Parent constructor builds the full model, including its own `visual`
    # module; it is replaced below with the no-padding Ascend variant.
    super().__init__(vllm_config=vllm_config, prefix=prefix)
    config: Qwen2_5_VLConfig = vllm_config.model_config.hf_config
    quant_config = vllm_config.quant_config
    self.visual = AscendQwen2_5_VisionTransformer_Without_Padding(
        vision_config=config.vision_config,
        # Fall back to 1e-6 when the HF config lacks `rms_norm_eps`.
        norm_eps=getattr(config, "rms_norm_eps", 1e-6),
        # NOTE(review): presumably drops quantization for the vision tower
        # when unsupported — confirm against _maybe_ignore_quant_config.
        quant_config=self._maybe_ignore_quant_config(quant_config),
        prefix=maybe_prefix(prefix, "visual"),
    )