[MM][Patch] Remove patch for cos/sin cache (#4672)

### What this PR does / why we need it?
Remove the patch for https://github.com/vllm-project/vllm/pull/28798 (pre-computed cos/sin cache for the vision rotary embedding), which is no longer needed.
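
For context, the patch being deleted below re-implemented the lookup into vLLM's pre-computed rotary cos/sin cache for vision tokens: `rot_pos_emb()` fetched `cos`/`sin` via `self.rotary_pos_emb.get_cos_sin(...)` and gathered them by each token's (h, w) grid position. A minimal standalone sketch of that gather step (the helper name and tensor shapes here are illustrative assumptions, not vLLM API):

```python
import torch


def gather_vision_cos_sin(
    cos: torch.Tensor,  # (max_grid_size, rotary_dim // 2), pre-computed cache
    sin: torch.Tensor,  # (max_grid_size, rotary_dim // 2), pre-computed cache
    pos_ids: torch.Tensor,  # (num_tokens, 2) holding per-token (h, w) indices
) -> tuple[torch.Tensor, torch.Tensor]:
    # Index the cache by height and width positions, then concatenate the two
    # halves, mirroring what rot_pos_emb() did in the deleted code below.
    cos_combined = torch.cat([cos[pos_ids[:, 0]], cos[pos_ids[:, 1]]], dim=-1)
    sin_combined = torch.cat([sin[pos_ids[:, 0]], sin[pos_ids[:, 1]]], dim=-1)
    return cos_combined, sin_combined  # each: (num_tokens, rotary_dim)
```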

- vLLM version: v0.12.0

Signed-off-by: shen-shanshan <467638484@qq.com>

Shanshan Shen authored on 2025-12-04 22:30:06 +08:00, committed by GitHub.
Parent: b3e1377a92, commit: fb15fec662
2 changed files with 6 additions and 666 deletions

Changed file 1 of 2:

@@ -15,38 +15,16 @@
# limitations under the License.
#
from functools import lru_cache, partial
import einops
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_npu
from transformers.models.qwen2_5_vl.configuration_qwen2_5_vl import \
    Qwen2_5_VLVisionConfig
from transformers.models.qwen2_vl.configuration_qwen2_vl import \
    Qwen2VLVisionConfig
from vllm.attention.backends.registry import AttentionBackendEnum
from vllm.attention.layer import maybe_get_vit_flash_attn_backend
from vllm.model_executor.layers.activation import get_act_and_mul_fn
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.rotary_embedding.common import (
    apply_rotary_emb_torch, dispatch_rotary_emb_function)
from vllm.model_executor.models.qwen2_5_vl import (
    Qwen2_5_VisionAttention, Qwen2_5_VisionBlock, Qwen2_5_VisionPatchEmbed,
    Qwen2_5_VisionPatchMerger, Qwen2_5_VisionTransformer,
    Qwen2_5_VLForConditionalGeneration, Qwen2_5_VLImageInputs,
    Qwen2_5_VLVideoInputs)
from vllm.model_executor.models.qwen2_vl import (Qwen2VisionAttention,
                                                 Qwen2VisionBlock,
                                                 Qwen2VisionPatchEmbed,
                                                 Qwen2VisionPatchMerger,
                                                 Qwen2VisionTransformer)
from vllm.model_executor.models.utils import cast_overflow_tensors
from vllm.model_executor.models.vision import (
    get_vit_attn_backend, run_dp_sharded_mrope_vision_model)
    Qwen2_5_VisionAttention, Qwen2_5_VLForConditionalGeneration,
    Qwen2_5_VLImageInputs, Qwen2_5_VLVideoInputs)
from vllm.model_executor.models.qwen2_vl import Qwen2VisionAttention
from vllm.model_executor.models.vision import run_dp_sharded_mrope_vision_model
import vllm_ascend.envs as envs_ascend
from vllm_ascend.ascend_forward_context import set_ascend_forward_context
@@ -130,468 +108,6 @@ class AscendQwen2_5_VisionAttention(nn.Module):
        return output


class AscendQwen2VisionBlock(nn.Module):

    def forward(
        self,
        x: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb_cos: torch.Tensor,
        rotary_pos_emb_sin: torch.Tensor,
        max_seqlen: int | None = None,  # Only used for Flash Attention
    ) -> torch.Tensor:
        x = x + self.attn(
            self.norm1(x),
            cu_seqlens=cu_seqlens,
            rotary_pos_emb_cos=rotary_pos_emb_cos,
            rotary_pos_emb_sin=rotary_pos_emb_sin,
            max_seqlen=max_seqlen,
        )
        x = x + self.mlp(self.norm2(x))
        return x


class AscendQwen2VisionTransformer(nn.Module):

    def __init__(
        self,
        vision_config: Qwen2VLVisionConfig,
        norm_eps: float = 1e-6,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        use_data_parallel: bool = False,
        attn_backend_override: AttentionBackendEnum | None = None,
    ) -> None:
        nn.Module.__init__(self)
        patch_size = vision_config.patch_size
        temporal_patch_size = vision_config.temporal_patch_size
        spatial_merge_size = vision_config.spatial_merge_size
        in_channels = vision_config.in_channels
        hidden_size = vision_config.hidden_size
        embed_dim = vision_config.embed_dim
        depth = vision_config.depth
        num_heads = vision_config.num_heads
        mlp_ratio = vision_config.mlp_ratio
        self.use_data_parallel = use_data_parallel
        self.out_hidden_size = vision_config.hidden_size
        self.spatial_merge_size = spatial_merge_size
        self.num_heads = num_heads
        self.embed_dim = embed_dim
        self.patch_embed = Qwen2VisionPatchEmbed(
            patch_size=patch_size,
            temporal_patch_size=temporal_patch_size,
            in_channels=in_channels,
            embed_dim=embed_dim,
        )
        norm_layer = partial(nn.LayerNorm, eps=norm_eps)
        head_dim = embed_dim // num_heads
        self.rotary_pos_emb = get_rope(
            head_size=head_dim,
            rotary_dim=head_dim // 2,
            max_position=8192,
            is_neox_style=True,
        )
        self.blocks = nn.ModuleList([
            Qwen2VisionBlock(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                norm_layer=norm_layer,
                quant_config=quant_config,
                prefix=f"{prefix}.blocks.{layer_idx}",
                use_data_parallel=use_data_parallel,
                attn_backend_override=attn_backend_override,
            ) for layer_idx in range(depth)
        ])
        self.merger = Qwen2VisionPatchMerger(
            d_model=hidden_size,
            context_dim=embed_dim,
            norm_layer=norm_layer,
            quant_config=quant_config,
            prefix=f"{prefix}.merger",
            use_data_parallel=use_data_parallel,
        )
        self.attn_backend = get_vit_attn_backend(
            head_size=head_dim,
            dtype=torch.get_default_dtype(),
            attn_backend_override=attn_backend_override,
        )

    def rot_pos_emb(
            self,
            grid_thw: list[list[int]]) -> tuple[torch.Tensor, torch.Tensor]:
        pos_ids = []
        max_grid_size = 0
        for t, h, w in grid_thw:
            hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
            wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
            hpos_ids = (hpos_ids.reshape(
                h // self.spatial_merge_size,
                self.spatial_merge_size,
                w // self.spatial_merge_size,
                self.spatial_merge_size,
            ).permute(0, 2, 1, 3).flatten())
            wpos_ids = (wpos_ids.reshape(
                h // self.spatial_merge_size,
                self.spatial_merge_size,
                w // self.spatial_merge_size,
                self.spatial_merge_size,
            ).permute(0, 2, 1, 3).flatten())
            pos_ids.append(
                torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
            max_grid_size = max(max_grid_size, h, w)
        pos_ids = torch.cat(pos_ids, dim=0)
        # Use pre-computed cos_sin_cache from RotaryEmbedding
        cos, sin = self.rotary_pos_emb.get_cos_sin(max_grid_size)
        # (num_tokens, rotary_dim // 2)
        cos_h = cos[pos_ids[:, 0]]  # type: ignore
        cos_w = cos[pos_ids[:, 1]]  # type: ignore
        sin_h = sin[pos_ids[:, 0]]  # type: ignore
        sin_w = sin[pos_ids[:, 1]]  # type: ignore
        cos_combined = torch.cat([cos_h, cos_w], dim=-1)
        sin_combined = torch.cat([sin_h, sin_w], dim=-1)
        return cos_combined, sin_combined

    def forward(
        self,
        x: torch.Tensor,
        grid_thw: torch.Tensor | list[list[int]],
    ) -> torch.Tensor:
        # patchify
        x = x.to(device=self.device, dtype=self.dtype)
        x = self.patch_embed(x)
        if isinstance(grid_thw, list):
            grid_thw_list = grid_thw
            grid_thw = torch.tensor(grid_thw, dtype=torch.int32)
        else:
            grid_thw_list = grid_thw.tolist()
        # compute position embedding
        rotary_pos_emb_cos, rotary_pos_emb_sin = self.rot_pos_emb(
            grid_thw_list)
        # compute cu_seqlens
        cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2],
                                             grid_thw[:, 0]).cumsum(
                                                 dim=0, dtype=torch.int32)
        cu_seqlens = torch.cat([cu_seqlens.new_zeros(1), cu_seqlens])
        cu_seqlens = cu_seqlens.to(self.device, non_blocking=True)
        # transformers
        x = x.unsqueeze(1)
        # pre-compute seqlens for attn mask to reduce cuMemcpy operations
        max_seqlen = self.compute_attn_mask_seqlen(cu_seqlens)
        for blk in self.blocks:
            x = blk(
                x,
                cu_seqlens=cu_seqlens,
                rotary_pos_emb_cos=rotary_pos_emb_cos,
                rotary_pos_emb_sin=rotary_pos_emb_sin,
                max_seqlen=max_seqlen,
            )
        # adapter
        x = self.merger(x)
        return x


class AscendQwen2_5_VisionBlock(nn.Module):

    def forward(
        self,
        x: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb_cos: torch.Tensor,
        rotary_pos_emb_sin: torch.Tensor,
        max_seqlen: torch.Tensor,  # Only used for Flash Attention
    ) -> torch.Tensor:
        x_attn = self.attn(
            self.norm1(x),
            cu_seqlens=cu_seqlens,
            rotary_pos_emb_cos=rotary_pos_emb_cos,
            rotary_pos_emb_sin=rotary_pos_emb_sin,
            max_seqlen=max_seqlen,
        )
        x_fused_norm, residual = self.norm2(x, residual=x_attn)
        x = residual + self.mlp(x_fused_norm)
        return x


class AscendQwen2_5_VisionTransformer(nn.Module):

    def __init__(
        self,
        vision_config: Qwen2_5_VLVisionConfig,
        norm_eps: float = 1e-6,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        use_data_parallel: bool = False,
        attn_backend_override: AttentionBackendEnum | None = None,
    ) -> None:
        nn.Module.__init__(self)
        patch_size = vision_config.patch_size
        temporal_patch_size = vision_config.temporal_patch_size
        in_channels = vision_config.in_channels
        depth = vision_config.depth
        self.hidden_size = vision_config.hidden_size
        self.num_heads = vision_config.num_heads
        self.use_data_parallel = use_data_parallel
        self.out_hidden_size = vision_config.out_hidden_size
        # args for get_window_index_thw
        self.window_size = vision_config.window_size
        self.patch_size = vision_config.patch_size
        self.spatial_merge_size = vision_config.spatial_merge_size
        self.fullatt_block_indexes = vision_config.fullatt_block_indexes
        self.spatial_merge_unit = self.spatial_merge_size**2
        # TODO[@lucaskabela]: Investigate fixing this usage
        # see https://github.com/vllm-project/vllm/issues/27044
        # DO NOT MOVE THIS IMPORT
        from vllm.compilation.backends import set_model_tag
        with set_model_tag("Qwen2_5_VisionPatchEmbed"):
            self.patch_embed = Qwen2_5_VisionPatchEmbed(
                patch_size=patch_size,
                temporal_patch_size=temporal_patch_size,
                in_channels=in_channels,
                hidden_size=self.hidden_size,
            )
        norm_layer = partial(RMSNorm, eps=norm_eps)
        head_dim = self.hidden_size // self.num_heads
        self.rotary_pos_emb = get_rope(
            head_size=head_dim,
            rotary_dim=head_dim // 2,
            max_position=8192,
            is_neox_style=True,
        )
        self.attn_backend = get_vit_attn_backend(
            head_size=head_dim,
            dtype=torch.get_default_dtype(),
            attn_backend_override=attn_backend_override,
        )
        self.attn_backend, self.flash_attn_varlen_func = (
            maybe_get_vit_flash_attn_backend(
                self.attn_backend,
                attn_backend_override=attn_backend_override,
            ))
        with set_model_tag("Qwen2_5_VisionBlock"):
            self.blocks = nn.ModuleList([
                Qwen2_5_VisionBlock(
                    dim=self.hidden_size,
                    num_heads=self.num_heads,
                    mlp_hidden_dim=vision_config.intermediate_size,
                    act_fn=get_act_and_mul_fn(vision_config.hidden_act),
                    norm_layer=norm_layer,
                    quant_config=quant_config,
                    prefix=f"{prefix}.blocks.{layer_idx}",
                    use_data_parallel=use_data_parallel,
                    attn_backend=self.attn_backend,
                    attn_backend_override=attn_backend_override,
                ) for layer_idx in range(depth)
            ])
        with set_model_tag("Qwen2_5_VisionPatchMerger"):
            self.merger = Qwen2_5_VisionPatchMerger(
                d_model=vision_config.out_hidden_size,
                context_dim=self.hidden_size,
                norm_layer=norm_layer,
                spatial_merge_size=self.spatial_merge_size,
                quant_config=quant_config,
                prefix=f"{prefix}.merger",
                use_data_parallel=use_data_parallel,
            )

    def rotary_pos_emb_thw(self, t, h, w):
        hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
        wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
        hpos_ids = (hpos_ids.reshape(
            h // self.spatial_merge_size,
            self.spatial_merge_size,
            w // self.spatial_merge_size,
            self.spatial_merge_size,
        ).permute(0, 2, 1, 3).flatten())
        wpos_ids = (wpos_ids.reshape(
            h // self.spatial_merge_size,
            self.spatial_merge_size,
            w // self.spatial_merge_size,
            self.spatial_merge_size,
        ).permute(0, 2, 1, 3).flatten())
        pos_ids = torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)
        max_size = max(h, w)
        # Use pre-computed cos_sin_cache from RotaryEmbedding
        cos, sin = self.rotary_pos_emb.get_cos_sin(max_size)
        cos_h = cos[pos_ids[:, 0]]  # (num_tokens, rotary_dim // 2)
        cos_w = cos[pos_ids[:, 1]]
        sin_h = sin[pos_ids[:, 0]]
        sin_w = sin[pos_ids[:, 1]]
        cos_combined = torch.cat([cos_h, cos_w], dim=-1)
        sin_combined = torch.cat([sin_h, sin_w], dim=-1)
        cos_combined = cos_combined.reshape(
            cos_combined.shape[0] // self.spatial_merge_unit,
            self.spatial_merge_unit,
            -1,
        )
        sin_combined = sin_combined.reshape(
            sin_combined.shape[0] // self.spatial_merge_unit,
            self.spatial_merge_unit,
            -1,
        )
        return cos_combined, sin_combined

    @lru_cache(maxsize=1024)  # noqa: B019
    def get_rope_by_thw(self, t, h, w):
        window_index_thw, cu_seqlens_window_thw = self.get_window_index_thw(
            t, h, w)
        cos_thw, sin_thw = self.rotary_pos_emb_thw(t, h, w)
        cos_thw = cos_thw[window_index_thw, :, :]
        cos_thw = cos_thw.flatten(start_dim=0, end_dim=1)
        sin_thw = sin_thw[window_index_thw, :, :]
        sin_thw = sin_thw.flatten(start_dim=0, end_dim=1)
        cu_seqlens_thw = torch.repeat_interleave(
            torch.tensor([h * w], dtype=torch.int32), t)
        return (
            cos_thw,
            sin_thw,
            window_index_thw,
            cu_seqlens_window_thw,
            cu_seqlens_thw,
        )

    def forward(
        self,
        x: torch.Tensor,
        grid_thw: list[list[int]],
    ) -> torch.Tensor:
        # patchify
        seq_len, _ = x.size()
        rotary_pos_emb_cos: list = []
        rotary_pos_emb_sin: list = []
        window_index: list = []
        cu_window_seqlens: list = [torch.tensor([0], dtype=torch.int32)]
        cu_seqlens: list = []
        hidden_states = x.to(device=self.device, dtype=self.dtype)
        hidden_states = self.patch_embed(hidden_states)
        window_index_id = 0
        cu_window_seqlens_last = 0
        for t, h, w in grid_thw:
            t, h, w = int(t), int(h), int(w)
            llm_h = h // self.spatial_merge_size
            llm_w = w // self.spatial_merge_size
            (
                cos_thw,
                sin_thw,
                window_index_thw,
                cu_seqlens_window_thw,
                cu_seqlens_thw,
            ) = self.get_rope_by_thw(t, h, w)
            window_index.append(window_index_thw + window_index_id)
            window_index_id += t * llm_h * llm_w
            cu_seqlens_window_thw = cu_seqlens_window_thw + cu_window_seqlens_last
            cu_window_seqlens_last = cu_seqlens_window_thw[-1]
            cu_window_seqlens.append(cu_seqlens_window_thw)
            rotary_pos_emb_cos.append(cos_thw)
            rotary_pos_emb_sin.append(sin_thw)
            cu_seqlens.append(cu_seqlens_thw)
        rotary_pos_emb_cos = torch.cat(rotary_pos_emb_cos)
        rotary_pos_emb_sin = torch.cat(rotary_pos_emb_sin)
        window_index = torch.cat(window_index)
        # compute reverse indices
        reverse_indices = self.invert_permutation(window_index)
        cu_window_seqlens = torch.cat(cu_window_seqlens)
        cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)
        cu_seqlens = torch.cat(cu_seqlens)
        cu_seqlens = torch.cumsum(cu_seqlens, dim=0, dtype=torch.int32)
        cu_seqlens = F.pad(cu_seqlens, (1, 0), "constant", 0)
        # transformers
        # pre-compute seqlens for window/full attn to reduce cuMemcpy operations
        max_seqlen_full = self.compute_attn_mask_seqlen(cu_seqlens)
        max_seqlen_window = self.compute_attn_mask_seqlen(cu_window_seqlens)
        cu_seqlens = cu_seqlens.to(  # type: ignore[attr-defined]
            device=self.device,
            non_blocking=True)
        cu_window_seqlens = cu_window_seqlens.to(  # type: ignore[attr-defined]
            device=self.device,
            non_blocking=True)
        rotary_pos_emb_cos = rotary_pos_emb_cos.to(  # type: ignore[attr-defined]
            device=self.device,
            non_blocking=True)
        rotary_pos_emb_sin = rotary_pos_emb_sin.to(  # type: ignore[attr-defined]
            device=self.device,
            non_blocking=True)
        window_index = window_index.to(  # type: ignore[attr-defined]
            device=hidden_states.device,
            non_blocking=True)
        reverse_indices = reverse_indices.to(device=hidden_states.device,
                                             non_blocking=True)
        hidden_states = hidden_states.reshape(
            seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
        hidden_states = hidden_states[window_index, :, :]
        hidden_states = hidden_states.reshape(seq_len, -1)
        hidden_states = hidden_states.unsqueeze(1)
        for layer_num, blk in enumerate(self.blocks):
            if layer_num in self.fullatt_block_indexes:
                cu_seqlens_now = cu_seqlens
                max_seqlen_now = max_seqlen_full
            else:
                cu_seqlens_now = cu_window_seqlens
                max_seqlen_now = max_seqlen_window
            hidden_states = blk(
                hidden_states,
                cu_seqlens=cu_seqlens_now,
                rotary_pos_emb_cos=rotary_pos_emb_cos,
                rotary_pos_emb_sin=rotary_pos_emb_sin,
                max_seqlen=max_seqlen_now,
            )
        # For Qwen2.5-VL-3B, float16 will overflow at last block
        # for long visual tokens sequences.
        if hidden_states.dtype == torch.float16:
            hidden_states = cast_overflow_tensors(hidden_states)
        # adapter
        hidden_states = self.merger(hidden_states)
        hidden_states = hidden_states[reverse_indices, :]
        return hidden_states


class AscendQwen2_5_VLForConditionalGeneration(nn.Module):
    def _process_image_input(

@@ -650,14 +166,6 @@ class AscendQwen2_5_VLForConditionalGeneration(nn.Module):
        return video_embeds.split(sizes)


def _apply_rotary_pos_emb_vision(t: torch.Tensor, cos: torch.Tensor,
                                 sin: torch.Tensor) -> torch.Tensor:
    rotary_emb_function = dispatch_rotary_emb_function(
        default=partial(apply_rotary_emb_torch, is_neox_style=True))
    output = rotary_emb_function(t, cos, sin).type_as(t)
    return output

# NOTE: This will be removed after MMEncoderAttention has been extract as a CustomOp in vllm.
Qwen2VisionAttention.forward = AscendQwen2_5_VisionAttention.forward
Qwen2_5_VisionAttention.forward = AscendQwen2_5_VisionAttention.forward
@@ -665,15 +173,3 @@ Qwen2_5_VisionAttention.forward = AscendQwen2_5_VisionAttention.forward
# NOTE: These will be removed after https://github.com/vllm-project/vllm/pull/29388 is merged.
Qwen2_5_VLForConditionalGeneration._process_image_input = AscendQwen2_5_VLForConditionalGeneration._process_image_input
Qwen2_5_VLForConditionalGeneration._process_video_input = AscendQwen2_5_VLForConditionalGeneration._process_video_input
# NOTE: These will be removed after vllm-ascend is aligned with vllm latest main.
Qwen2VisionBlock.forward = AscendQwen2VisionBlock.forward
Qwen2VisionTransformer.__init__ = AscendQwen2VisionTransformer.__init__
Qwen2VisionTransformer.rot_pos_emb = AscendQwen2VisionTransformer.rot_pos_emb
Qwen2VisionTransformer.forward = AscendQwen2VisionTransformer.forward
Qwen2_5_VisionBlock.forward = AscendQwen2_5_VisionBlock.forward
Qwen2_5_VisionTransformer.__init__ = AscendQwen2_5_VisionTransformer.__init__
Qwen2_5_VisionTransformer.rotary_pos_emb_thw = AscendQwen2_5_VisionTransformer.rotary_pos_emb_thw
Qwen2_5_VisionTransformer.get_rope_by_thw = AscendQwen2_5_VisionTransformer.get_rope_by_thw
Qwen2_5_VisionTransformer.forward = AscendQwen2_5_VisionTransformer.forward
apply_rotary_pos_emb_vision = _apply_rotary_pos_emb_vision

Changed file 2 of 2:

@@ -15,167 +15,14 @@
# limitations under the License.
#
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from transformers.models.qwen3_vl.configuration_qwen3_vl import \
    Qwen3VLVisionConfig
from vllm.attention.backends.registry import AttentionBackendEnum
from vllm.model_executor.layers.activation import _ACTIVATION_REGISTRY
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.models.qwen3_vl import (Qwen3_VisionBlock,
                                                 Qwen3_VisionPatchEmbed,
                                                 Qwen3_VisionPatchMerger,
                                                 Qwen3_VisionTransformer)
from vllm.model_executor.models.vision import get_vit_attn_backend


class AscendQwen3_VisionBlock(nn.Module):

    def forward(
        self,
        x: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb_cos: torch.Tensor,
        rotary_pos_emb_sin: torch.Tensor,
        max_seqlen: torch.Tensor,  # Only used for Flash Attention
    ) -> torch.Tensor:
        x = x + self.attn(
            self.norm1(x),
            cu_seqlens=cu_seqlens,
            rotary_pos_emb_cos=rotary_pos_emb_cos,
            rotary_pos_emb_sin=rotary_pos_emb_sin,
            max_seqlen=max_seqlen,
        )
        x = x + self.mlp(self.norm2(x))
        return x

from vllm.model_executor.models.qwen3_vl import Qwen3_VisionTransformer


class AscendQwen3_VisionTransformer(nn.Module):

    def __init__(
        self,
        vision_config: Qwen3VLVisionConfig,
        norm_eps: float = 1e-6,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        use_data_parallel: bool = False,
        attn_backend_override: AttentionBackendEnum | None = None,
    ) -> None:
        nn.Module.__init__(self)
        self.hidden_size = vision_config.hidden_size
        self.num_heads = vision_config.num_heads
        self.num_position_embeddings = vision_config.num_position_embeddings
        self.patch_size = vision_config.patch_size
        self.spatial_merge_size = vision_config.spatial_merge_size
        self.spatial_merge_unit = self.spatial_merge_size**2
        self.temporal_patch_size = vision_config.temporal_patch_size
        self.deepstack_visual_indexes = vision_config.deepstack_visual_indexes
        self.use_data_parallel = use_data_parallel
        self.num_grid_per_side = int(self.num_position_embeddings**0.5)
        # NOTE: This is used for creating empty tensor for all_gather for
        # DP ViT. Here out_hidden_size is enlarged due to deepstack
        self.out_hidden_size = vision_config.out_hidden_size * (
            1 + len(self.deepstack_visual_indexes))
        self.patch_embed = Qwen3_VisionPatchEmbed(
            patch_size=self.patch_size,
            temporal_patch_size=self.temporal_patch_size,
            in_channels=vision_config.in_channels,
            hidden_size=self.hidden_size,
        )
        self.pos_embed = nn.Embedding(self.num_position_embeddings,
                                      self.hidden_size)
        norm_layer = partial(nn.LayerNorm, eps=norm_eps)
        head_dim = self.hidden_size // self.num_heads
        self.rotary_pos_emb = get_rope(
            head_size=head_dim,
            rotary_dim=head_dim // 2,
            max_position=8192,
            is_neox_style=True,
        )
        self.merger = Qwen3_VisionPatchMerger(
            d_model=vision_config.out_hidden_size,
            context_dim=self.hidden_size,
            norm_layer=norm_layer,
            spatial_merge_size=self.spatial_merge_size,
            quant_config=quant_config,
            prefix=f"{prefix}.merger",
            use_data_parallel=use_data_parallel,
        )
        self.deepstack_merger_list = nn.ModuleList([
            Qwen3_VisionPatchMerger(
                d_model=vision_config.out_hidden_size,
                context_dim=self.hidden_size,
                spatial_merge_size=self.spatial_merge_size,
                use_postshuffle_norm=True,
                norm_layer=norm_layer,
                quant_config=quant_config,
                prefix=f"{prefix}.deepstack_merger_list.{layer_idx}",
                use_data_parallel=use_data_parallel,
            ) for layer_idx in range(len(self.deepstack_visual_indexes))
        ])
        self.attn_backend = get_vit_attn_backend(
            head_size=head_dim,
            dtype=torch.get_default_dtype(),
            attn_backend_override=attn_backend_override,
        )
        if self.attn_backend not in {
                AttentionBackendEnum.FLASH_ATTN,
                AttentionBackendEnum.TORCH_SDPA,
                AttentionBackendEnum.ROCM_AITER_FA,
        }:
            raise RuntimeError(
                f"Qwen3-VL does not support {self.attn_backend} backend now.")
        self.blocks = nn.ModuleList([
            Qwen3_VisionBlock(
                dim=self.hidden_size,
                num_heads=self.num_heads,
                mlp_hidden_dim=vision_config.intermediate_size,
                act_fn=_ACTIVATION_REGISTRY[vision_config.hidden_act],
                norm_layer=norm_layer,
                quant_config=quant_config,
                prefix=f"{prefix}.blocks.{layer_idx}",
                use_data_parallel=use_data_parallel,
                attn_backend=self.attn_backend,
            ) for layer_idx in range(vision_config.depth)
        ])

    def rot_pos_emb(self, grid_thw: list[list[int]]):
        max_grid_size = max(max(h, w) for _, h, w in grid_thw)
        pos_ids = [
            self.rot_pos_ids(h, w, self.spatial_merge_size) if t == 1 else
            self.rot_pos_ids(h, w, self.spatial_merge_size).repeat(t, 1)
            for t, h, w in grid_thw
        ]
        pos_ids = torch.cat(pos_ids, dim=0)
        # Use pre-computed cos_sin_cache from RotaryEmbedding
        cos, sin = self.rotary_pos_emb.get_cos_sin(max_grid_size)
        # (num_tokens, rotary_dim // 2)
        cos_h = cos[pos_ids[:, 0]]  # type: ignore
        cos_w = cos[pos_ids[:, 1]]  # type: ignore
        sin_h = sin[pos_ids[:, 0]]  # type: ignore
        sin_w = sin[pos_ids[:, 1]]  # type: ignore
        cos_combined = torch.cat([cos_h, cos_w], dim=-1)
        sin_combined = torch.cat([sin_h, sin_w], dim=-1)
        return cos_combined, sin_combined

    def forward(
        self,
        x: torch.Tensor,
@@ -234,8 +81,5 @@ class AscendQwen3_VisionTransformer(nn.Module):
        return hidden_states
# NOTE: These will be removed after vllm-ascend is aligned with vllm latest main.
Qwen3_VisionBlock.forward = AscendQwen3_VisionBlock.forward
Qwen3_VisionTransformer.__init__ = AscendQwen3_VisionTransformer.__init__
Qwen3_VisionTransformer.rot_pos_emb = AscendQwen3_VisionTransformer.rot_pos_emb
# NOTE: This will be removed after implementing multimodal_cpu_fields in vllm-ascend model_runner.
Qwen3_VisionTransformer.forward = AscendQwen3_VisionTransformer.forward