[MM][Patch] Remove patch for cos/sin cache (#4672)
### What this PR does / why we need it?
Remove the patch for https://github.com/vllm-project/vllm/pull/28798.

- vLLM version: v0.12.0

Signed-off-by: shen-shanshan <467638484@qq.com>
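For context, the patch being removed made the Ascend Qwen3-VL vision transformer take rotary cos/sin values from the pre-computed cache of the `RotaryEmbedding` built by `get_rope`, indexing that cache with per-token height/width position ids (see `rot_pos_emb` in the diff below). The following is a minimal sketch of that lookup only; `get_cos_sin` and the indexing mirror the removed code, while the standalone helper name is hypothetical.

```python
import torch


def lookup_vision_rotary(rotary_pos_emb, pos_ids: torch.Tensor,
                         max_grid_size: int):
    """Hypothetical helper mirroring the cache lookup in the removed patch."""
    # Pre-computed caches covering positions 0..max_grid_size-1,
    # each row of width rotary_dim // 2.
    cos, sin = rotary_pos_emb.get_cos_sin(max_grid_size)
    # pos_ids holds (h, w) indices per vision token; gather the cached rows
    # for each axis and concatenate to (num_tokens, rotary_dim).
    cos_combined = torch.cat([cos[pos_ids[:, 0]], cos[pos_ids[:, 1]]], dim=-1)
    sin_combined = torch.cat([sin[pos_ids[:, 0]], sin[pos_ids[:, 1]]], dim=-1)
    return cos_combined, sin_combined
```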
@@ -15,167 +15,14 @@
# limitations under the License.
#

from functools import partial

import numpy as np
import torch
import torch.nn as nn
from transformers.models.qwen3_vl.configuration_qwen3_vl import \
    Qwen3VLVisionConfig
from vllm.attention.backends.registry import AttentionBackendEnum
from vllm.model_executor.layers.activation import _ACTIVATION_REGISTRY
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.models.qwen3_vl import (Qwen3_VisionBlock,
                                                 Qwen3_VisionPatchEmbed,
                                                 Qwen3_VisionPatchMerger,
                                                 Qwen3_VisionTransformer)
from vllm.model_executor.models.vision import get_vit_attn_backend


class AscendQwen3_VisionBlock(nn.Module):

    def forward(
        self,
        x: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb_cos: torch.Tensor,
        rotary_pos_emb_sin: torch.Tensor,
        max_seqlen: torch.Tensor,  # Only used for Flash Attention
    ) -> torch.Tensor:
        x = x + self.attn(
            self.norm1(x),
            cu_seqlens=cu_seqlens,
            rotary_pos_emb_cos=rotary_pos_emb_cos,
            rotary_pos_emb_sin=rotary_pos_emb_sin,
            max_seqlen=max_seqlen,
        )

        x = x + self.mlp(self.norm2(x))
        return x
from vllm.model_executor.models.qwen3_vl import Qwen3_VisionTransformer


class AscendQwen3_VisionTransformer(nn.Module):

    def __init__(
        self,
        vision_config: Qwen3VLVisionConfig,
        norm_eps: float = 1e-6,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        use_data_parallel: bool = False,
        attn_backend_override: AttentionBackendEnum | None = None,
    ) -> None:
        nn.Module.__init__(self)

        self.hidden_size = vision_config.hidden_size
        self.num_heads = vision_config.num_heads
        self.num_position_embeddings = vision_config.num_position_embeddings
        self.patch_size = vision_config.patch_size
        self.spatial_merge_size = vision_config.spatial_merge_size
        self.spatial_merge_unit = self.spatial_merge_size**2
        self.temporal_patch_size = vision_config.temporal_patch_size
        self.deepstack_visual_indexes = vision_config.deepstack_visual_indexes
        self.use_data_parallel = use_data_parallel
        self.num_grid_per_side = int(self.num_position_embeddings**0.5)

        # NOTE: This is used for creating empty tensor for all_gather for
        # DP ViT. Here out_hidden_size is enlarged due to deepstack
        self.out_hidden_size = vision_config.out_hidden_size * (
            1 + len(self.deepstack_visual_indexes))

        self.patch_embed = Qwen3_VisionPatchEmbed(
            patch_size=self.patch_size,
            temporal_patch_size=self.temporal_patch_size,
            in_channels=vision_config.in_channels,
            hidden_size=self.hidden_size,
        )

        self.pos_embed = nn.Embedding(self.num_position_embeddings,
                                      self.hidden_size)

        norm_layer = partial(nn.LayerNorm, eps=norm_eps)
        head_dim = self.hidden_size // self.num_heads
        self.rotary_pos_emb = get_rope(
            head_size=head_dim,
            rotary_dim=head_dim // 2,
            max_position=8192,
            is_neox_style=True,
        )

        self.merger = Qwen3_VisionPatchMerger(
            d_model=vision_config.out_hidden_size,
            context_dim=self.hidden_size,
            norm_layer=norm_layer,
            spatial_merge_size=self.spatial_merge_size,
            quant_config=quant_config,
            prefix=f"{prefix}.merger",
            use_data_parallel=use_data_parallel,
        )

        self.deepstack_merger_list = nn.ModuleList([
            Qwen3_VisionPatchMerger(
                d_model=vision_config.out_hidden_size,
                context_dim=self.hidden_size,
                spatial_merge_size=self.spatial_merge_size,
                use_postshuffle_norm=True,
                norm_layer=norm_layer,
                quant_config=quant_config,
                prefix=f"{prefix}.deepstack_merger_list.{layer_idx}",
                use_data_parallel=use_data_parallel,
            ) for layer_idx in range(len(self.deepstack_visual_indexes))
        ])

        self.attn_backend = get_vit_attn_backend(
            head_size=head_dim,
            dtype=torch.get_default_dtype(),
            attn_backend_override=attn_backend_override,
        )

        if self.attn_backend not in {
                AttentionBackendEnum.FLASH_ATTN,
                AttentionBackendEnum.TORCH_SDPA,
                AttentionBackendEnum.ROCM_AITER_FA,
        }:
            raise RuntimeError(
                f"Qwen3-VL does not support {self.attn_backend} backend now.")
        self.blocks = nn.ModuleList([
            Qwen3_VisionBlock(
                dim=self.hidden_size,
                num_heads=self.num_heads,
                mlp_hidden_dim=vision_config.intermediate_size,
                act_fn=_ACTIVATION_REGISTRY[vision_config.hidden_act],
                norm_layer=norm_layer,
                quant_config=quant_config,
                prefix=f"{prefix}.blocks.{layer_idx}",
                use_data_parallel=use_data_parallel,
                attn_backend=self.attn_backend,
            ) for layer_idx in range(vision_config.depth)
        ])

    def rot_pos_emb(self, grid_thw: list[list[int]]):
        max_grid_size = max(max(h, w) for _, h, w in grid_thw)
        pos_ids = [
            self.rot_pos_ids(h, w, self.spatial_merge_size) if t == 1 else
            self.rot_pos_ids(h, w, self.spatial_merge_size).repeat(t, 1)
            for t, h, w in grid_thw
        ]
        pos_ids = torch.cat(pos_ids, dim=0)

        # Use pre-computed cos_sin_cache from RotaryEmbedding
        cos, sin = self.rotary_pos_emb.get_cos_sin(max_grid_size)

        # (num_tokens, rotary_dim // 2)
        cos_h = cos[pos_ids[:, 0]]  # type: ignore
        cos_w = cos[pos_ids[:, 1]]  # type: ignore
        sin_h = sin[pos_ids[:, 0]]  # type: ignore
        sin_w = sin[pos_ids[:, 1]]  # type: ignore

        cos_combined = torch.cat([cos_h, cos_w], dim=-1)
        sin_combined = torch.cat([sin_h, sin_w], dim=-1)

        return cos_combined, sin_combined

    def forward(
        self,
        x: torch.Tensor,
@@ -234,8 +81,5 @@ class AscendQwen3_VisionTransformer(nn.Module):
        return hidden_states


# NOTE: These will be removed after vllm-ascend is aligned with vllm latest main.
Qwen3_VisionBlock.forward = AscendQwen3_VisionBlock.forward
Qwen3_VisionTransformer.__init__ = AscendQwen3_VisionTransformer.__init__
Qwen3_VisionTransformer.rot_pos_emb = AscendQwen3_VisionTransformer.rot_pos_emb
# NOTE: This will be removed after implementing multimodal_cpu_fields in vllm-ascend model_runner.
Qwen3_VisionTransformer.forward = AscendQwen3_VisionTransformer.forward
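As the NOTE comments above indicate, the patched file applied its overrides by rebinding methods on the upstream vLLM classes at import time; removing such an assignment restores the upstream implementation of the corresponding method. A minimal sketch of that import-time patch pattern, with a placeholder override body, is below.

```python
# Illustrative sketch of the import-time patch pattern used in this file;
# the override body is a placeholder, not the real Ascend implementation.
from vllm.model_executor.models.qwen3_vl import Qwen3_VisionTransformer


class _AscendOverrides:

    def rot_pos_emb(self, grid_thw):
        # Ascend-specific logic would live here.
        raise NotImplementedError


# Importing the patch module rebinds the method on the upstream class, so every
# Qwen3_VisionTransformer created afterwards uses the override; deleting this
# assignment restores the upstream behavior.
Qwen3_VisionTransformer.rot_pos_emb = _AscendOverrides.rot_pos_emb
```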