################################################################################
# Copyright (c) 2020-2025 Shanghai Biren Technology Co., Ltd. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# SPDX-License-Identifier: Apache-2.0
# adapted from https://huggingface.co/OpenGVLab/InternVL2-4B/blob/main/modeling_intern_vit.py
# --------------------------------------------------------
# InternVL
# Copyright (c) 2023 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from typing import Optional
import torch
import torch_br
from fastcore.basics import patch_to
from transformers import PretrainedConfig
from vllm.model_executor.layers.quantization import QuantizationConfig
# isort: off
from vllm.model_executor.models.intern_vit import (InternMLP,
                                                    InternVisionEmbeddings,
                                                    InternVisionModel,
                                                    InternVisionEncoder)
from vllm.model_executor.models.intern_vit import InternParallelAttention
from vllm.distributed.parallel_state import get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size
from vllm.distributed.utils import divide
from vllm.model_executor.layers.layernorm import RMSNorm
# isort: on
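

# The patches below use fastcore's patch_to to monkey-patch vLLM's InternViT
# implementation so that the vision encoder runs without tensor parallelism on
# Biren hardware: tensor-parallel linear layers are replaced with plain
# torch.nn.Linear modules, and the patch embedding, attention, and MLP forward
# passes are routed through torch_br-specific code paths.


# [Patch] InternVisionModel.__init__: rebuild the embeddings and encoder with
# the data-parallel flag threaded through; quant_config is not forwarded to
# the encoder.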
@patch_to(InternVisionModel)
def __init__(
    self,
    config: PretrainedConfig,
    quant_config: Optional[QuantizationConfig] = None,
    *,
    num_hidden_layers_override: Optional[int] = None,
    num_dummy_heads: int = 0,
    prefix: str = "",
    use_data_parallel: bool = False,
) -> None:
    """
    [Patch] enable data parallelism for InternVisionModel
    """
    super(InternVisionModel, self).__init__()
    self.config = config
    self.use_data_parallel = use_data_parallel
    self.embeddings = InternVisionEmbeddings(config)
    self.encoder = InternVisionEncoder(
        config=config,
        quant_config=None,
        num_hidden_layers_override=num_hidden_layers_override,
        num_dummy_heads=num_dummy_heads,
        prefix=f"{prefix}.encoder",
        use_data_parallel=use_data_parallel,
    )
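

# [Patch] InternVisionEmbeddings.forward: for the common 14x14 patch size the
# patch embedding is computed with the Biren-specific
# torch_br.supa_conv2d_knxn_snxn_p0x0_fwd kernel (toggling the supa debug
# switches around the call); other patch sizes fall back to the original
# patch_embedding module.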
@patch_to(InternVisionEmbeddings)
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
    target_dtype = self.patch_embedding.weight.dtype
    if self.patch_size == 14:
        import torch_br.supa._debug as supa_debug
        supa_debug.set_disable_zero_ws(False)
        supa_debug.set_disable_zero_output_uma(False)
        supa_debug.set_disable_zero_output_numa(False)
        supa_debug.set_disable_reorder_zero(False)
        patch_embeds = torch_br.supa_conv2d_knxn_snxn_p0x0_fwd(
            pixel_values.to(dtype=target_dtype), self.patch_embedding.weight,
            self.patch_size, self.patch_size, 0)
        if self.patch_embedding.bias is not None:
            patch_embeds += self.patch_embedding.bias[None, :, None, None]
        supa_debug.set_disable_zero_ws(True)
        supa_debug.set_disable_zero_output_uma(True)
        supa_debug.set_disable_zero_output_numa(True)
        supa_debug.set_disable_reorder_zero(True)
    else:
        patch_embeds = self.patch_embedding(pixel_values.to(
            target_dtype))  # shape = [*, channel, width, height]
    batch_size, _, height, width = patch_embeds.shape
    patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
    class_embeds = self.class_embedding.expand(batch_size, 1,
                                               -1).to(target_dtype)
    embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
    if self.patch_embedding.bias is None:
        position_embedding = self._get_position_embedding(height, width)
    else:
        position_embedding = torch.cat([
            self.position_embedding[:, :1, :],
            self._get_pos_embed(self.position_embedding[:, 1:, :], height,
                                width)
        ],
                                       dim=1)
    embeddings = embeddings + position_embedding.to(target_dtype)
    return embeddings
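

# [Patch] InternParallelAttention.__init__: force data-parallel execution
# (tp_size is asserted to be 1) and replace the tensor-parallel
# QKVParallelLinear / RowParallelLinear layers with plain torch.nn.Linear
# modules; the original parallel-layer constructions are kept below as
# comments for reference.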
@patch_to(InternParallelAttention)
def __init__(
    self,
    config: PretrainedConfig,
    quant_config: Optional[QuantizationConfig] = None,
    *,
    num_dummy_heads: int = 0,
    prefix: str = "",
    use_data_parallel: bool = False,
) -> None:
    super(InternParallelAttention, self).__init__()
    # [Patch] enable data parallelism
    self.use_data_parallel = True
    self.config = config
    self.embed_dim = config.hidden_size
    self.num_heads = config.num_attention_heads
    self.head_dim = self.embed_dim // self.num_heads
    if self.head_dim * self.num_heads != self.embed_dim:
        raise ValueError(f'embed_dim must be divisible by num_heads '
                         f'(got `embed_dim`: {self.embed_dim} and `num_heads`:'
                         f' {self.num_heads}).')
    self.tp_size = (1 if use_data_parallel else
                    get_tensor_model_parallel_world_size())
    self.tp_rank = (0
                    if use_data_parallel else get_tensor_model_parallel_rank())
    # Additional dummy heads are used to enable TP for common GPU counts.
    self.dummy_dim = (num_dummy_heads + self.num_heads) * self.head_dim
    self.num_heads_per_partition = divide(num_dummy_heads + self.num_heads,
                                          self.tp_size)
    assert self.tp_size == 1
    self.scale = self.head_dim**-0.5
    # self.qkv = QKVParallelLinear(
    #     self.embed_dim,
    #     self.head_dim,
    #     num_dummy_heads + self.num_heads,
    #     bias=config.qkv_bias,
    #     quant_config=quant_config,
    #     prefix=f"{prefix}.qkv",
    #     disable_tp=use_data_parallel,
    # )
    self.qkv = torch.nn.Linear(self.embed_dim,
                               3 * self.dummy_dim,
                               bias=config.qkv_bias)
    self.qk_normalization = config.qk_normalization
    if self.qk_normalization:
        self.q_norm = RMSNorm(self.dummy_dim,
                              eps=config.layer_norm_eps,
                              var_hidden_size=self.embed_dim)
        self.k_norm = RMSNorm(self.dummy_dim,
                              eps=config.layer_norm_eps,
                              var_hidden_size=self.embed_dim)
    # self.proj = RowParallelLinear(
    #     self.dummy_dim,
    #     self.embed_dim,
    #     quant_config=quant_config,
    #     prefix=f"{prefix}.proj",
    #     disable_tp=use_data_parallel,
    # )
    self.proj = torch.nn.Linear(self.dummy_dim, self.embed_dim)
    # self.attn = MultiHeadAttention(self.num_heads_per_partition,
    #                                self.head_dim, self.scale)
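

# [Patch] InternParallelAttention.forward: eager (non-fused) attention computed
# one sample at a time; the attn @ v product is split at key index 512 into two
# chunks, presumably to keep intermediate matmul shapes within the limits of
# the torch_br backend.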
@patch_to(InternParallelAttention)
def forward(self, x: torch.Tensor) -> torch.Tensor:
    B, N, C = x.shape
    x_tmp = []
    for i in range(B):
        qkv = self.qkv(x[i:i + 1, :]).reshape(1, N, 3, self.num_heads,
                                              C // self.num_heads)
        q, k, v = qkv.unbind(
            2)  # make torchscript happy (cannot use tensor as tuple)
        if self.qk_normalization:
            q = self.q_norm(q.flatten(-2, -1)).view(1, N, self.num_heads,
                                                    qkv.shape[4])
            k = self.k_norm(k.flatten(-2, -1)).view(1, N, self.num_heads,
                                                    qkv.shape[4])
        q = q.permute(0, 2, 1, 3)
        k = k.permute(0, 2, 1, 3)
        v = v.permute(0, 2, 1, 3)
        attn = ((q * self.scale) @ k.transpose(-2, -1))
        attn = attn.softmax(dim=-1)
        # x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x0 = attn[:, :, :, :512] @ v[:, :, :512, :]
        x1 = attn[:, :, :, 512:] @ v[:, :, 512:, :]
        x_tmp.append((x0 + x1).transpose(1, 2).reshape(1, N, C))
    x = torch.cat(x_tmp, dim=0)
    x = self.proj(x)
    return x
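

# [Patch] InternMLP.forward: for multi-image batches the MLP is applied one
# sample at a time, writing results into a column-major bfloat16 buffer
# allocated via torch_br._empty_ut_only and adding the fc2 bias explicitly;
# single-sample inputs go through the original fc1 -> activation -> fc2 path.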
@patch_to(InternMLP)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
    if hidden_states.shape[0] > 1:
        output = torch_br._empty_ut_only(hidden_states.shape,
                                         "COLMAJOR",
                                         is_numa=False,
                                         sbp="BB",
                                         axis=0,
                                         dtype=torch.bfloat16)
        for i in range(hidden_states.shape[0]):
            hidden_states_tmp, _ = self.fc1(hidden_states[i:i + 1, :, :])
            hidden_states_tmp = self.activation_fn(hidden_states_tmp)
            hidden_states_tmp, _ = self.fc2(hidden_states_tmp)
            hidden_states_tmp += self.fc2.bias[None, None, :]
            output[i] = hidden_states_tmp[0]
        return output
    else:
        hidden_states, _ = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states, _ = self.fc2(hidden_states)
        return hidden_states