fix: second_per_grid_ts should be used to get mrope position (#3682)
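
Qwen2.5-VL encodes videos with M-RoPE, where the temporal coordinate of each frame should be scaled by `second_per_grid_ts` (the real time, in seconds, covered by one temporal patch) rather than by a plain frame index, so that videos sampled at different frame rates get comparable positions. This change brings the sglang Qwen2.5-VL port in line with the upstream Hugging Face modeling code and cleans up dtype and device handling along the way.

A minimal sketch of the temporal-position idea; the function name and the `tokens_per_second` default are illustrative, not the repo's API:

    import torch

    def temporal_mrope_positions(
        num_frames: int,
        second_per_grid_t: float,
        tokens_per_second: float = 2.0,
    ) -> torch.Tensor:
        # Scale each temporal grid index by the real time it spans, so a
        # 2 FPS clip and an 8 FPS clip of the same scene land on similar
        # temporal positions.
        return (
            torch.arange(num_frames, dtype=torch.float)
            * second_per_grid_t
            * tokens_per_second
        ).long()

    # Four temporal patches, one second each -> positions [0, 2, 4, 6]
    print(temporal_mrope_positions(4, 1.0))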
@@ -125,12 +125,15 @@ class Qwen2_5_VisionBlock(nn.Module):
         if attn_implementation == "sdpa":
             use_context_forward = False
             softmax_in_single_precision = False
+            flatten_batch = True
         elif attn_implementation == "flash_attention_2":
             softmax_in_single_precision = False
             use_context_forward = True
+            flatten_batch = True
         elif attn_implementation == "eager":
             softmax_in_single_precision = True
             use_context_forward = False
+            flatten_batch = True
 
         self.attn = VisionAttention(
             embed_dim=dim,
@@ -139,7 +142,7 @@ class Qwen2_5_VisionBlock(nn.Module):
             use_qkv_parallel=False,
             use_context_forward=use_context_forward,
             softmax_in_single_precision=softmax_in_single_precision,
-            flatten_batch=True,
+            flatten_batch=flatten_batch,
             quant_config=quant_config,
             prefix=add_prefix("attn", prefix),
         )
@@ -192,9 +195,10 @@ class Qwen2_5_VisionPatchEmbed(nn.Module):
         )
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
+        target_dtype = self.proj.weight.dtype
         L, C = x.shape
         x = x.view(L, -1, self.temporal_patch_size, self.patch_size, self.patch_size)
-        x = self.proj(x).view(L, self.embed_dim)
+        x = self.proj(x.to(dtype=target_dtype)).view(L, self.embed_dim)
         return x
 
 
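The hunk above makes the patch embedding cast its input to the projection weight's dtype before the conv, since processors typically hand over fp32 pixel values while the model weights run in bf16/fp16. A hypothetical repro of the failure mode, not code from the repo:

    import torch
    import torch.nn as nn

    proj = nn.Conv3d(3, 8, kernel_size=2, stride=2).to(torch.bfloat16)
    pixels = torch.randn(1, 3, 2, 4, 4)  # fp32, as a processor would emit

    try:
        proj(pixels)  # dtype mismatch: bf16 weights vs fp32 input
    except RuntimeError as err:
        print("without cast:", err)

    out = proj(pixels.to(dtype=proj.weight.dtype))  # pattern from the diff
    print("with cast:", out.dtype)  # torch.bfloat16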
@@ -246,35 +250,15 @@ class Qwen2_5_VisionRotaryEmbedding(nn.Module):
 
     def __init__(self, dim: int, theta: float = 10000.0) -> None:
         super().__init__()
-        self.dim = dim
-        self.theta = theta
         inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
         self.register_buffer("inv_freq", inv_freq, persistent=False)
-        self._seq_len_cached = 0
-        self._freqs_cached = None
 
-    def update_freqs_cache(self, seqlen: int) -> None:
-        if seqlen > self._seq_len_cached:
-            seqlen *= 2
-            self._seq_len_cached = seqlen
-            self.inv_freq = 1.0 / (
-                self.theta
-                ** (
-                    torch.arange(
-                        0, self.dim, 2, dtype=torch.float, device=self.inv_freq.device
-                    )
-                    / self.dim
-                )
-            )
-            seq = torch.arange(
-                seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype
-            )
-            freqs = torch.outer(seq, self.inv_freq)
-            self._freqs_cached = freqs
-
     def forward(self, seqlen: int) -> torch.Tensor:
-        self.update_freqs_cache(seqlen)
-        return self._freqs_cached[:seqlen]
+        seq = torch.arange(
+            seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype
+        )
+        freqs = torch.outer(seq, self.inv_freq)
+        return freqs
 
 
 class Qwen2_5_VisionTransformer(nn.Module):
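The rotary-embedding hunk drops the grow-on-demand cache (which doubled the cached length and rebuilt `inv_freq` whenever a longer sequence arrived) and simply recomputes the outer product on every call: the recompute is cheap, and it removes the mutable `_freqs_cached` state. A quick sanity check of the on-the-fly path, with a toy `dim` chosen for illustration:

    import torch

    dim, theta, seqlen = 8, 10000.0, 6
    inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
    seq = torch.arange(seqlen, dtype=inv_freq.dtype)
    freqs = torch.outer(seq, inv_freq)  # freqs[p, k] = p * inv_freq[k]
    assert freqs.shape == (seqlen, dim // 2)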
@@ -293,7 +277,7 @@ class Qwen2_5_VisionTransformer(nn.Module):
         spatial_merge_size: int = vision_config.spatial_merge_size
         self.spatial_merge_size = spatial_merge_size
         self.spatial_merge_unit: int = spatial_merge_size * spatial_merge_size
-        in_chans: int = vision_config.in_chans
+        in_chans: int = vision_config.in_channels
         hidden_size: int = vision_config.hidden_size
         depth: int = vision_config.depth
         num_heads: int = vision_config.num_heads
@@ -393,27 +377,24 @@ class Qwen2_5_VisionTransformer(nn.Module):
         pos_ids = []
         for t, h, w in grid_thw:
             hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
+            hpos_ids = hpos_ids.reshape(
+                h // self.spatial_merge_size,
+                self.spatial_merge_size,
+                w // self.spatial_merge_size,
+                self.spatial_merge_size,
+            )
+            hpos_ids = hpos_ids.permute(0, 2, 1, 3)
+            hpos_ids = hpos_ids.flatten()
+
             wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
-            hpos_ids = (
-                hpos_ids.reshape(
-                    h // self.spatial_merge_size,
-                    self.spatial_merge_size,
-                    w // self.spatial_merge_size,
-                    self.spatial_merge_size,
-                )
-                .permute(0, 2, 1, 3)
-                .flatten()
-            )
-            wpos_ids = (
-                wpos_ids.reshape(
-                    h // self.spatial_merge_size,
-                    self.spatial_merge_size,
-                    w // self.spatial_merge_size,
-                    self.spatial_merge_size,
-                )
-                .permute(0, 2, 1, 3)
-                .flatten()
-            )
+            wpos_ids = wpos_ids.reshape(
+                h // self.spatial_merge_size,
+                self.spatial_merge_size,
+                w // self.spatial_merge_size,
+                self.spatial_merge_size,
+            )
+            wpos_ids = wpos_ids.permute(0, 2, 1, 3)
+            wpos_ids = wpos_ids.flatten()
             pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
         pos_ids = torch.cat(pos_ids, dim=0)
         max_grid_size = grid_thw[:, 1:].max()
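The `rot_pos_emb` hunk is a pure layout change: the chained reshape/permute/flatten expressions are rewritten as separate statements matching the upstream Hugging Face code, and both forms produce identical position ids. For intuition, a small standalone example of the merge-window ordering, using an illustrative 4x4 grid with merge size 2:

    import torch

    m, h, w = 2, 4, 4
    hpos = torch.arange(h).unsqueeze(1).expand(-1, w)
    hpos = hpos.reshape(h // m, m, w // m, m).permute(0, 2, 1, 3).flatten()
    wpos = torch.arange(w).unsqueeze(0).expand(h, -1)
    wpos = wpos.reshape(h // m, m, w // m, m).permute(0, 2, 1, 3).flatten()
    # The first four (h, w) pairs are the top-left 2x2 merge window:
    print(torch.stack([hpos, wpos], dim=-1)[:4])
    # tensor([[0, 0], [0, 1], [1, 0], [1, 1]])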
@@ -437,7 +418,7 @@ class Qwen2_5_VisionTransformer(nn.Module):
         cu_window_seqlens = torch.tensor(
             cu_window_seqlens,
             device=x.device,
-            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
+            dtype=torch.int32,
         )
         cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)
 
@@ -610,7 +591,8 @@ class Qwen2_5_VLForConditionalGeneration(nn.Module):
                 start_idx = extend_start_loc_cpu[i]
                 prefix_len = prefix_lens_cpu[i]
 
-                pixel_values = image.pixel_values.clone().detach().requires_grad_(False)
+                pixel_values = image.pixel_values.to(device="cuda")
+
                 image_grid_thws = torch.tensor(
                     np.array(image.image_grid_thws), device="cuda"
                 )
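The last hunk swaps `clone().detach().requires_grad_(False)` for a plain `.to(device="cuda")`: at inference time there is no autograd graph to detach from, and what the embedding path actually needs is the tensor on the GPU. A small contrast of the two patterns, hypothetical and CPU-safe:

    import torch

    pv = torch.randn(2, 3)
    a = pv.clone().detach().requires_grad_(False)  # extra copy, same device
    device = "cuda" if torch.cuda.is_available() else "cpu"
    b = pv.to(device=device)  # moves (or no-ops) without a forced extra copy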