[Fix] Skip visual layers when applying LoRA to Qwen2VL modules (#11519)

This commit is contained in:
Antonin Vidon
2025-10-17 18:39:57 -04:00
committed by GitHub
parent fdd7c69d65
commit 2614adf9ca

View File

@@ -28,7 +28,6 @@ from typing import Iterable, List, Optional, Tuple, Type, TypedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from transformers import Qwen2VLConfig
from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLVisionConfig
@@ -514,6 +513,10 @@ class Qwen2VLForConditionalGeneration(nn.Module):
def get_input_embeddings(self):
    """Return the token-embedding module of the wrapped language model."""
    embedding_layer = self.model.embed_tokens
    return embedding_layer
def should_apply_lora(self, module_name: str) -> bool:
    """Decide whether LoRA weights may be attached to the named module.

    Modules belonging to the vision tower (names beginning with
    "visual") are excluded; every other module is eligible.
    """
    if module_name.startswith("visual"):
        # vision-tower module: never receives LoRA
        return False
    return True
def forward(
self,
input_ids: torch.Tensor,