Upgrade to vLLM 0.17.0 with the Corex v4.1 overlay

This commit is contained in:
2026-04-29 19:38:22 +08:00
parent 8fac6062e4
commit 938d0854a5
430 changed files with 35969 additions and 14511 deletions

View File

@@ -18,6 +18,7 @@ import torch.nn as nn
import torch.nn.functional as F
from transformers import CLIPVisionConfig
from vllm.model_executor.custom_op import PluggableLayer
from vllm.model_executor.layers.attention import MMEncoderAttention
from vllm.model_executor.layers.conv import Conv2dLayer
from vllm.model_executor.layers.quantization import QuantizationConfig
@@ -263,9 +264,13 @@ class Block(nn.Module):
return x
class RelPosAttention(nn.Module):
# --8<-- [start:rel_pos_attention]
@PluggableLayer.register("rel_pos_attention")
class RelPosAttention(PluggableLayer):
"""Multi-head Attention block with relative position embeddings."""
# --8<-- [end:rel_pos_attention]
def __init__(
self,
dim: int,