first commit

2026-03-10 13:31:25 +08:00
parent ba974cecfa
commit b62b889355
2604 changed files with 438977 additions and 0 deletions

View File

@@ -0,0 +1,25 @@
################################################################################
# Copyright(c)2020-2025 Shanghai Biren Technology Co., Ltd. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
from .attention import AttentionSplit
from .mla import SupaMLAModules, SupaMultiHeadLatentAttention
from .mlp import LlamaMlpSiluL3, MergedGateUpMLPSiluL2
from .moe import DeepseekV2MoE
__all__ = [
'LlamaMlpSiluL3', 'AttentionSplit', 'MergedGateUpMLPSiluL2',
'DeepseekV2MoE', 'SupaMLAModules', 'SupaMultiHeadLatentAttention'
]

View File

@@ -0,0 +1,206 @@
################################################################################
# Copyright(c)2020-2025 Shanghai Biren Technology Co., Ltd. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
from typing import Any, Optional, Tuple
import torch
import torch_br
from torch import nn
from torch_br.supa.profiler_kineto import record_function
from vllm.attention import Attention, AttentionType
from vllm.config import CacheConfig
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.forward_context import ForwardContext, get_forward_context
from vllm.model_executor.layers.linear import (ColumnParallelLinear,
RowParallelLinear)
from vllm.model_executor.layers.quantization.base_config import (
QuantizationConfig)
from vllm.model_executor.layers.rotary_embedding import (MRotaryEmbedding,
get_rope)
from vllm.model_executor.models.utils import extract_layer_index
class AttentionSplit(nn.Module):
def __init__(
self,
hidden_size: int,
num_heads: int,
num_kv_heads: int,
max_position: int = 4096 * 32,
rope_theta: int = 10000,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
rope_scaling: Optional[Tuple] = None,
attn_type: str = AttentionType.DECODER,
prefix: str = "",
dual_chunk_attention_config: Optional[dict[str, Any]] = None,
bias: bool = False,
) -> None:
super().__init__()
self.hidden_size = hidden_size
tp_size = get_tensor_model_parallel_world_size()
self.total_num_heads = num_heads
assert self.total_num_heads % tp_size == 0
self.num_heads = self.total_num_heads // tp_size
self.total_num_kv_heads = num_kv_heads
if self.total_num_kv_heads >= tp_size:
# Number of KV heads is at least the TP size, so we partition
# the KV heads across multiple tensor parallel GPUs.
assert self.total_num_kv_heads % tp_size == 0
else:
# Number of KV heads is less than TP size, so we replicate
# the KV heads across multiple tensor parallel GPUs.
assert tp_size % self.total_num_kv_heads == 0
self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
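# Illustrative numbers (not taken from any config): total_num_kv_heads=8 with
# tp_size=4 leaves 2 KV heads per rank; total_num_kv_heads=2 with tp_size=8
# replicates each KV head, so every rank sees num_kv_heads=1.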
self.head_dim = hidden_size // self.total_num_heads
self.q_size = self.num_heads * self.head_dim
self.kv_size = self.num_kv_heads * self.head_dim
self.scaling = self.head_dim**-0.5
self.rope_theta = rope_theta
qconfig = None
if quant_config is not None and quant_config.qkv_quantized:
qconfig = quant_config
self.q_proj = ColumnParallelLinear(input_size=hidden_size,
output_size=self.q_size * tp_size,
bias=bias,
quant_config=qconfig,
prefix=f"{prefix}.q_proj")
self.k_proj = ColumnParallelLinear(input_size=hidden_size,
output_size=self.kv_size * tp_size,
bias=bias,
quant_config=qconfig,
prefix=f"{prefix}.k_proj")
self.v_proj = ColumnParallelLinear(input_size=hidden_size,
output_size=self.kv_size * tp_size,
bias=bias,
quant_config=qconfig,
prefix=f"{prefix}.v_proj")
self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim,
hidden_size,
bias=False,
quant_config=quant_config,
prefix=f"{prefix}.o_proj")
self.rotary_emb = get_rope(
self.head_dim,
rotary_dim=self.head_dim,
max_position=max_position,
base=self.rope_theta,
rope_scaling=rope_scaling,
)
self.attn = Attention(
self.num_heads,
self.head_dim,
self.scaling,
num_kv_heads=self.num_kv_heads,
cache_config=cache_config,
quant_config=quant_config,
attn_type=attn_type,
prefix=f"{prefix}.attn",
**{
"layer_idx": extract_layer_index(prefix),
"dual_chunk_attention_config": dual_chunk_attention_config,
} if dual_chunk_attention_config else {})
def forward(
self,
positions: torch.Tensor,
hidden_states: torch.Tensor,
) -> torch.Tensor:
forward_context: ForwardContext = get_forward_context()
attn_metadata = forward_context.attn_metadata
if attn_metadata is None:
# dummy run (no attention metadata): return hidden_states unchanged
return hidden_states
seq_len = hidden_states.shape[-2]
decode_seql = 512
# numa weight (3-D layout) and mrope not in use (qwen-vl)
if ((hasattr(self.q_proj, "qweight")
and len(self.q_proj.qweight.shape) == 3) or
(hasattr(self.q_proj, "weight")
and len(self.q_proj.weight.shape) == 3)) and not isinstance(
self.rotary_emb, MRotaryEmbedding) and seq_len <= decode_seql:
if isinstance(attn_metadata, dict):
attn_metadata = attn_metadata[self.attn.layer_name]
kv_cache = self.attn.kv_cache[forward_context.virtual_engine]
if kv_cache is not None:
with record_function('attention qkv_rope'):
# prefer the int8 qweight when present, otherwise fall back to the fp weight
q_weight = self.q_proj.qweight if hasattr(
self.q_proj, "qweight") else self.q_proj.weight
k_weight = self.k_proj.qweight if hasattr(
self.k_proj, "qweight") else self.k_proj.weight
v_weight = self.v_proj.qweight if hasattr(
self.v_proj, "qweight") else self.v_proj.weight
q_scale = self.q_proj.scales if hasattr(
self.q_proj, "scales") else None
k_scale = self.k_proj.scales if hasattr(
self.k_proj, "scales") else None
v_scale = self.v_proj.scales if hasattr(
self.v_proj, "scales") else None
q_bias = self.q_proj.bias if hasattr(self.q_proj,
"bias") else None
k_bias = self.k_proj.bias if hasattr(self.k_proj,
"bias") else None
v_bias = self.v_proj.bias if hasattr(self.v_proj,
"bias") else None
q, k, v = torch_br.supa_qkv_rope_decode_infer(
hidden_states,
q_weight,
k_weight,
v_weight,
self.rotary_emb.sin_cache,
self.rotary_emb.cos_cache,
kv_cache,
positions,
attn_metadata.slot_mapping,
self.rotary_emb.head_size,
self.q_size,
self.kv_size,
q_scale=q_scale,
k_scale=k_scale,
v_scale=v_scale,
q_bias=q_bias,
k_bias=k_bias,
v_bias=v_bias)
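# supa_qkv_rope_decode_infer receives kv_cache and slot_mapping, so it writes
# K/V for this step into the cache itself; do_cache is cleared below so the
# attention op does not cache the same tokens a second time.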
if hasattr(attn_metadata, 'do_cache'):
attn_metadata.do_cache = False
with record_function('attention'):
attn_output = self.attn(q, k, v)
with record_function('attention o_proj'):
output, _ = self.o_proj(attn_output)
return output
else:
return hidden_states
else:
# uma weight or mrope in use (qwen-vl): unfused fallback path
q, _ = self.q_proj(hidden_states)
k, _ = self.k_proj(hidden_states)
v, _ = self.v_proj(hidden_states)
q, k = self.rotary_emb(positions, q, k)
if hasattr(attn_metadata, 'do_cache'):
attn_metadata.do_cache = True
attn_output = self.attn(q, k, v)
output, _ = self.o_proj(attn_output)
return output

View File

@@ -0,0 +1,210 @@
################################################################################
# Copyright(c)2020-2025 Shanghai Biren Technology Co., Ltd. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
from dataclasses import dataclass
from typing import Optional
import torch
from vllm.attention import Attention
from vllm.config import CacheConfig
from vllm.model_executor.custom_op import CustomOp
from vllm.model_executor.layers.mla import MLAModules
from vllm.model_executor.layers.quantization import QuantizationConfig
@dataclass
class SupaMLAModules(MLAModules):
q_a_proj: Optional[torch.nn.Module]
@CustomOp.register("supa_multi_head_latent_attention")
class SupaMultiHeadLatentAttention(CustomOp):
def __init__(
self,
hidden_size: int,
num_heads: int,
scale: float,
qk_nope_head_dim: int,
qk_rope_head_dim: int,
v_head_dim: int,
q_lora_rank: Optional[int],
kv_lora_rank: int,
mla_modules: MLAModules,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
prefix: str = "",
) -> None:
super().__init__()
self.hidden_size = hidden_size
self.qk_nope_head_dim = qk_nope_head_dim
self.qk_rope_head_dim = qk_rope_head_dim
self.qk_head_dim = qk_nope_head_dim + qk_rope_head_dim
self.v_head_dim = v_head_dim
self.q_lora_rank = q_lora_rank
self.kv_lora_rank = kv_lora_rank
self.num_heads = num_heads
self.fused_qkv_a_proj = mla_modules.fused_qkv_a_proj
self.kv_a_proj_with_mqa = mla_modules.kv_a_proj_with_mqa
self.q_a_layernorm = mla_modules.q_a_layernorm
self.q_b_proj = mla_modules.q_b_proj
self.q_proj = mla_modules.q_proj
self.kv_a_layernorm = mla_modules.kv_a_layernorm
self.kv_b_proj = mla_modules.kv_b_proj
self.rotary_emb = mla_modules.rotary_emb
self.o_proj = mla_modules.o_proj
self.indexer = mla_modules.indexer
self.is_sparse = mla_modules.is_sparse
self.q_a_proj = mla_modules.q_a_proj
if self.indexer is not None:
assert hasattr(self.indexer, "topk_tokens")
self.topk_tokens = self.indexer.topk_tokens
self.topk_indices_buffer = mla_modules.topk_indices_buffer
# In the MLA backend, kv_cache includes both k_c and
# pe (i.e. decoupled position embeddings). In particular,
# the concat_and_cache_mla op requires
# k_c.size(1) + k_pe.size(1) == kv_cache.size(2)
# i.e.
# kv_lora_rank + qk_rope_head_dim == head_size
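# For example (typical DeepSeek-V2/V3 values, cited only for illustration):
# kv_lora_rank=512 and qk_rope_head_dim=64 give an MLA cache head_size of 576.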
if self.is_sparse:
self.mla_attn = Attention(
num_heads=self.num_heads,
head_size=self.kv_lora_rank + self.qk_rope_head_dim,
scale=scale,
num_kv_heads=1,
cache_config=cache_config,
quant_config=quant_config,
prefix=f"{prefix}.attn",
use_mla=True,
use_sparse=mla_modules.is_sparse,
# MLA Args
q_lora_rank=self.q_lora_rank,
kv_lora_rank=self.kv_lora_rank,
qk_nope_head_dim=self.qk_nope_head_dim,
qk_rope_head_dim=self.qk_rope_head_dim,
qk_head_dim=self.qk_head_dim,
v_head_dim=self.v_head_dim,
kv_b_proj=self.kv_b_proj,
indexer=self.indexer,
)
else:
self.mla_attn = Attention(
num_heads=self.num_heads,
head_size=self.kv_lora_rank + self.qk_rope_head_dim,
scale=scale,
num_kv_heads=1,
cache_config=cache_config,
quant_config=quant_config,
prefix=f"{prefix}.attn",
use_mla=True,
use_sparse=mla_modules.is_sparse,
# MLA Args
q_lora_rank=self.q_lora_rank,
kv_lora_rank=self.kv_lora_rank,
qk_nope_head_dim=self.qk_nope_head_dim,
qk_rope_head_dim=self.qk_rope_head_dim,
qk_head_dim=self.qk_head_dim,
v_head_dim=self.v_head_dim,
kv_b_proj=self.kv_b_proj,
indexer=self.indexer,
# BIREN args for fused MLA
rotary_emb=self.rotary_emb,
q_proj=self.q_proj
if self.q_lora_rank is None else self.q_b_proj,
o_proj=self.o_proj,
kv_a_proj_with_mqa=self.kv_a_proj_with_mqa,
kv_a_layernorm=self.kv_a_layernorm,
q_a_proj=None if self.q_lora_rank is None else self.q_a_proj,
q_a_layernorm=None
if self.q_lora_rank is None else self.q_a_layernorm,
)
self.prefix = prefix
self.debug_layer_idx = int(self.prefix.split(".")[-2])
def forward_native(
self,
positions: torch.Tensor,
hidden_states: torch.Tensor,
) -> torch.Tensor:
q_c = None
kv_lora = None
if self.q_lora_rank is not None:
assert self.fused_qkv_a_proj is not None, \
"fused_qkv_a_proj is required when q_lora_rank is not None"
assert self.q_a_layernorm is not None, \
"q_a_layernorm is required when q_lora_rank is not None"
assert self.q_b_proj is not None, \
"q_b_proj is required when q_lora_rank is not None"
qkv_lora = self.fused_qkv_a_proj(hidden_states)[0]
q_c, kv_lora = qkv_lora.split(
[self.q_lora_rank, self.kv_lora_rank + self.qk_rope_head_dim],
dim=-1,
)
q_c = self.q_a_layernorm(q_c)
q = self.q_b_proj(q_c)[0].view(-1,
self.num_heads * self.qk_head_dim)
else:
assert self.kv_a_proj_with_mqa is not None, \
"kv_a_proj_with_mqa is required when q_lora_rank is None"
assert self.q_proj is not None, \
"q_proj is required when q_lora_rank is None"
kv_lora = self.kv_a_proj_with_mqa(hidden_states)[0]
q = self.q_proj(hidden_states)[0]
kv_lora = kv_lora.view(-1, self.kv_lora_rank + self.qk_rope_head_dim)
kv_c, k_pe = kv_lora.split([self.kv_lora_rank, self.qk_rope_head_dim],
dim=-1)
kv_c_normed = self.kv_a_layernorm(kv_c)
q = q.view(-1, self.num_heads, self.qk_head_dim)
# Add head dim of 1 to k_pe
k_pe = k_pe.unsqueeze(1)
q[..., self.qk_nope_head_dim:], k_pe = self.rotary_emb(
positions, q[..., self.qk_nope_head_dim:], k_pe)
if self.indexer and self.is_sparse:
_topk_indices = self.indexer(hidden_states, q_c, positions,
self.rotary_emb)
seq_len = hidden_states.shape[1]
attn_out = self.mla_attn(q,
kv_c_normed,
k_pe,
output_shape=(seq_len, self.num_heads *
self.v_head_dim))
return self.o_proj(attn_out)[0].unsqueeze(0)
def forward_supa(
self,
positions: torch.Tensor,
hidden_states: torch.Tensor,
) -> torch.Tensor:
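# Fused path: the non-sparse Attention above was constructed with the
# projection, layernorm and rotary modules ("BIREN args for fused MLA"), so
# raw hidden_states are passed straight through and all MLA math happens
# inside self.mla_attn.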
return self.mla_attn(hidden_states,
positions,
hidden_states,
output_shape=hidden_states.shape)
def forward_oot(self, *args, is_ds_v32: Optional[int], **kwargs):
if is_ds_v32:
return self.forward_native(*args, **kwargs)
else:
return self.forward_supa(*args, **kwargs)

View File

@@ -0,0 +1,170 @@
################################################################################
# Copyright(c)2020-2025 Shanghai Biren Technology Co., Ltd. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
from typing import Optional
import torch_br
from torch import nn
from vllm.distributed import (get_tensor_model_parallel_world_size,
get_tp_group, tensor_model_parallel_all_reduce)
from vllm.distributed.parallel_state import (get_pp_group,
get_tensor_model_parallel_rank)
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.linear import (ColumnParallelLinear,
MergedColumnParallelLinear,
RowParallelLinear)
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm_br import envs
from vllm_br.utils import get_grandparent_pid
class LlamaMlpSiluL3(nn.Module):
def __init__(
self,
hidden_size: int,
intermediate_size: int,
hidden_act: str,
quant_config: Optional[QuantizationConfig] = None,
bias: bool = False,
prefix: str = "",
) -> None:
super().__init__()
self.gate_proj = ColumnParallelLinear(input_size=hidden_size,
output_size=intermediate_size,
bias=bias,
quant_config=quant_config,
prefix=f"{prefix}.gate_proj")
self.up_proj = ColumnParallelLinear(input_size=hidden_size,
output_size=intermediate_size,
bias=bias,
quant_config=quant_config,
prefix=f"{prefix}.up_proj")
self.down_proj = RowParallelLinear(intermediate_size,
hidden_size,
bias=bias,
quant_config=quant_config,
prefix=f"{prefix}.down_proj")
if hidden_act != "silu":
raise ValueError(f"Unsupported activation: {hidden_act}. "
"Only silu is supported for now.")
self.act_fn = SiluAndMul()
def forward(self, x):
gate, _ = self.gate_proj(x)
up, _ = self.up_proj(x)
x = torch_br.supa_silumul(gate, up)
x, _ = self.down_proj(x)
return x
class MergedGateUpMLPSiluL2(nn.Module):
"""
"""
def __init__(
self,
hidden_size: int,
intermediate_size: int,
hidden_act: str,
quant_config: Optional[QuantizationConfig] = None,
reduce_results: bool = True,
bias: bool = False,
prefix: str = "",
) -> None:
super().__init__()
self.tp_size = get_tensor_model_parallel_world_size()
self.intermediate_size = intermediate_size
self.prefix = prefix
self.gate_up_proj = MergedColumnParallelLinear(
hidden_size, [intermediate_size] * 2,
bias=bias,
quant_config=quant_config,
prefix=f"{prefix}.gate_up_proj")
self.gate_up_proj.has_cross_weight = True
self.down_proj = RowParallelLinear(intermediate_size,
hidden_size,
bias=bias,
quant_config=quant_config,
reduce_results=reduce_results,
prefix=f"{prefix}.down_proj")
if hidden_act != "silu":
raise ValueError(f"Unsupported activation: {hidden_act}. "
"Only silu is supported for now.")
self.act_fn = SiluAndMul()
def forward(self, x):
if envs.VLLM_BR_USE_CPU_ALL_REDUCE != 0 and not hasattr(
self, "grandparent_pid"):
self.grandparent_pid = get_grandparent_pid()
if "shared_experts" not in self.prefix:
quant_flag = hasattr(self.gate_up_proj, "qweight")
hidden_size = x.shape[-1]
seq_len = x.shape[-2]
gu_weight = self.gate_up_proj.qweight if quant_flag else self.gate_up_proj.weight
gu_scales = self.gate_up_proj.scales if quant_flag else None
gate_up_output = torch_br.br_fused_mlp_infer(
x, [gu_weight],
output_w=self.intermediate_size // self.tp_size,
scales=[gu_scales] if gu_scales is not None else None,
activation_mode="act_swiglu")
down_weight = self.down_proj.qweight if quant_flag else self.down_proj.weight
down_scales = self.down_proj.scales if quant_flag else None
# bypass the standard allreduce for tp8 and tp4pp2 setups and use the
# fused linear+allreduce kernel instead
pp_size = get_pp_group().world_size
all_rank = self.tp_size * pp_size
support_types = ((16, 4), (32, 2), (32, 4))
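# (VLLM_BR_DEVICE_SPC_NUM, tp_size) pairs for which the fused
# linear + allreduce kernel below is taken.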
if all_rank <= envs.VLLM_BR_USE_FUSED_ALLREDUCE and seq_len <= envs.VLLM_BR_STATIC_MOE_DECODER_MAX_LEN and \
(envs.VLLM_BR_DEVICE_SPC_NUM, self.tp_size) in support_types:
tp_rank = get_tp_group().rank_in_group
global_rank = get_tp_group().rank
rank_i = global_rank % self.tp_size
assert rank_i == tp_rank
down_output = torch_br.supa_fused_linear_allreduce_opt(
gate_up_output,
down_weight,
hidden_size,
tp_rank,
self.tp_size,
global_rank,
0,
scales=down_scales)
return down_output
else:
down_output = torch_br.br_fused_mlp_infer(
gate_up_output, [down_weight],
output_w=hidden_size,
scales=[down_scales] if down_scales is not None else None)
if self.tp_size > 1:
out = down_output
if envs.VLLM_BR_USE_CPU_ALL_REDUCE != 0 and self.tp_size >= 4 and out.shape[
1] <= 32:
tp_rank = get_tensor_model_parallel_rank()
output = torch_br.supa_allreduce_pcie_infer(
out, tp_rank, self.tp_size, self.grandparent_pid)
else:
output = tensor_model_parallel_all_reduce(out)
return output
else:
return down_output
else:
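# Shared-expert MLPs are folded into the fused MoE path (cf. DeepseekV2MoE.forward,
# which packs shared-expert weights into the tuple it feeds to FusedMoE),
# so just expose the raw weights here.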
return self.gate_up_proj.weight, self.down_proj.weight

View File

@@ -0,0 +1,116 @@
################################################################################
# Copyright(c)2020-2025 Shanghai Biren Technology Co., Ltd. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
from typing import Optional
import torch
from torch import nn
from transformers import PretrainedConfig
from vllm.distributed import (get_tensor_model_parallel_world_size,
tensor_model_parallel_all_reduce)
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.linear import ReplicatedLinear
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.models.deepseek_v2 import (DeepseekV2MLP,
ParallelConfig)
from vllm_br import envs
from vllm_br.utils import get_grandparent_pid
class DeepseekV2MoE(nn.Module):
def __init__(
self,
config: PretrainedConfig,
parallel_config: ParallelConfig,
quant_config: Optional[QuantizationConfig] = None,
prefix: str = "",
):
super().__init__()
self.tp_size = get_tensor_model_parallel_world_size()
self.routed_scaling_factor = config.routed_scaling_factor
self.n_shared_experts = config.n_shared_experts
self.static_moe_decoder_max_len = 512
self.is_sequence_parallel = parallel_config.use_sequence_parallel_moe
if config.hidden_act != "silu":
raise ValueError(f"Unsupported activation: {config.hidden_act}. "
"Only silu is supported for now.")
self.gate = ReplicatedLinear(config.hidden_size,
config.n_routed_experts,
bias=False,
quant_config=None,
prefix=f"{prefix}.gate")
if config.topk_method == "noaux_tc":
self.gate.e_score_correction_bias = nn.Parameter(
torch.empty(config.n_routed_experts, device="cpu"))
else:
self.gate.e_score_correction_bias = None
self.experts = FusedMoE(
num_experts=config.n_routed_experts,
top_k=config.num_experts_per_tok,
hidden_size=config.hidden_size,
intermediate_size=config.moe_intermediate_size,
reduce_results=False,
renormalize=config.norm_topk_prob,
quant_config=quant_config,
use_grouped_topk=True,
num_expert_group=config.n_group,
topk_group=config.topk_group,
prefix=f"{prefix}.experts",
scoring_func=config.scoring_func,
e_score_correction_bias=self.gate.e_score_correction_bias)
if config.n_shared_experts is not None:
intermediate_size = (config.moe_intermediate_size *
config.n_shared_experts)
self.shared_experts = DeepseekV2MLP(
hidden_size=config.hidden_size,
intermediate_size=intermediate_size,
hidden_act=config.hidden_act,
quant_config=quant_config,
reduce_results=False,
prefix=f"{prefix}.shared_experts",
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
if envs.VLLM_BR_USE_CPU_ALL_REDUCE != 0 and not hasattr(
self, "grandparent_pid"):
self.grandparent_pid = get_grandparent_pid()
orig_shape = hidden_states.shape
assert self.n_shared_experts is not None, 'n_shared_experts must be set'
# NOTE: the gate has been fused into the shared-experts path, so there is no
# separate gate call here; the router weight, the shared-experts gate_up weight
# and the down weight are packed into a tuple instead.
tuple_router_shared_expert_weight = (
self.gate.weight, self.shared_experts.gate_up_proj.weight,
self.shared_experts.down_proj.weight)
hidden_states = hidden_states.view(-1, orig_shape[-1])
final_hidden_states = self.experts(
hidden_states=hidden_states,
router_logits=tuple_router_shared_expert_weight)
if hasattr(final_hidden_states, 'all_reduced'):
# NOTE: this flag indicates final_hidden_states was already all-reduced inside fused_moe
delattr(final_hidden_states, 'all_reduced')
elif self.tp_size > 1:
final_hidden_states = tensor_model_parallel_all_reduce(
final_hidden_states)
return final_hidden_states.view(orig_shape)