feat: remove the dependency on FusedMoE (#2153)

Author: Yineng Zhang
Date: 2024-11-24 20:09:27 +08:00
Committed by: GitHub
Parent: dbe1729395
Commit: b509db5832
7 changed files with 1602 additions and 7 deletions

@@ -27,7 +27,6 @@ from vllm.distributed import (
     get_tp_group,
     tensor_model_parallel_all_reduce,
 )
-from vllm.model_executor.layers.fused_moe import FusedMoE
 from vllm.model_executor.layers.rotary_embedding import get_rope
 from vllm.model_executor.model_loader.weight_utils import default_weight_loader
@@ -42,6 +41,7 @@ from sglang.srt.layers.linear import (
 from sglang.srt.layers.logits_processor import LogitsProcessor
 from sglang.srt.layers.quantization.base_config import QuantizationConfig
 from sglang.srt.layers.radix_attention import RadixAttention
+from sglang.srt.layers.triton_fused_moe import FusedMoE
 from sglang.srt.layers.vocab_parallel_embedding import (
     ParallelLMHead,
     VocabParallelEmbedding,
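
For model code, the change is intended as a drop-in swap: only the import path moves from vllm to sglang's vendored Triton-based FusedMoE. A minimal usage sketch follows, assuming the sglang class keeps the vllm-style constructor it was copied from; the configuration values below are hypothetical, so check sglang.srt.layers.triton_fused_moe for the actual signature.

# Sketch only: assumes sglang's vendored FusedMoE mirrors the
# vllm-style constructor; all parameter values are hypothetical.
from sglang.srt.layers.triton_fused_moe import FusedMoE

experts = FusedMoE(
    num_experts=8,            # hypothetical expert count
    top_k=2,                  # experts routed per token
    hidden_size=4096,
    intermediate_size=14336,
    renormalize=True,         # renormalize top-k routing weights
)

# Forward pass (sketch): the router logits select the top-k experts
# per token, and the fused Triton kernel evaluates the expert MLPs.
# out = experts(hidden_states, router_logits)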