### What this PR does / why we need it?
This PR deletes ~2K lines of code related to deepseek modeling. It falls the
CustomDeepseekV2 modules back to the original vLLM implementations and adapts
some modifications from vLLM concerning deepseek and MoE.
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
E2E vllm serving with torchair graph mode and eager mode.
- vLLM version: v0.10.2
- vLLM main:
759ef49b15
---------
Signed-off-by: linfeng-yuan <1102311262@qq.com>
Signed-off-by: Yizhou Liu <liu_yizhou@outlook.com>
Co-authored-by: yiz-liu <136800916+yiz-liu@users.noreply.github.com>
Co-authored-by: Yizhou Liu <liu_yizhou@outlook.com>
50 lines · 1.8 KiB · Python
from vllm import ModelRegistry

import vllm_ascend.envs as envs_ascend


def register_model():
    """Register all Ascend-customized model implementations with vLLM.

    Maps vLLM architecture names to the ``module:Class`` paths of their
    Ascend (NPU) implementations so vLLM resolves them at load time.
    """
    # Qwen2-VL always uses the Ascend implementation.
    ModelRegistry.register_model(
        "Qwen2VLForConditionalGeneration",
        "vllm_ascend.models.qwen2_vl:AscendQwen2VLForConditionalGeneration")

    # Qwen2.5-VL has two variants; the env flag selects between the
    # optimized (padded) one and the padding-free one.
    if envs_ascend.USE_OPTIMIZED_MODEL:
        qwen2_5_vl_impl = (
            "vllm_ascend.models.qwen2_5_vl:"
            "AscendQwen2_5_VLForConditionalGeneration")
    else:
        qwen2_5_vl_impl = (
            "vllm_ascend.models.qwen2_5_vl_without_padding:"
            "AscendQwen2_5_VLForConditionalGeneration_Without_Padding")
    ModelRegistry.register_model("Qwen2_5_VLForConditionalGeneration",
                                 qwen2_5_vl_impl)

    # Remaining unconditional registrations, kept in their original order.
    # NOTE: There is no PanguProMoEForCausalLM in vLLM, so we should register
    # it before vLLM config initialization to make sure the model can be
    # loaded correctly. This register step can be removed once vLLM supports
    # PanguProMoEForCausalLM.
    unconditional_models = (
        ("DeepseekV2ForCausalLM",
         "vllm_ascend.models.deepseek_v2:CustomDeepseekV2ForCausalLM"),
        ("DeepseekV3ForCausalLM",
         "vllm_ascend.models.deepseek_v3:CustomDeepseekV3ForCausalLM"),
        ("DeepSeekMTPModel",
         "vllm_ascend.models.deepseek_mtp:CustomDeepSeekMTP"),
        ("Qwen3MoeForCausalLM",
         "vllm_ascend.models.qwen3_moe:CustomQwen3MoeForCausalLM"),
        ("Qwen3ForCausalLM",
         "vllm_ascend.models.qwen3:CustomQwen3ForCausalLM"),
        ("PanguProMoEForCausalLM",
         "vllm_ascend.torchair.models.torchair_pangu_moe:PanguProMoEForCausalLM"
         ),
        ("Qwen3NextForCausalLM",
         "vllm_ascend.models.qwen3_next:Qwen3NextForCausalLM"),
    )
    for architecture, implementation in unconditional_models:
        ModelRegistry.register_model(architecture, implementation)