### What this PR does / why we need it?
Refactor the model structure in qwen3_next.py to reduce the line count.
### Does this PR introduce _any_ user-facing change?
N/A
### How was this patch tested?
```
def main():
    """Run a small smoke test: generate one completion with Qwen3-Next on 4 NPUs."""
    # Imports needed to make this example self-contained.
    from vllm import LLM, SamplingParams

    prompts = [
        "The future of AI is",
    ]
    # Create a sampling params object.
    sampling_params = SamplingParams(max_tokens=100,
                                     temperature=0.6,
                                     top_k=40,
                                     top_p=0.95)
    # Create an LLM.
    llm = LLM(
        model="Qwen/Qwen3-Next-80B-A3B-Instruct",
        tensor_parallel_size=4,
        enforce_eager=True,
        trust_remote_code=True,
        max_model_len=256,
        gpu_memory_utilization=0.7,
        block_size=64,
    )
    # Generate texts from the prompts.
    outputs = llm.generate(prompts, sampling_params)
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
```
- vLLM version: v0.10.2
- vLLM main:
https://github.com/vllm-project/vllm/commit/releases/v0.11.0
---------
Signed-off-by: Icey <1790571317@qq.com>
57 lines · 2.1 KiB · Python
from vllm import ModelRegistry
|
|
|
|
import vllm_ascend.envs as envs_ascend
|
|
|
|
|
|
def register_model():
    """Register vllm-ascend's custom model implementations with vLLM.

    Maps each supported architecture name to the "<module>:<class>" path of
    the Ascend-specific implementation that overrides the stock vLLM model.
    Registration is data-driven: a single mapping plus one loop replaces the
    previous ten near-identical ``ModelRegistry.register_model`` calls.
    """
    # Qwen2_5_VL has two Ascend implementations; the env switch picks one.
    if envs_ascend.USE_OPTIMIZED_MODEL:
        qwen2_5_vl_cls = "vllm_ascend.models.qwen2_5_vl:AscendQwen2_5_VLForConditionalGeneration"
    else:
        qwen2_5_vl_cls = "vllm_ascend.models.qwen2_5_vl_without_padding:AscendQwen2_5_VLForConditionalGeneration_Without_Padding"

    # Architecture name -> Ascend implementation, kept in the original
    # registration order (dicts preserve insertion order).
    ascend_models = {
        "Qwen2VLForConditionalGeneration":
        "vllm_ascend.models.qwen2_vl:AscendQwen2VLForConditionalGeneration",
        "Qwen3VLMoeForConditionalGeneration":
        "vllm_ascend.models.qwen2_5_vl_without_padding:AscendQwen3VLMoeForConditionalGeneration",
        "Qwen3VLForConditionalGeneration":
        "vllm_ascend.models.qwen2_5_vl_without_padding:AscendQwen3VLForConditionalGeneration",
        "Qwen2_5_VLForConditionalGeneration": qwen2_5_vl_cls,
        "DeepseekV2ForCausalLM":
        "vllm_ascend.models.deepseek_v2:CustomDeepseekV2ForCausalLM",
        "DeepseekV3ForCausalLM":
        "vllm_ascend.models.deepseek_v2:CustomDeepseekV3ForCausalLM",
        "DeepSeekMTPModel":
        "vllm_ascend.models.deepseek_mtp:CustomDeepSeekMTP",
        "Qwen3MoeForCausalLM":
        "vllm_ascend.models.qwen3_moe:CustomQwen3MoeForCausalLM",
        # There is no PanguProMoEForCausalLM in vLLM, so we should register it
        # before vLLM config initialization to make sure the model can be
        # loaded correctly. This register step can be removed once vLLM
        # supports PanguProMoEForCausalLM.
        "PanguProMoEForCausalLM":
        "vllm_ascend.torchair.models.torchair_pangu_moe:PanguProMoEForCausalLM",
        "Qwen3NextForCausalLM":
        "vllm_ascend.models.qwen3_next:CustomQwen3NextForCausalLM",
    }

    for arch, impl in ascend_models.items():
        ModelRegistry.register_model(arch, impl)