[BugFix] Adapted Qwen3-Next eager mode to v0.11.2 (#4477)

### What this PR does / why we need it?

Adapts Qwen3-Next eager mode to vLLM `v0.11.2`. The e2e MTP similarity test now passes `enforce_eager=True` to both of its `VllmRunner` instances, and the previously skipped `test_models_distributed_Qwen3_NEXT_MTP_TP4_SIMILARITY` is re-enabled.
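
In vLLM, `enforce_eager=True` skips graph capture and runs the model op by op, which is the mode this patch targets. Below is a minimal sketch of the configuration the updated test exercises, using vLLM's offline `LLM` API; the model ID and runner settings are copied from the test in the diff, and the sketch itself is illustrative rather than part of the patch:

```python
# Sketch: Qwen3-Next in eager mode, mirroring the e2e test settings.
from vllm import LLM, SamplingParams

llm = LLM(
    model="Qwen/Qwen3-Next-80B-A3B-Instruct",
    tensor_parallel_size=4,
    max_model_len=4096,
    gpu_memory_utilization=0.8,
    enforce_eager=True,  # no graph capture; pure eager execution
)

# Greedy decoding, as the test's generate_greedy helper does.
params = SamplingParams(temperature=0.0, max_tokens=20)
outputs = llm.generate(["Hello, my name is"], params)
print(outputs[0].outputs[0].text)
```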


- vLLM version: v0.11.2
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.2

Signed-off-by: drslark <slarksblood@qq.com>
Commit 1cae3e4a49 (parent b220de33e8), authored by drslark on 2025-11-27 17:44:59 +08:00 and committed by GitHub.
3 changed files with 16 additions and 13 deletions.


@@ -24,7 +24,6 @@ Run `pytest tests/e2e/multicard/test_qwen3_next.py`.
 import os
 from unittest.mock import patch
 
-import pytest
 from modelscope import snapshot_download  # type: ignore
 
 from tests.e2e.conftest import VllmRunner
@@ -64,7 +63,6 @@ def test_models_distributed_Qwen3_NEXT_TP4_FULL_DECODE_ONLY():
     del vllm_model
 
 
-@pytest.mark.skip(reason="Fix me, the accuracy is not correct")
 def test_models_distributed_Qwen3_NEXT_MTP_TP4_SIMILARITY():
     example_prompts = [
         "Hello, my name is",
@@ -74,11 +72,14 @@ def test_models_distributed_Qwen3_NEXT_MTP_TP4_SIMILARITY():
     ]
     max_tokens = 20
 
-    with VllmRunner("Qwen/Qwen3-Next-80B-A3B-Instruct",
-                    tensor_parallel_size=4,
-                    max_model_len=4096,
-                    gpu_memory_utilization=0.8,
-                    distributed_executor_backend="mp") as vllm_model:
+    with VllmRunner(
+        "Qwen/Qwen3-Next-80B-A3B-Instruct",
+        tensor_parallel_size=4,
+        max_model_len=4096,
+        gpu_memory_utilization=0.8,
+        distributed_executor_backend="mp",
+        enforce_eager=True,
+    ) as vllm_model:
         ref_outputs = vllm_model.generate_greedy(example_prompts, max_tokens)
     del vllm_model
 
@@ -87,6 +88,7 @@ def test_models_distributed_Qwen3_NEXT_MTP_TP4_SIMILARITY():
                     max_model_len=4096,
                     gpu_memory_utilization=0.8,
                     distributed_executor_backend="mp",
+                    enforce_eager=True,
                     additional_config={
                         "ascend_scheduler_config": {
                             "enabled": True,