[BugFix] Adapted Qwen3-Next eager mode to v0.11.2 (#4477)

### What this PR does / why we need it?

Adapted Qwen3-Next eager mode to `v0.11.2`.


- vLLM version: v0.11.2
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.2

Signed-off-by: drslark <slarksblood@qq.com>
This commit is contained in:
drslark
2025-11-27 17:44:59 +08:00
committed by GitHub
parent b220de33e8
commit 1cae3e4a49
3 changed files with 16 additions and 13 deletions

View File

@@ -58,6 +58,7 @@ INPUT_PROMPTS = [
]
@pytest.mark.skip(reason="Fix me, the accuracy is not correct")
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [50])
def test_prefix_cache_with_v1_scheduler(model: str, max_tokens: int) -> None:

View File

@@ -24,7 +24,6 @@ Run `pytest tests/e2e/multicard/test_qwen3_next.py`.
import os
from unittest.mock import patch
import pytest
from modelscope import snapshot_download # type: ignore
from tests.e2e.conftest import VllmRunner
@@ -64,7 +63,6 @@ def test_models_distributed_Qwen3_NEXT_TP4_FULL_DECODE_ONLY():
del vllm_model
@pytest.mark.skip(reason="Fix me, the accuracy is not correct")
def test_models_distributed_Qwen3_NEXT_MTP_TP4_SIMILARITY():
example_prompts = [
"Hello, my name is",
@@ -74,11 +72,14 @@ def test_models_distributed_Qwen3_NEXT_MTP_TP4_SIMILARITY():
]
max_tokens = 20
with VllmRunner("Qwen/Qwen3-Next-80B-A3B-Instruct",
tensor_parallel_size=4,
max_model_len=4096,
gpu_memory_utilization=0.8,
distributed_executor_backend="mp") as vllm_model:
with VllmRunner(
"Qwen/Qwen3-Next-80B-A3B-Instruct",
tensor_parallel_size=4,
max_model_len=4096,
gpu_memory_utilization=0.8,
distributed_executor_backend="mp",
enforce_eager=True,
) as vllm_model:
ref_outputs = vllm_model.generate_greedy(example_prompts, max_tokens)
del vllm_model
@@ -87,6 +88,7 @@ def test_models_distributed_Qwen3_NEXT_MTP_TP4_SIMILARITY():
max_model_len=4096,
gpu_memory_utilization=0.8,
distributed_executor_backend="mp",
enforce_eager=True,
additional_config={
"ascend_scheduler_config": {
"enabled": True,

View File

@@ -675,7 +675,7 @@ class CustomQwen3NextGatedDeltaNet(Qwen3NextGatedDeltaNet, MambaBase):
initial_state[~has_initial_state, ...] = 0
batch_size = initial_state.shape[0]
core_attn_out = []
temp_core_attn_out = []
last_recurrent_state = []
for b_idx in range(batch_size):
@@ -702,18 +702,18 @@ class CustomQwen3NextGatedDeltaNet(Qwen3NextGatedDeltaNet, MambaBase):
use_qk_l2norm_in_kernel=True,
)
core_attn_out.append(cur_core_attn_out_non_spec)
temp_core_attn_out.append(cur_core_attn_out_non_spec)
last_recurrent_state.append(cur_last_recurrent_state)
tar_dtype = core_attn_out[0].dtype
tar_device = core_attn_out[0].device
tar_shape = list(core_attn_out[0].shape)
tar_dtype = temp_core_attn_out[0].dtype
tar_device = temp_core_attn_out[0].device
tar_shape = list(temp_core_attn_out[0].shape)
tar_shape[1] = non_spec_query_start_loc[-1]
core_attn_out_non_spec = torch.empty(tar_shape,
dtype=tar_dtype,
device=tar_device)
for b_idx in range(batch_size):
cur_core_attn_out = core_attn_out[b_idx]
cur_core_attn_out = temp_core_attn_out[b_idx]
start, end = non_spec_query_start_loc[
b_idx], non_spec_query_start_loc[b_idx + 1]
core_attn_out_non_spec[:, start:end, ...] = cur_core_attn_out