[Feat] Support sequence parallelism pass for VL models (#5632)

realliujiaxu
2026-02-27 08:27:41 +08:00
committed by GitHub
parent ed175d6d92
commit 5def28dcd3
22 changed files with 460 additions and 101 deletions
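
In brief, this change lets the sequence-parallelism (SP) compilation pass run for vision-language models and renames the forward-context flag sp_enabled to flash_comm_v1_enabled. As a rough sketch of what the new e2e test below configures, the pass could be enabled through vLLM's offline API like this; the LLM wiring here is an assumption and not part of the diff, while sp_threshold and npugraph_ex_config are vllm-ascend additional_config keys taken directly from the test:

# Hedged sketch, not from the diff: enable the SP pass via vLLM's offline API.
from vllm import LLM, SamplingParams

llm = LLM(
    model="Qwen/Qwen3-VL-2B-Instruct",
    max_model_len=1024,
    tensor_parallel_size=2,
    # Turn on the sequence-parallelism compilation pass.
    compilation_config={"pass_config": {"enable_sp": True}},
    # vllm-ascend extra; exact semantics assumed (token-count threshold for SP).
    additional_config={"sp_threshold": 10},
)
outputs = llm.generate(["The future of AI is"],
                       SamplingParams(max_tokens=10, temperature=0.0))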

@@ -0,0 +1,64 @@
import os

import pytest

from vllm import SamplingParams

from tests.e2e.conftest import VllmRunner
from tests.e2e.model_utils import check_outputs_equal

MODELS = [
    "Qwen/Qwen3-VL-2B-Instruct",
]


@pytest.mark.parametrize("model", MODELS)
def test_qwen3_vl_sp_tp2(model: str) -> None:
    prompts = [
        "Hello, my name is", "The capital of the United States is",
        "The capital of France is", "The future of AI is"
    ]
    # Greedy decoding so the two runs are directly comparable.
    sampling_params = SamplingParams(max_tokens=10, temperature=0.0)

    # Baseline: sequence-parallelism pass disabled.
    with VllmRunner(
        model,
        max_model_len=1024,
        tensor_parallel_size=2,
        compilation_config={
            "cudagraph_capture_sizes": [2, 4],
            "cudagraph_mode": "FULL_DECODE_ONLY",
            "pass_config": {"enable_sp": False}
        },
        additional_config={"npugraph_ex_config": {"enable": False}}
    ) as runner:
        no_sp_outputs = runner.model.generate(prompts, sampling_params)

    # Same settings with the sequence-parallelism pass enabled.
    with VllmRunner(
        model,
        max_model_len=1024,
        tensor_parallel_size=2,
        compilation_config={
            "cudagraph_capture_sizes": [2, 4],
            "cudagraph_mode": "FULL_DECODE_ONLY",
            "pass_config": {"enable_sp": True}
        },
        additional_config={
            "sp_threshold": 10,
            "npugraph_ex_config": {"enable": False}
        }
    ) as runner:
        sp_outputs = runner.model.generate(prompts, sampling_params)

    no_sp_outputs_list = []
    for output in no_sp_outputs:
        no_sp_outputs_list.append(
            (output.outputs[0].index, output.outputs[0].text))

    sp_outputs_list = []
    for output in sp_outputs:
        sp_outputs_list.append(
            (output.outputs[0].index, output.outputs[0].text))

    # With greedy sampling, enabling the SP pass must not change the text.
    check_outputs_equal(
        outputs_0_lst=no_sp_outputs_list,
        outputs_1_lst=sp_outputs_list,
        name_0="no_sp_outputs",
        name_1="sp_outputs",
    )
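
For reference, one hypothetical way to run just this test via pytest's Python API; the file path and node id are illustrative only, since the diff does not show where the new file lives:

import pytest

# Path and node id are assumptions for illustration only.
pytest.main(["-sv", "tests/e2e/test_qwen3_vl_sp.py::test_qwen3_vl_sp_tp2"])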

@@ -157,7 +157,7 @@ class TestAscendMultiHeadLatentAttention(TestBase):
         hidden_states = torch.randn(3, self.hidden_size)
         mock_forward_context = MagicMock(spec=ForwardContext)
-        mock_forward_context.sp_enabled = False
+        mock_forward_context.flash_comm_v1_enabled = False
         mock_get_forward_context.return_value = mock_forward_context
         mock_mla_forward.return_value = (3, self.hidden_size)

@@ -390,7 +390,7 @@ class TestEagleProposerDummyRun(TestBase):
 
     # cpu does not support parallel-group, let alone `sp`
     @patch("vllm_ascend.spec_decode.eagle_proposer.get_forward_context",
-           **{"return_value.sp_enabled": False})
+           **{"return_value.flash_comm_v1_enabled": False})
     @patch("vllm_ascend.spec_decode.eagle_proposer.set_ascend_forward_context")
     def test_dummy_run_basic(self, mock_context, mock_get_context):
         num_tokens = 32
@@ -406,7 +406,7 @@ class TestEagleProposerDummyRun(TestBase):
 
     # cpu does not support parallel-group, let alone `sp`
     @patch("vllm_ascend.spec_decode.eagle_proposer.get_forward_context",
-           **{"return_value.sp_enabled": False})
+           **{"return_value.flash_comm_v1_enabled": False})
     @patch("vllm_ascend.spec_decode.eagle_proposer.set_ascend_forward_context")
     def test_dummy_run_with_prefill(self, mock_context, mock_get_context):
         mock_context.return_value.__enter__.return_value = None
@@ -426,7 +426,7 @@ class TestEagleProposerDummyRun(TestBase):
         mock_return_context.cudagraph_runtime_mode = CUDAGraphMode.FULL
         mock_return_context.capturing = True
         # cpu does not support parallel-group, let alone `sp`
-        mock_return_context.sp_enabled = False
+        mock_return_context.flash_comm_v1_enabled = False
         mock_get_context.return_value = mock_return_context
         self.proposer.use_cuda_graph = True
         # cpu does not support `torch.ops.vllm.maybe_pad_and_reduce`
@@ -449,7 +449,7 @@ class TestEagleProposerDummyRun(TestBase):
         mock_return_context.cudagraph_runtime_mode = CUDAGraphMode.FULL
         mock_return_context.capturing = False
         # cpu does not support parallel-group, let alone `sp`
-        mock_return_context.sp_enabled = False
+        mock_return_context.flash_comm_v1_enabled = False
         mock_get_context.return_value = mock_return_context
         self.proposer.use_cuda_graph = True
         # cpu does not support `torch.ops.vllm.maybe_pad_and_reduce`
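
A note on the mocking idiom in the hunks above: dotted keyword names passed to patch (and forwarded to MagicMock) are applied via configure_mock, so "return_value.flash_comm_v1_enabled" sets an attribute on the mock's return value. A minimal self-contained sketch:

from unittest.mock import MagicMock

# Dotted keys configure child mocks: here the mock's return_value gets a
# flash_comm_v1_enabled attribute set to False.
get_forward_context = MagicMock(
    **{"return_value.flash_comm_v1_enabled": False})

ctx = get_forward_context()  # calling the mock yields its return_value
assert ctx.flash_comm_v1_enabled is False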