[Feature] support aclgraph for model runner v2 (#7110)
### What this PR does / why we need it?
This PR adds aclgraph support for model runner v2; see RFC
#5208. It contains the following modifications:
- adapt to the newest commit of the vLLM main branch.
- provide a unified interface for extra forward context, shared by model runner v1 and model runner v2 (see the sketch after this list).
- implement graph mode for the main model (see the capture/replay sketch after the commit message).
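
A minimal sketch of what a unified extra-forward-context interface could look like, assuming a thin wrapper around vLLM's native forward context; every name except `vllm.forward_context.get_forward_context` is illustrative, not the PR's actual API:

```python
# Sketch only: a unified accessor that both runners import, assuming a thin
# wrapper around vLLM's native forward context. All names except
# vllm.forward_context.get_forward_context are illustrative.
from dataclasses import dataclass
from typing import Any, Optional

import vllm.forward_context as _vllm_fc


@dataclass
class AscendExtraContext:
    """Hypothetical extra per-step state shared by runner v1 and v2."""
    with_prefill: bool = False
    moe_comm_type: Optional[str] = None


_extra: Optional[AscendExtraContext] = None


def set_extra_forward_context(ctx: AscendExtraContext) -> None:
    """Install the extra context before launching a forward pass."""
    global _extra
    _extra = ctx


def get_forward_context() -> Any:
    """Single lookup point; tests patch this one name instead of patching
    get_forward_context inside every op module."""
    return _vllm_fc.get_forward_context()
```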
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
- vLLM version: v0.16.0
- vLLM main:
4034c3d32e
---------
Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
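
For the "graph mode for main model" item, the general capture/replay pattern is sketched below using torch.cuda's stable CUDAGraph API for illustration; the actual aclgraph binding on NPU (via torch_npu) may differ in names and details:

```python
# Capture/replay sketch in the CUDA-graph style that aclgraph mirrors on NPU.
# Uses the stable torch.cuda API for illustration; requires a CUDA device.
import torch

model = torch.nn.Linear(16, 16).cuda().eval()
static_in = torch.zeros(8, 16, device="cuda")  # fixed input buffer

# Warm up on a side stream, then capture one forward pass into a graph.
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
    model(static_in)
torch.cuda.current_stream().wait_stream(s)

g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
    static_out = model(static_in)  # output buffer bound at capture time

# Replay: copy new data into the captured input buffer and relaunch.
static_in.copy_(torch.randn(8, 16, device="cuda"))
g.replay()
print(static_out.shape)
```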
```diff
@@ -119,11 +119,9 @@ def mock_dist_env(mocker: MockerFixture):
                   return_value=(torch.tensor([0, 1, 2, -1, -1, -1, -1, -1]), None, 0)), \
          patch('vllm_ascend.ops.fused_moe.fused_moe.get_forward_context',
                return_value=mock_forward_context_obj), \
-         patch('vllm_ascend.ops.fused_moe.prepare_finalize.get_forward_context',
+         patch('vllm_ascend.ascend_forward_context.get_forward_context',
                return_value=mock_forward_context_obj), \
          patch("vllm_ascend.utils.get_ascend_device_type", return_value=AscendDeviceType.A3), \
-         patch('vllm_ascend.ops.fused_moe.moe_mlp.get_forward_context',
-               return_value=mock_forward_context_obj), \
          patch('vllm_ascend.ops.fused_moe.moe_comm_method.MC2CommImpl._get_token_dispatcher',
                return_value=None), \
          patch('vllm_ascend.ops.fused_moe.moe_comm_method.AlltoAllCommImpl._get_token_dispatcher',
```
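
This hunk and the ones below all make the same move: the tests now patch `get_forward_context` at the unified `vllm_ascend.ascend_forward_context` path instead of at each consumer module. Note that `unittest.mock.patch` replaces a name where it is looked up, so one patch covers every op only if the ops call through the unified module rather than binding the function into their own namespace. A minimal illustration (assumes `vllm_ascend` is importable):

```python
# Why one patch suffices: mock.patch replaces a name where it is looked up.
# If every op calls through vllm_ascend.ascend_forward_context, patching that
# single path covers them all.
from unittest.mock import MagicMock, patch

mock_ctx = MagicMock()
mock_ctx.flash_comm_v1_enabled = False

with patch('vllm_ascend.ascend_forward_context.get_forward_context',
           return_value=mock_ctx):
    from vllm_ascend import ascend_forward_context
    # Any caller that goes through the module now sees the mock.
    assert ascend_forward_context.get_forward_context() is mock_ctx
```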
```diff
@@ -298,7 +296,7 @@ class TestUnifiedApplyMLP(TestBase):

     @patch('vllm_ascend.ops.fused_moe.moe_mlp.get_weight_prefetch_method',
            return_value=MagicMock())
-    @patch('vllm_ascend.ops.fused_moe.moe_mlp.get_forward_context')
+    @patch('vllm_ascend.ascend_forward_context.get_forward_context')
     @patch('vllm_ascend.utils.get_ascend_device_type',
            return_value=AscendDeviceType.A3)
     @patch('torch_npu.npu_grouped_matmul')
```
```diff
@@ -407,7 +405,7 @@ class TestUnifiedApplyMLP(TestBase):
     @patch('vllm_ascend.ops.fused_moe.moe_mlp.HAS_TRITON', False)
     @patch('vllm_ascend.ops.fused_moe.moe_mlp.get_weight_prefetch_method',
            return_value=MagicMock())
-    @patch('vllm_ascend.ops.fused_moe.moe_mlp.get_forward_context')
+    @patch('vllm_ascend.ascend_forward_context.get_forward_context')
     @patch('torch_npu.npu_grouped_matmul')
     @patch('torch_npu.npu_swiglu')
     @patch('torch_npu.npu_dynamic_quant')
```
```diff
@@ -513,7 +511,7 @@ class TestUnifiedApplyMLP(TestBase):

     @patch("vllm_ascend.ops.fused_moe.moe_mlp.get_weight_prefetch_method",
            return_value=MagicMock())
-    @patch("vllm_ascend.ops.fused_moe.moe_mlp.get_forward_context")
+    @patch("vllm_ascend.ascend_forward_context.get_forward_context")
     @patch("torch_npu.npu_grouped_matmul")
     @patch("torch_npu.npu_swiglu")
     @patch("torch_npu.npu_grouped_matmul_swiglu_quant")
```
```diff
@@ -121,9 +121,10 @@ class TestAscendMultiHeadLatentAttention(TestBase):
     @patch("vllm_ascend.ops.mla.get_ascend_config")
     @patch("vllm_ascend.ops.mla.get_tensor_model_parallel_world_size")
     @patch("vllm_ascend.ops.mla.get_forward_context")
-    def test_forward(self, mock_get_forward_context, mock_tp_size,
+    @patch('vllm_ascend.ascend_forward_context.get_forward_context')
+    def test_forward(self, mock_get_forward_context_2, mock_get_forward_context, mock_tp_size,
                      mock_ascend_config, mock_get_vllm_config,
-                     mock_mla_forward):
+                     mock_mla_forward,):
         mock_tp_size.return_value = 1
         mock_ascend_config.return_value.enable_shared_expert_dp = False
         mock_vllm_config = MagicMock(spec=VllmConfig)
```
```diff
@@ -159,6 +160,7 @@ class TestAscendMultiHeadLatentAttention(TestBase):
         mock_forward_context = MagicMock(spec=ForwardContext)
         mock_forward_context.flash_comm_v1_enabled = False
         mock_get_forward_context.return_value = mock_forward_context
+        mock_get_forward_context_2.return_value = mock_forward_context

         mock_mla_forward.return_value = (3, self.hidden_size)
```
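
The new `mock_get_forward_context_2` lands first in the parameter list because stacked `@patch` decorators apply bottom-up: the lowest decorator produces the first mock argument. A standalone sketch with stdlib targets:

```python
# Standalone sketch of @patch stacking: bottom decorator -> first argument.
from unittest.mock import patch


@patch("os.path.exists")   # applied last  -> second mock argument
@patch("os.path.isfile")   # applied first -> first mock argument
def check(mock_isfile, mock_exists):
    import os.path
    mock_isfile.return_value = True
    mock_exists.return_value = False
    assert os.path.isfile("x") is True
    assert os.path.exists("x") is False


check()  # patch injects both mocks; no arguments needed at the call site
```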
```diff
@@ -28,7 +28,7 @@ class TestMoECommMethod(TestBase):
         self.moe_config.dp_group = MagicMock()
         self.moe_config.global_redundant_expert_num = 0

-    @patch("vllm_ascend.ops.fused_moe.moe_comm_method.get_forward_context")
+    @patch('vllm_ascend.ascend_forward_context.get_forward_context')
     @patch(
         "vllm_ascend.ops.fused_moe.moe_comm_method.PrepareAndFinalizeWithAllGather"
     )
```
```diff
@@ -73,7 +73,7 @@ class TestMoECommMethod(TestBase):
                               context_metadata=context_metadata)
         mock_pf_instance.finalize.assert_called_once_with(h_out, True, None)

-    @patch("vllm_ascend.ops.fused_moe.moe_comm_method.get_forward_context")
+    @patch('vllm_ascend.ascend_forward_context.get_forward_context')
     @patch(
         "vllm_ascend.ops.fused_moe.moe_comm_method.PrepareAndFinalizeWithMC2")
     @patch("vllm_ascend.ops.fused_moe.moe_comm_method.TokenDispatcherWithMC2")
```
```diff
@@ -116,7 +116,7 @@ class TestMoECommMethod(TestBase):
                               context_metadata=context_metadata)
         mock_pf_instance.finalize.assert_called_once_with(h_out, True, None)

-    @patch("vllm_ascend.ops.fused_moe.moe_comm_method.get_forward_context")
+    @patch('vllm_ascend.ascend_forward_context.get_forward_context')
     @patch(
         "vllm_ascend.ops.fused_moe.moe_comm_method.PrepareAndFinalizeWithAll2All"
     )
```
```diff
@@ -155,7 +155,7 @@ class TestMoECommMethod(TestBase):
         mock_pf_instance.prepare.assert_called_once_with(
             hidden_states, router_logits, False, False, QuantType.NONE)

-    @patch("vllm_ascend.ops.fused_moe.moe_comm_method.get_forward_context")
+    @patch('vllm_ascend.ascend_forward_context.get_forward_context')
     @patch(
         "vllm_ascend.ops.fused_moe.moe_comm_method.PrepareAndFinalizeWithAllGather"
     )
```
```diff
@@ -32,7 +32,7 @@ class TestPrepareAndFinalize(unittest.TestCase):
     @patch(
         "vllm_ascend.ops.fused_moe.prepare_finalize.get_tensor_model_parallel_rank",
         return_value=0)
-    @patch("vllm_ascend.ops.fused_moe.prepare_finalize.get_forward_context")
+    @patch('vllm_ascend.ascend_forward_context.get_forward_context')
     def test_mc2_prepare_finalize(self, mock_get_forward_context, mock_tp_rank,
                                   mock_tp_size):
         mock_context = MagicMock()
```
```diff
@@ -65,7 +65,7 @@ class TestPrepareAndFinalize(unittest.TestCase):
     @patch(
         "vllm_ascend.ops.fused_moe.prepare_finalize.get_tensor_model_parallel_rank",
         return_value=0)
-    @patch("vllm_ascend.ops.fused_moe.prepare_finalize.get_forward_context")
+    @patch('vllm_ascend.ascend_forward_context.get_forward_context')
     @patch("torch.distributed.all_gather")
     def test_mc2_tp_split_allgather(self, mock_all_gather,
                                     mock_get_forward_context, mock_tp_rank,
```
```diff
@@ -169,7 +169,7 @@ class TestPrepareAndFinalize(unittest.TestCase):
         self.assertEqual(final_result.shape[0], 2)

     @patch("vllm_ascend.ops.fused_moe.prepare_finalize.get_dp_group")
-    @patch("vllm_ascend.ops.fused_moe.prepare_finalize.get_forward_context")
+    @patch('vllm_ascend.ascend_forward_context.get_forward_context')
     @patch("vllm_ascend.ops.fused_moe.prepare_finalize.enable_sp",
            return_value=False)
     def test_allgather_prepare_finalize(self, mock_enable_sp,
```