[refactor] replace scattered business kwargs with typed request objects and explicit stage boundaries (#7024)
### What this PR does / why we need it?

Refactor `vllm_ascend/ops/fused_moe` to replace scattered MoE business `**kwargs` with typed request objects and explicit stage boundaries.

- Prepare, dispatch, MLP, and quant stages now have clearer ownership.
- The main MoE path no longer depends on business `kwargs.get(...)` lookups.
- Comm and dispatcher interfaces are request-only on the main path.
- UTs can assert stage-level fields directly instead of inferring behavior indirectly.

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

CI passed.

---------

Signed-off-by: linfeng-yuan <1102311262@qq.com>
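For orientation, the shape of the typed request object that the new tests assert against can be sketched as a pair of dataclasses. This is a minimal sketch inferred from the test assertions (`fused_experts_input.activation`, `.routing.mc2_mask`, `.topk_weights`, ...); the actual class names and full field sets in `vllm_ascend/ops/fused_moe` may differ.

```python
# Hypothetical sketch of the request objects implied by the tests below.
# Class names (FusedExpertsInput, MoERoutingParams) are inferred, not
# copied from vllm_ascend/ops/fused_moe; only the asserted fields are shown.
from dataclasses import dataclass
from typing import Optional

import torch


@dataclass
class MoERoutingParams:
    """Routing-stage fields that previously traveled as loose **kwargs."""
    apply_router_weight_on_input: bool = False
    mc2_mask: Optional[torch.Tensor] = None
    pertoken_scale: Optional[torch.Tensor] = None


@dataclass
class FusedExpertsInput:
    """Single request object handed to the comm method's fused_experts()."""
    topk_weights: torch.Tensor
    topk_ids: torch.Tensor
    activation: str
    routing: MoERoutingParams
```

With a request object of this shape, the dispatch and MLP stages read named, typed fields instead of probing `kwargs.get(...)`, and unit tests can assert directly on stage-level fields such as `fused_experts_input.routing.mc2_mask`, as the new tests below do.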
```diff
@@ -3,9 +3,8 @@ from unittest.mock import Mock, patch
 import torch
 
 from tests.ut.base import TestBase
-from vllm_ascend.quantization.methods.w4a16 import (AscendW4A16FusedMoEMethod,
-                                                    pack_to_int32,
-                                                    unpack_from_int32)
+from vllm_ascend.ascend_forward_context import MoECommType
+from vllm_ascend.quantization.methods.w4a16 import AscendW4A16FusedMoEMethod, pack_to_int32, unpack_from_int32
 
 
 class TestUnpackFromInt32(TestBase):
@@ -268,3 +267,41 @@ class TestAscendW4A16FusedMoEMethod(TestBase):
             torch.equal(layer.w13_weight_packed.data, original_w13_data))
         self.assertTrue(
             torch.equal(layer.w2_weight_packed.data, original_w2_data))
+
+    @patch("vllm_ascend.quantization.methods.w4a16._EXTRA_CTX")
+    @patch("vllm_ascend.quantization.methods.w4a16.select_experts")
+    def test_apply_uses_explicit_dispatch_and_mlp_args(self, mock_select_experts, mock_extra_ctx):
+        tokens = 3
+        hidden_size = self.output_size
+        layer = self.build_layer()
+        x = torch.randn(tokens, hidden_size, dtype=torch.float32)
+        router_logits = torch.randn(tokens, self.experts, dtype=torch.float32)
+        topk_weights = torch.randn(tokens, 2, dtype=torch.float32)
+        topk_ids = torch.randint(0, self.experts, (tokens, 2), dtype=torch.int64)
+        mc2_mask = torch.tensor([1, 0, 1], dtype=torch.bool)
+        pertoken_scale = torch.randn(tokens, dtype=torch.float32)
+
+        mock_select_experts.return_value = (topk_weights, topk_ids)
+        mock_comm = Mock()
+        mock_comm.fused_experts.return_value = torch.randn(tokens, hidden_size, dtype=torch.float32)
+        mock_extra_ctx.moe_comm_method = mock_comm
+        mock_extra_ctx.moe_comm_type = MoECommType.ALLGATHER
+
+        self.quant_method.apply(
+            layer=layer,
+            x=x,
+            router_logits=router_logits,
+            top_k=2,
+            renormalize=True,
+            global_num_experts=self.experts,
+            activation="gelu",
+            apply_router_weight_on_input=True,
+            mc2_mask=mc2_mask,
+            pertoken_scale=pertoken_scale,
+        )
+
+        fused_experts_input = mock_comm.fused_experts.call_args.kwargs["fused_experts_input"]
+        self.assertEqual(fused_experts_input.activation, "gelu")
+        self.assertTrue(fused_experts_input.routing.apply_router_weight_on_input)
+        self.assertIs(fused_experts_input.routing.mc2_mask, mc2_mask)
+        self.assertIs(fused_experts_input.routing.pertoken_scale, pertoken_scale)
```
```diff
@@ -3,8 +3,8 @@ from unittest.mock import Mock, patch
 import torch
 
 from tests.ut.base import TestBase
-from vllm_ascend.quantization.methods.w8a8_dynamic import \
-    AscendW8A8DynamicFusedMoEMethod
+from vllm_ascend.ascend_forward_context import MoECommType
+from vllm_ascend.quantization.methods.w8a8_dynamic import AscendW8A8DynamicFusedMoEMethod
 
 
 class TestAscendW8A8FusedMoEMethod(TestBase):
@@ -32,8 +32,9 @@ class TestAscendW8A8FusedMoEMethod(TestBase):
         mock_ep_group = Mock()
         mock_get_ep_group.return_value = mock_ep_group
         mock_ascend_config = Mock()
-
+        mock_ascend_config.enable_chunked_prefill = False
+        mock_ascend_config.multistream_overlap_gate = False
         mock_ascend_config.eplb_config = Mock(dynamic_eplb=False)
         mock_get_ascend_config.return_value = mock_ascend_config
         mock_mc2_group = Mock(device_group=0)
         mock_get_mc2_group.return_value = mock_mc2_group
@@ -104,3 +105,125 @@ class TestAscendW8A8FusedMoEMethod(TestBase):
         new_layer = self.build_layer()
         self.quant_method.process_weights_after_loading(new_layer)
         mock_npu_format_cast.assert_called()
+
+    @patch("vllm_ascend.quantization.methods.w8a8_dynamic._EXTRA_CTX")
+    @patch("vllm_ascend.quantization.methods.w8a8_dynamic.select_experts")
+    def test_apply_uses_explicit_dispatch_and_mlp_args(self, mock_select_experts, mock_extra_ctx):
+        tokens = 4
+        hidden_size = self.hidden_size
+        layer = torch.nn.Module()
+        layer.w13_weight = torch.randint(
+            -8,
+            8,
+            (self.num_experts, 2 * self.intermediate_size, hidden_size),
+            dtype=torch.int8,
+        )
+        layer.w2_weight = torch.randint(
+            -8,
+            8,
+            (self.num_experts, hidden_size, self.intermediate_size),
+            dtype=torch.int8,
+        )
+        layer.w13_weight_scale_fp32 = torch.ones(self.num_experts, 2 * self.intermediate_size, dtype=torch.float32)
+        layer.w2_weight_scale = torch.ones(self.num_experts, hidden_size, dtype=torch.float32)
+
+        x = torch.randn(tokens, hidden_size, dtype=torch.float32)
+        router_logits = torch.randn(tokens, self.num_experts, dtype=torch.float32)
+        topk_weights = torch.randn(tokens, 2, dtype=torch.float32)
+        topk_ids = torch.randint(0, self.num_experts, (tokens, 2), dtype=torch.int64)
+        mc2_mask = torch.tensor([1, 0, 1, 0], dtype=torch.bool)
+        pertoken_scale = torch.randn(tokens, dtype=torch.float32)
+
+        mock_select_experts.return_value = (topk_weights, topk_ids)
+        mock_comm = Mock()
+        mock_comm.fused_experts.return_value = torch.randn(tokens, hidden_size, dtype=torch.float32)
+        mock_extra_ctx.moe_comm_method = mock_comm
+        mock_extra_ctx.moe_comm_type = MoECommType.ALLGATHER
+        self.quant_method.multistream_overlap_gate = False
+        self.quant_method.in_dtype = torch.float32
+
+        self.quant_method.apply(
+            layer=layer,
+            x=x,
+            router_logits=router_logits,
+            top_k=2,
+            renormalize=True,
+            global_num_experts=self.num_experts,
+            activation="gelu",
+            apply_router_weight_on_input=True,
+            mc2_mask=mc2_mask,
+            pertoken_scale=pertoken_scale,
+        )
+
+        fused_experts_input = mock_comm.fused_experts.call_args.kwargs["fused_experts_input"]
+        self.assertEqual(fused_experts_input.activation, "gelu")
+        self.assertTrue(fused_experts_input.routing.apply_router_weight_on_input)
+        self.assertIs(fused_experts_input.routing.mc2_mask, mc2_mask)
+        self.assertIs(fused_experts_input.routing.pertoken_scale, pertoken_scale)
+        self.assertIs(fused_experts_input.topk_weights, topk_weights)
+        self.assertIs(fused_experts_input.topk_ids, topk_ids)
+
+    @patch("vllm_ascend.quantization.methods.w8a8_dynamic.get_flash_common3_context")
+    @patch("vllm_ascend.quantization.methods.w8a8_dynamic._EXTRA_CTX")
+    @patch("vllm_ascend.quantization.methods.w8a8_dynamic.select_experts")
+    def test_apply_overlap_gate_uses_fc3_context(
+        self,
+        mock_select_experts,
+        mock_extra_ctx,
+        mock_get_flash_common3_context,
+    ):
+        tokens = 4
+        hidden_size = self.hidden_size
+        layer = torch.nn.Module()
+        layer.w13_weight = torch.randint(
+            -8,
+            8,
+            (self.num_experts, 2 * self.intermediate_size, hidden_size),
+            dtype=torch.int8,
+        )
+        layer.w2_weight = torch.randint(
+            -8,
+            8,
+            (self.num_experts, hidden_size, self.intermediate_size),
+            dtype=torch.int8,
+        )
+        layer.w13_weight_scale_fp32 = torch.ones(self.num_experts, 2 * self.intermediate_size, dtype=torch.float32)
+        layer.w2_weight_scale = torch.ones(self.num_experts, hidden_size, dtype=torch.float32)
+
+        x = torch.randn(tokens, hidden_size, dtype=torch.float32)
+        router_logits = torch.randn(tokens, self.num_experts, dtype=torch.float32)
+        topk_weights = torch.randn(tokens, 2, dtype=torch.float32)
+        topk_ids = torch.randint(0, self.num_experts, (tokens, 2), dtype=torch.int64)
+        mc2_mask = torch.tensor([1, 0, 1, 0], dtype=torch.bool)
+        pertoken_scale = torch.randn(tokens, dtype=torch.float32)
+
+        self.quant_method.multistream_overlap_gate = True
+        self.quant_method.in_dtype = torch.float32
+        mock_get_flash_common3_context.return_value = Mock(topk_weights=topk_weights, topk_ids=topk_ids)
+
+        mock_comm = Mock()
+        mock_comm.fused_experts.return_value = torch.randn(tokens, hidden_size, dtype=torch.float32)
+        mock_extra_ctx.moe_comm_method = mock_comm
+        mock_extra_ctx.moe_comm_type = MoECommType.ALLGATHER
+
+        self.quant_method.apply(
+            layer=layer,
+            x=x,
+            router_logits=router_logits,
+            top_k=2,
+            renormalize=True,
+            global_num_experts=self.num_experts,
+            activation="gelu",
+            apply_router_weight_on_input=True,
+            mc2_mask=mc2_mask,
+            pertoken_scale=pertoken_scale,
+        )
+
+        mock_select_experts.assert_not_called()
+        fused_experts_input = mock_comm.fused_experts.call_args.kwargs["fused_experts_input"]
+        self.assertEqual(fused_experts_input.activation, "gelu")
+        self.assertTrue(fused_experts_input.routing.apply_router_weight_on_input)
+        self.assertIs(fused_experts_input.routing.mc2_mask, mc2_mask)
+        self.assertIs(fused_experts_input.routing.pertoken_scale, pertoken_scale)
+        self.assertIs(fused_experts_input.topk_weights, topk_weights)
+        self.assertIs(fused_experts_input.topk_ids, topk_ids)
```