Fix DSR1 accuracy for flashinfer_trtllm MoE with FP8 quantization (#11081)

This commit is contained in:
Trevor Morris
2025-09-30 10:33:12 -07:00
committed by GitHub
parent 229d2b95f1
commit a6cc86df9d
2 changed files with 4 additions and 4 deletions

View File

@@ -575,9 +575,9 @@ class FusedMoE(torch.nn.Module):
)
# Flashinfer assumes w31 format for w13_weight. Same for the scales.
-        if (
-            should_use_flashinfer_trtllm_moe()
-            and self.quant_method.__class__.__name__ == "ModelOptNvFp4FusedMoEMethod"
+        if should_use_flashinfer_trtllm_moe() and (
+            isinstance(self.quant_method, ModelOptNvFp4FusedMoEMethod)
+            or isinstance(self.quant_method, Fp8MoEMethod)
         ):
             shard_id = {"w1": "w3", "w3": "w1", "w2": "w2"}[shard_id]

View File

@@ -916,7 +916,7 @@ class ServerArgs:
if self.moe_runner_backend == "flashinfer_trtllm":
assert (
self.quantization == "modelopt_fp4" or self.quantization == "fp8"
-            ), "modelopt_fp4 quantization is required for Flashinfer TRTLLM MoE"
+            ), "modelopt_fp4 or fp8 quantization is required for Flashinfer TRTLLM MoE"
self.disable_shared_experts_fusion = True
logger.warning(
"FlashInfer TRTLLM MoE is enabled. --disable-shared-experts-fusion is automatically set."