From fca2f948c101c9d520f1a381a6b705d9e11c042e Mon Sep 17 00:00:00 2001 From: wjunLu <135617475+wjunLu@users.noreply.github.com> Date: Thu, 25 Dec 2025 09:18:05 +0800 Subject: [PATCH] [E2E Refactor] Enable skipped e2e case (#5287) ### What this PR does / why we need it? The test case `tests/e2e/multicard/test_data_parallel.py` was skipped due to errors encountered during the migration from Ascend A2 to A3; the details are as follows: ``` (EngineCore_DP0 pid=17833) RuntimeError: npu_moe_distribute_dispatch_v2:build/CMakeFiles/torch_npu.dir/compiler_depend.ts:161 NPU function error: call aclnnMoeDistributeDispatchV3 failed, error code is 561002 (EngineCore_DP0 pid=17833) [ERROR] 2025-12-23-07:36:19 (PID:17833, Device:0, RankID:-1) ERR00100 PTA call acl api failed. (EngineCore_DP0 pid=17833) EZ9999: Inner Error! (EngineCore_DP0 pid=17833) EZ9999[PID: 17833] 2025-12-23-07:36:19.237.396 (EZ9999): HCCL_BUFFSIZE is too SMALL, maxBs = 512, h = 2048, epWorldSize = 2, localMoeExpertNum = 64, sharedExpertNum = 0, tokenNeedSizeDispatch = 4608, tokenNeedSizeCombine = 4096, k = 8, NEEDED_HCCL_BUFFSIZE(((maxBs * tokenNeedSizeDispatch * ep_worldsize * localMoeExpertNum) + (maxBs * tokenNeedSizeCombine * (k + sharedExpertNum))) * 2) = 609MB, HCCL_BUFFSIZE=200MB.[FUNC:MoeDistributeDispatchA3TilingFuncImpl][FILE:moe_distribute_dispatch_v2_tiling.cc][LINE:941] (EngineCore_DP0 pid=17833) TraceBack (most recent call last): (EngineCore_DP0 pid=17833) MoeDistributeDispatchV2 do tiling failed, ret is -1. (EngineCore_DP0 pid=17833) Check NnopbaseExecutorDoTiling(executor) failed (EngineCore_DP0 pid=17833) Check NnopbaseExecutorTilingAndUpdateBinInfo(executor) failed (EngineCore_DP0 pid=17833) Check NnopbaseExecutorMatchCache(executor) failed (EngineCore_DP0 pid=17833) Check NnopbaseRunForWorkspace(*executor, workspaceSize) failed ``` ### Does this PR introduce _any_ user-facing change? None ### How was this patch tested? 
After the fix, I ran `pytest -sv --durations=0 tests/e2e/multicard/test_data_parallel.py`, and the result looks good: ``` ========================================================================================= warnings summary ========================================================================================= :241 :241: DeprecationWarning: builtin type SwigPyPacked has no __module__ attribute :241 :241: DeprecationWarning: builtin type SwigPyObject has no __module__ attribute -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html ======================================================================================== slowest durations ========================================================================================= 112.69s call tests/e2e/multicard/test_data_parallel.py::test_qwen_inference_dp2[32-vllm-ascend/Qwen3-30B-A3B-W8A8] 88.11s call tests/e2e/multicard/test_data_parallel.py::test_qwen_inference_dp2[32-Qwen/Qwen3-30B-A3B] 70.06s call tests/e2e/multicard/test_data_parallel.py::test_qwen_inference_dp2[32-Qwen/Qwen3-0.6B] (6 durations < 0.005s hidden. Use -vv to show these durations.) 
============================================================================ 3 passed, 2 warnings in 270.88s (0:04:30) ============================================================================ ``` - vLLM version: release/v0.13.0 - vLLM main: https://github.com/vllm-project/vllm/commit/ad32e3e19ccf0526cb6744a5fed09a138a5fb2f9 Signed-off-by: wjunLu --- tests/e2e/multicard/test_data_parallel.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/e2e/multicard/test_data_parallel.py b/tests/e2e/multicard/test_data_parallel.py index 6edeeb37..6b4df381 100644 --- a/tests/e2e/multicard/test_data_parallel.py +++ b/tests/e2e/multicard/test_data_parallel.py @@ -28,16 +28,14 @@ from unittest.mock import patch import pytest MODELS = [ - "Qwen/Qwen3-0.6B", - "Qwen/Qwen3-30B-A3B", - # FIXME(Potabk): Skip this case for now - # "vllm-ascend/Qwen3-30B-A3B-W8A8" + "Qwen/Qwen3-0.6B", "Qwen/Qwen3-30B-A3B", "vllm-ascend/Qwen3-30B-A3B-W8A8" ] @pytest.mark.parametrize("model", MODELS) @pytest.mark.parametrize("max_tokens", [32]) @patch.dict(os.environ, {"ASCEND_RT_VISIBLE_DEVICES": "0,1"}) +@patch.dict(os.environ, {"HCCL_BUFFSIZE": "1024"}) def test_qwen3_inference_dp2(model, max_tokens): moe_models = ["Qwen/Qwen3-30B-A3B", "vllm-ascend/Qwen3-30B-A3B-W8A8"] quantization_models = ["vllm-ascend/Qwen3-30B-A3B-W8A8"]