From b69db4ce55977862be256c793970870280d1f375 Mon Sep 17 00:00:00 2001
From: LI SHENGYONG <49200266+shenchuxiaofugui@users.noreply.github.com>
Date: Thu, 8 Jan 2026 09:51:48 +0800
Subject: [PATCH] [EPLB][CI] EPLB add aclgraph and redundant expert ci (#5625)

### What this PR does / why we need it?
EPLB currently has no CI coverage for aclgraph or redundant experts; this PR adds both.

release on #5529

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?
Tested the use cases added in this PR; they pass:

====================================================== warnings summary ==========================================================
:241
  :241: DeprecationWarning: builtin type SwigPyPacked has no __module__ attribute

:241
  :241: DeprecationWarning: builtin type SwigPyObject has no __module__ attribute

-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
====================================================== 1 passed, 2 warnings in 272.24s (0:04:32) =====================================================

- vLLM version: v0.13.0
- vLLM main: https://github.com/vllm-project/vllm/commit/8be6432bdaf6275664d857b1e5e9bf8ed1ce299e

Signed-off-by: shenchuxiaofugui <1311027364@qq.com>
---
 tests/e2e/multicard/2-cards/test_qwen3_moe.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/tests/e2e/multicard/2-cards/test_qwen3_moe.py b/tests/e2e/multicard/2-cards/test_qwen3_moe.py
index 0c43244d..bdb9bd3a 100644
--- a/tests/e2e/multicard/2-cards/test_qwen3_moe.py
+++ b/tests/e2e/multicard/2-cards/test_qwen3_moe.py
@@ -79,10 +79,11 @@ def test_qwen3_moe_distributed_aiv_tp2():
 async def test_qwen3_moe_w8a8_distributed_tp2_ep_dynamic_eplb():
     model = "vllm-ascend/Qwen3-30B-A3B-W8A8"
     port = get_open_port()
+    compilation_config = json.dumps({"cudagraph_capture_sizes": [8]})
     server_args = [
         "--max_model_len", "8192", "--tensor_parallel_size", "2",
         "--enable_expert_parallel", "--quantization", "ascend", "--port",
-        str(port), "--enforce_eager"
+        str(port), "--compilation-config", compilation_config
     ]
     env_dict = {"HCCL_BUFFSIZE": "1024"}
     with RemoteOpenAIServer(model,
@@ -93,7 +94,7 @@ async def test_qwen3_moe_w8a8_distributed_tp2_ep_dynamic_eplb():
         client = server.get_async_client()
         batch = await client.completions.create(model=model,
                                                 prompt="What is deeplearning?",
-                                                max_tokens=300,
+                                                max_tokens=400,
                                                 temperature=0,
                                                 top_p=1.0,
                                                 n=1)
@@ -106,7 +107,8 @@ async def test_qwen3_moe_w8a8_distributed_tp2_ep_dynamic_eplb():
     additional_config = {
         "dynamic_eplb": True,
         "num_iterations_eplb_update": 100,
-        "num_wait_worker_iterations": 20
+        "num_wait_worker_iterations": 20,
+        "num_redundant_experts": 2
     }
     server_args.extend(["--additional-config", json.dumps(additional_config)])
     with RemoteOpenAIServer(model,
@@ -117,7 +119,7 @@ async def test_qwen3_moe_w8a8_distributed_tp2_ep_dynamic_eplb():
         client = server.get_async_client()
         batch = await client.completions.create(model=model,
                                                 prompt="What is deeplearning?",
-                                                max_tokens=300,
+                                                max_tokens=400,
                                                 temperature=0,
                                                 top_p=1.0,
                                                 n=1)
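
For context, below is a minimal sketch (not part of the patch) of how the aclgraph and redundant-expert options exercised by this test could be passed to a standalone `vllm serve` launch. All flag names, config keys, and the model name are taken from the diff above; the comment glosses on the `additional-config` keys are inferred from their names, and those keys are vllm-ascend specific rather than general vLLM options.

```python
# Sketch only: build the same aclgraph (cudagraph_capture_sizes) and
# redundant-expert (num_redundant_experts) configuration as the test above
# and print an equivalent `vllm serve` command line.
import json
import shlex

compilation_config = json.dumps({"cudagraph_capture_sizes": [8]})
additional_config = json.dumps({
    "dynamic_eplb": True,               # enable dynamic EPLB (expert load balancing)
    "num_iterations_eplb_update": 100,  # EPLB update interval, in iterations (inferred from key name)
    "num_wait_worker_iterations": 20,   # iterations to wait for workers before updating (inferred)
    "num_redundant_experts": 2,         # number of redundant expert replicas (inferred)
})

cmd = [
    "vllm", "serve", "vllm-ascend/Qwen3-30B-A3B-W8A8",
    "--max-model-len", "8192",
    "--tensor-parallel-size", "2",
    "--enable-expert-parallel",
    "--quantization", "ascend",
    "--compilation-config", compilation_config,
    "--additional-config", additional_config,
]
print(shlex.join(cmd))
```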