From a054f0f4cae72d94db2f1f56c9f5605eb436682e Mon Sep 17 00:00:00 2001 From: wangxiyuan Date: Mon, 30 Jun 2025 19:02:29 +0800 Subject: [PATCH] [CI] change to new ds model (#1513) Previously, the DeepSeek V3 Pruning weights were not correct, so the MoE layer was not tested. We provide a new Pruning model to enable MoE layer computation. This PR fixes the CI to use the new weights. --------- Signed-off-by: wangxiyuan --- tests/{ => e2e}/multicard/test_fused_moe_allgather_ep.py | 4 ++-- tests/e2e/multicard/test_offline_inference_distributed.py | 2 +- tests/e2e/multicard/test_torchair_graph_mode.py | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) rename tests/{ => e2e}/multicard/test_fused_moe_allgather_ep.py (96%) diff --git a/tests/multicard/test_fused_moe_allgather_ep.py b/tests/e2e/multicard/test_fused_moe_allgather_ep.py similarity index 96% rename from tests/multicard/test_fused_moe_allgather_ep.py rename to tests/e2e/multicard/test_fused_moe_allgather_ep.py index 1e63878..ad755dd 100644 --- a/tests/multicard/test_fused_moe_allgather_ep.py +++ b/tests/e2e/multicard/test_fused_moe_allgather_ep.py @@ -41,7 +41,7 @@ def test_generate_with_allgather(): sampling_params = SamplingParams(max_tokens=100, temperature=0.0) with VllmRunner(snapshot_download("vllm-ascend/DeepSeek-V3-Pruning"), - tensor_parallel_size=16, + tensor_parallel_size=4, enforce_eager=True, max_model_len=1024, dtype="auto", @@ -67,7 +67,7 @@ def test_generate_with_alltoall(): sampling_params = SamplingParams(max_tokens=100, temperature=0.0) with VllmRunner(snapshot_download("vllm-ascend/DeepSeek-V3-Pruning"), - tensor_parallel_size=16, + tensor_parallel_size=4, enforce_eager=True, max_model_len=1024, dtype="auto", diff --git a/tests/e2e/multicard/test_offline_inference_distributed.py b/tests/e2e/multicard/test_offline_inference_distributed.py index 503157d..04f4488 100644 --- a/tests/e2e/multicard/test_offline_inference_distributed.py +++ b/tests/e2e/multicard/test_offline_inference_distributed.py @@ 
-56,7 +56,7 @@ def test_models_distributed_DeepSeek_multistream_moe(): with VllmRunner( "vllm-ascend/DeepSeek-V3-Pruning", dtype=dtype, - tensor_parallel_size=2, + tensor_parallel_size=4, distributed_executor_backend="mp", additional_config={ "torchair_graph_config": { diff --git a/tests/e2e/multicard/test_torchair_graph_mode.py b/tests/e2e/multicard/test_torchair_graph_mode.py index 3aca92c..a0ae860 100644 --- a/tests/e2e/multicard/test_torchair_graph_mode.py +++ b/tests/e2e/multicard/test_torchair_graph_mode.py @@ -66,10 +66,10 @@ def _deepseek_torchair_test_fixture( # inaccurate. This will only change if accuracy improves with the # official weights of DeepSeek-V3. golden_results = [ - 'Hello, my name is feasibility伸 spazio debtor添', - 'The president of the United States is begg"""\n杭州风和 bestimm', - 'The capital of France is frequentlyশามalinkAllowed', - 'The future of AI is deleting俯احت怎么样了حراف', + 'Hello, my name is下载早点向前很有่อง', + 'The president of the United States isSender)## physiological Albany', + 'The capital of France is Rocky转角 hospitalizedinterval sparked', + 'The future of AI is её asegο BIOS一扫', ] assert len(golden_results) == len(vllm_output)