[CI] change to new ds model (#1513)

Previously, the DeepSeek V3 Pruning weight was not correct, so the MoE layer
was not tested. We update to a new Pruning model to enable MoE layer
compute.

This PR fixes the CI to work with the new weight.

---------

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2025-06-30 19:02:29 +08:00
committed by GitHub
parent 8013634e9c
commit a054f0f4ca
3 changed files with 7 additions and 7 deletions

View File

@@ -41,7 +41,7 @@ def test_generate_with_allgather():
sampling_params = SamplingParams(max_tokens=100, temperature=0.0) sampling_params = SamplingParams(max_tokens=100, temperature=0.0)
with VllmRunner(snapshot_download("vllm-ascend/DeepSeek-V3-Pruning"), with VllmRunner(snapshot_download("vllm-ascend/DeepSeek-V3-Pruning"),
tensor_parallel_size=16, tensor_parallel_size=4,
enforce_eager=True, enforce_eager=True,
max_model_len=1024, max_model_len=1024,
dtype="auto", dtype="auto",
@@ -67,7 +67,7 @@ def test_generate_with_alltoall():
sampling_params = SamplingParams(max_tokens=100, temperature=0.0) sampling_params = SamplingParams(max_tokens=100, temperature=0.0)
with VllmRunner(snapshot_download("vllm-ascend/DeepSeek-V3-Pruning"), with VllmRunner(snapshot_download("vllm-ascend/DeepSeek-V3-Pruning"),
tensor_parallel_size=16, tensor_parallel_size=4,
enforce_eager=True, enforce_eager=True,
max_model_len=1024, max_model_len=1024,
dtype="auto", dtype="auto",

View File

@@ -56,7 +56,7 @@ def test_models_distributed_DeepSeek_multistream_moe():
with VllmRunner( with VllmRunner(
"vllm-ascend/DeepSeek-V3-Pruning", "vllm-ascend/DeepSeek-V3-Pruning",
dtype=dtype, dtype=dtype,
tensor_parallel_size=2, tensor_parallel_size=4,
distributed_executor_backend="mp", distributed_executor_backend="mp",
additional_config={ additional_config={
"torchair_graph_config": { "torchair_graph_config": {

View File

@@ -66,10 +66,10 @@ def _deepseek_torchair_test_fixture(
# inaccurate. This will only change if accuracy improves with the # inaccurate. This will only change if accuracy improves with the
# official weights of DeepSeek-V3. # official weights of DeepSeek-V3.
golden_results = [ golden_results = [
'Hello, my name is feasibility伸 spazio debtor添', 'Hello, my name is下载早点向前很有่อง',
'The president of the United States is begg"""\n杭州风和 bestimm', 'The president of the United States isSender)## physiological Albany',
'The capital of France is frequentlyশามalinkAllowed', 'The capital of France is Rocky转角 hospitalizedinterval sparked',
'The future of AI is deleting俯احت怎么样了حراف', 'The future of AI is её asegο BIOS一扫',
] ]
assert len(golden_results) == len(vllm_output) assert len(golden_results) == len(vllm_output)