diff --git a/tests/e2e/nightly/single_node/models/test_deepseek_v3_2_w8a8.py b/tests/e2e/nightly/single_node/models/test_deepseek_v3_2_w8a8.py
index 7559e2da..30cc0020 100644
--- a/tests/e2e/nightly/single_node/models/test_deepseek_v3_2_w8a8.py
+++ b/tests/e2e/nightly/single_node/models/test_deepseek_v3_2_w8a8.py
@@ -54,7 +54,7 @@ aisbench_cases = [{
     "max_out_len": 1500,
     "batch_size": 4,
     "request_rate": 11.2,
-    "baseline": 134,
+    "baseline": 110.5681,
     "threshold": 0.97
 }]
 
@@ -80,13 +80,12 @@ async def test_models(model: str, tp_size: int, dp_size: int) -> None:
         str(tp_size), "--data-parallel-size",
         str(dp_size), "--port",
         str(port), "--max-model-len", "8192", "--max-num-batched-tokens",
-        "8192", "--max-num-seqs", "4", "--trust-remote-code", "--quantization",
-        "ascend", "--gpu-memory-utilization", "0.92", "--compilation-config",
-        '{"cudagraph_capture_sizes":[3, 6, 9, 12], "cudagraph_mode":"FULL_DECODE_ONLY"}',
+        "8192", "--max-num-seqs", "8", "--trust-remote-code", "--quantization",
+        "ascend", "--gpu-memory-utilization", "0.98", "--compilation-config",
+        '{"cudagraph_capture_sizes":[8, 16, 24, 32], "cudagraph_mode":"FULL_DECODE_ONLY"}',
         "--speculative-config",
-        '{"num_speculative_tokens": 2, "method":"deepseek_mtp"}',
-        "--additional-config",
-        '{"layer_sharding": ["q_b_proj", "o_proj"]}',
+        '{"num_speculative_tokens": 3, "method":"deepseek_mtp"}',
+        "--additional-config", '{"layer_sharding": ["q_b_proj", "o_proj"]}',
         "--reasoning-parser", "deepseek_v3", "--tokenizer_mode", "deepseek_v32"
     ]
     request_keyword_args: dict[str, Any] = {