From 0664c6e67ad1830e397e8ee855eccf4e24c7fe96 Mon Sep 17 00:00:00 2001 From: starmountain1997 <77533802+starmountain1997@users.noreply.github.com> Date: Tue, 20 Jan 2026 12:40:54 +0800 Subject: [PATCH] [Doc] Add layer_sharding additional config for DeepSeek-V3.2-W8A8 (#5921) ### What this PR does / why we need it? #### Documentation Improvements New Configuration: Added the layer_sharding parameter to the DeepSeek-V3.2-W8A8 deployment tutorial. This guides users to include `["q_b_proj", "o_proj"]` in their prefill node setup for better resource utilization. #### CI and Testing Updates Test Config Update: Updated the multi-node E2E test configuration file tests/e2e/nightly/multi_node/config/DeepSeek-V3_2-W8A8-A3-dual-nodes.yaml, including disabling `FLASHCOMM`, enabling `FULL_DECODE_ONLY`, and updating the performance baseline. ### Does this PR introduce any user-facing change? Yes. The documentation now recommends a more optimized startup command for DeepSeek-V3.2-W8A8. Users following the updated tutorial will see improved performance in multi-node PD disaggregation environments. ### How was this patch tested? CI Validation: The updated E2E test configuration has been verified through the nightly CI pipeline. Environment: * vLLM version: v0.13.0 Base Commit: [11b6af5](https://github.com/vllm-project/vllm/commit/11b6af5280d6d6dfb8953af16e67b25f819b3be9) Hardware: Ascend A3/A2 multi-node cluster. 
--------- Signed-off-by: guozr Co-authored-by: guozr --- docs/source/tutorials/DeepSeek-V3.2.md | 2 ++ .../config/DeepSeek-V3_2-W8A8-A3-dual-nodes.yaml | 7 ++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/source/tutorials/DeepSeek-V3.2.md b/docs/source/tutorials/DeepSeek-V3.2.md index e9ebd2ab..80b61615 100644 --- a/docs/source/tutorials/DeepSeek-V3.2.md +++ b/docs/source/tutorials/DeepSeek-V3.2.md @@ -313,6 +313,7 @@ Before you start, please --quantization ascend \ --enforce-eager \ --no-enable-prefix-caching \ + --additional-config '{"layer_sharding": ["q_b_proj", "o_proj"]}' \ --kv-transfer-config \ '{"kv_connector": "MooncakeConnectorV1", "kv_role": "kv_producer", @@ -388,6 +389,7 @@ Before you start, please --quantization ascend \ --enforce-eager \ --no-enable-prefix-caching \ + --additional-config '{"layer_sharding": ["q_b_proj", "o_proj"]}' \ --kv-transfer-config \ '{"kv_connector": "MooncakeConnectorV1", "kv_role": "kv_producer", diff --git a/tests/e2e/nightly/multi_node/config/DeepSeek-V3_2-W8A8-A3-dual-nodes.yaml b/tests/e2e/nightly/multi_node/config/DeepSeek-V3_2-W8A8-A3-dual-nodes.yaml index 0fc4fac2..985b3ea4 100644 --- a/tests/e2e/nightly/multi_node/config/DeepSeek-V3_2-W8A8-A3-dual-nodes.yaml +++ b/tests/e2e/nightly/multi_node/config/DeepSeek-V3_2-W8A8-A3-dual-nodes.yaml @@ -12,7 +12,7 @@ env_common: OMP_NUM_THREADS: 1 VLLM_ASCEND_ENABLE_MLAPO: 1 PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True" - VLLM_ASCEND_ENABLE_FLASHCOMM1: 1 + VLLM_ASCEND_ENABLE_FLASHCOMM1: 0 ASCEND_A3_EBA_ENABLE: 1 @@ -37,9 +37,9 @@ deployment: --gpu-memory-utilization 0.85 --trust-remote-code --speculative-config '{"num_speculative_tokens": 2, "method":"deepseek_mtp"}' + --compilation-config '{"cudagraph_capture_sizes": [3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48], "cudagraph_mode": "FULL_DECODE_ONLY"}' --tokenizer-mode deepseek_v32 --reasoning-parser deepseek_v3 - --api-server-count 4 - server_cmd: > @@ -61,6 +61,7 @@ deployment: 
--gpu-memory-utilization 0.85 --trust-remote-code --speculative-config '{"num_speculative_tokens": 2, "method":"deepseek_mtp"}' + --compilation-config '{"cudagraph_capture_sizes": [3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48], "cudagraph_mode": "FULL_DECODE_ONLY"}' --tokenizer-mode deepseek_v32 --reasoning-parser deepseek_v3 benchmarks: @@ -73,7 +74,7 @@ benchmarks: max_out_len: 3000 batch_size: 512 request_rate: 11.2 - baseline: 594.915 + baseline: 905.6805 threshold: 0.97 acc: case_type: accuracy