From 9227e6af73a0b13383f1dfcb70ef11f0e4af1ee5 Mon Sep 17 00:00:00 2001 From: Chen Chen <0109chenchen@gmail.com> Date: Wed, 24 Dec 2025 11:26:19 +0800 Subject: [PATCH] =?UTF-8?q?[bugfix]=20remove=20the=20EP=20buffer=20allocat?= =?UTF-8?q?ion=20introduced=20by=20fused-op=20dispatch=5Fffn=5Fc=E2=80=A6?= =?UTF-8?q?=20(#5284)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### What this PR does / why we need it? - This PR removes the Expert Parallel (EP) HCCL buffer allocation that was previously introduced by the fused-op `dispatch_ffn_combine` (#3532), since the fused-op has switched to the MC2 HCCL buffer (#5156). ### Does this PR introduce _any_ user-facing change? ### How was this patch tested? - vLLM version: release/v0.13.0 - vLLM main: https://github.com/vllm-project/vllm/commit/ad32e3e19ccf0526cb6744a5fed09a138a5fb2f9 Signed-off-by: Chen Chen <0109chenchen@gmail.com> --- vllm_ascend/utils.py | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/vllm_ascend/utils.py b/vllm_ascend/utils.py index 4246ee36..c8bf448c 100644 --- a/vllm_ascend/utils.py +++ b/vllm_ascend/utils.py @@ -941,9 +941,6 @@ def get_hccl_config_for_pg_options(group_name: str) -> Optional[dict]: "dp": { "hccl_buffer_size": calculate_dp_buffer_size() }, - "ep": { - "hccl_buffer_size": calculate_ep_buffer_size() - }, } return hccl_config_map.get(group_name, get_default_buffer_config()) @@ -965,31 +962,6 @@ def calculate_dp_buffer_size() -> int: return max(dp_buffer_size, _MIN_DP_BUFFER_SIZE) -def calculate_ep_buffer_size() -> int: - """ - formula of ep buffer size: - batch_size * hidden_size * topk * 4 - """ - ep_buffer_size = _DEFAULT_BUFFER_SIZE - try: - from vllm.config import get_current_vllm_config - vllm_config = get_current_vllm_config() - tp_size = vllm_config.parallel_config.tensor_parallel_size - hf_config = vllm_config.model_config.hf_config - - hidden_size = hf_config.hidden_size - topk = getattr(hf_config, 
"num_experts_per_tok", 1) - batch_size = vllm_config.scheduler_config.max_num_batched_tokens // tp_size - int8_size = torch.iinfo(torch.int8).bits // 8 - bf16_size = torch.finfo(torch.bfloat16).bits // 8 - ep_buffer_size = math.ceil( - (batch_size * hidden_size * topk * - (int8_size + bf16_size) * 3) / (1024 * 1024)) - except Exception: - pass - return max(ep_buffer_size, _DEFAULT_BUFFER_SIZE) - - # Currently, when in A2, setting the environment variables HCCL_INTRA_PCIE_ENABLE=1 # and HCCL_INTRA_ROCE_ENABLE=0 can reduce cross-machine communication traffic and # significantly improve communication performance of MC2 ops dispatch/combine.