[feat] support customized and separated hccl_buffer_size for process group initialization (#3073)
### What this PR does / why we need it?
Currently, users have to set `HCCL_BUFFSIZE` to 512~1024 to run the mc2 operators (dispatch and combine) when serving MoE models with a large `ep_size` and `batch_size`. This environment variable does not only size the buffer allocated for the mc2 group; it also inflates the VRAM allocated for the dp, tp, and ep groups, causing a significant drop in kv cache and free memory.

This PR automatically calculates and sets a separate `hccl_buffer_size` for each process group **(except the mc2 group)** when users set `HCCL_BUFFSIZE` for the mc2 group. This significantly reduces the buffer space wasted on the dp, tp, and ep groups. Note that the current mc2 operators can only partition communication space based on the `HCCL_BUFFSIZE` configuration. Once they support an `hccl_buffer_size` passed through `pg_options` at process-group initialization, we will calculate the required buffer size for the mc2 group as well, and users will no longer need to set `HCCL_BUFFSIZE` themselves.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
We ran E2E serving of deepseek_r1, initializing the DP/TP/EP/MC2 process groups, and observed a significant increase in kv cache and free memory.

- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

---------

Signed-off-by: linfeng-yuan <1102311262@qq.com>
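For intuition, here is a rough back-of-the-envelope sketch of the per-device saving. The group layout and the 512 MB setting are illustrative assumptions, not measurements from this PR:

```python
# Illustrative arithmetic only: group names, counts, and sizes are assumptions.
# Before: a global HCCL_BUFFSIZE=512 (MB) sizes the buffer of *every* group,
# although only the mc2 group (dispatch/combine) actually needs that much.
before = {"dp": 512, "tp": 512, "ep": 512, "mc2": 512}  # MB per group
# After: non-mc2 groups get a separately computed hccl_buffer_size instead,
# e.g. the 50 MB dp floor and the 200 MB default for tp/ep (see the diff below).
after = {"dp": 50, "tp": 200, "ep": 200, "mc2": 512}
saved = sum(before.values()) - sum(after.values())
print(f"~{saved} MB per device freed for kv cache")  # ~1086 MB
```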
```diff
@@ -53,6 +53,8 @@ _SLEEP_MODE_ENABLED = None
 _CURRENT_STREAM = None
 _PREFETCH_STREAM = None
 _ASCEND_CUSTOMOP_IS_REIGISTERED = False
+_DEFAULT_BUFFER_SIZE = 200
+_MIN_DP_BUFFER_SIZE = 50
 
 
 def is_310p():
```
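The two new constants appear to be in MB, the same unit `HCCL_BUFFSIZE` uses: `_DEFAULT_BUFFER_SIZE` (200 MB) is the fallback for non-mc2 groups, and `_MIN_DP_BUFFER_SIZE` (50 MB) is the floor applied to the computed dp buffer size.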
```diff
@@ -648,3 +650,51 @@ def npu_stream_switch(target_stream: torch.npu.Stream,
         return nullcontext()
     assert target_stream is not None
     return torch.npu.stream(target_stream)
+
+
+def create_hccl_pg_options(group_name: str):
+    options = torch_npu._C._distributed_c10d.ProcessGroupHCCL.Options()
+    hccl_config = get_hccl_config_for_pg_options(group_name)
+    if hccl_config is not None:
+        options.hccl_config = hccl_config
+    return options
+
+
+def get_hccl_config_for_pg_options(group_name: str) -> Optional[dict]:
+    """
+    Get HCCL process group options for the given communication group name.
+
+    Args:
+        group_name: Name of the communication group
+
+    Returns:
+        HCCL pg_options or None for mc2 group
+    """
+    # FIXME: Current mc2 operators only perform communication space partitioning
+    # based on HCCL_BUFFSIZE configuration. Using pg_options with mc2 group would
+    # result in memory misalignment problems.
+    if group_name and "mc2" in group_name:
+        return None
+    hccl_config_map = {
+        "dp": {
+            "hccl_buffer_size": calculate_dp_buffer_size()
+        },
+    }
+    return hccl_config_map.get(group_name, get_default_buffer_config())
+
+
+def get_default_buffer_config() -> dict:
+    return {"hccl_buffer_size": _DEFAULT_BUFFER_SIZE}
+
+
+def calculate_dp_buffer_size() -> int:
+    """
+    formula of dp buffer size:
+    dp_size + 2 (flags: with_prefill and enable_dbo)
+    """
+    from vllm.config import get_current_vllm_config
+    vllm_config = get_current_vllm_config()
+    dp_size = vllm_config.parallel_config.data_parallel_size
+    int32_size = torch.iinfo(torch.int32).bits // 8
+    dp_buffer_size = math.ceil((dp_size + 2) * int32_size / (1024 * 1024))
+    return max(dp_buffer_size, _MIN_DP_BUFFER_SIZE)
```
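A minimal sketch of how the helper could be wired into process-group creation; passing `ProcessGroupHCCL.Options` through `new_group`'s `pg_options` on NPU is an assumption here, not something this diff shows:

```python
# Hypothetical usage; the pg_options pass-through for HCCL on NPU is assumed.
import torch.distributed as dist

# Non-mc2 groups get a per-group hccl_buffer_size via pg_options ...
dp_group = dist.new_group(
    ranks=list(range(world_size)),  # world_size assumed defined by the caller
    backend="hccl",
    pg_options=create_hccl_pg_options("dp"),  # e.g. {"hccl_buffer_size": 50}
)
# ... while the mc2 group gets plain Options (hccl_config left unset) and
# keeps sizing its buffer from the global HCCL_BUFFSIZE environment variable.
mc2_options = create_hccl_pg_options("mc2")
```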
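Plugging numbers into `calculate_dp_buffer_size` shows that the 50 MB floor dominates at any realistic `dp_size` (illustrative values):

```python
import math

dp_size = 64                             # assumed deployment size
int32_size = 4                           # bytes in an int32
raw_bytes = (dp_size + 2) * int32_size   # 264 bytes: one int32 per dp rank + 2 flags
raw_mb = math.ceil(raw_bytes / (1024 * 1024))  # rounds up to 1 MB
print(max(raw_mb, 50))                   # 50: the _MIN_DP_BUFFER_SIZE floor wins
```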