[Feat] Multi-stream for eplb heat collection and aggregation (#4214)

### What this PR does / why we need it?
This PR moves EPLB expert-heat collection and aggregation onto a dedicated secondary NPU stream, so the all-gather of per-rank expert load can overlap with work on the default stream. The new flow is sketched below.
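A condensed sketch of the change, for orientation only: the standalone function `collect_moe_load` is illustrative, while the real code lives in `EplbUpdator` and caches the gather buffer across steps (see the diff below).

```python
import torch
import torch.distributed as dist

from vllm_ascend.eplb.utils import moe_load_async_stream  # lazily created side stream
from vllm_ascend.utils import npu_stream_switch  # switches the active NPU stream


def collect_moe_load(local_load: torch.Tensor, shared_dict: dict) -> None:
    """Gather per-rank expert load ("heat") without blocking the default stream."""
    if not dist.is_initialized():
        # Single-rank fallback: nothing to gather.
        shared_dict["moe_load"] = local_load.unsqueeze(1).cpu()
        return

    world_size = dist.get_world_size()
    with npu_stream_switch(moe_load_async_stream()):
        # Everything in this block is enqueued on the side stream, so the
        # default stream can keep running model kernels concurrently.
        buf = torch.empty((world_size, *local_load.shape),
                          dtype=local_load.dtype,
                          device=local_load.device)
        dist.all_gather_into_tensor(buf, local_load)
        # Reorder so ranks become the second dimension, then copy to host.
        shared_dict["moe_load"] = buf.permute(1, 0, 2).cpu()
```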

- vLLM version: v0.12.0
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.12.0

---------

Signed-off-by: daishixun <dsxsteven@sina.com>
Co-authored-by: Mengqing Cao <cmq0113@163.com>
Author: dsxsteven
Date: 2025-12-09 16:16:55 +08:00
Committed by: GitHub
Parent: dda027e680
Commit: 9a885d08d0
3 changed files with 38 additions and 15 deletions

Changes to the `EplbUpdator` expert-heat collection path:

```diff
@@ -23,6 +23,8 @@ from vllm.logger import logger
 from vllm_ascend.eplb.core.eplb_utils import EPLBParamUtils
 from vllm_ascend.eplb.core.eplb_worker import EplbProcess
+from vllm_ascend.eplb.utils import moe_load_async_stream
+from vllm_ascend.utils import npu_stream_switch
 
 
 class EplbUpdator:
@@ -153,21 +155,22 @@ class EplbUpdator:
         self._gather_buffer = None
         if dist.is_initialized():
-            self.world_size = dist.get_world_size()
-            self.device = local_load.device
-            if self._gather_buffer is None:
-                shape = (self.world_size, *local_load.shape)
-                self._gather_buffer = torch.empty(shape,
-                                                  dtype=local_load.dtype,
-                                                  device=self.device)
-            dist.all_gather_into_tensor(self._gather_buffer, local_load)
-            moe_load = self._gather_buffer.permute(1, 0, 2)
-            self.shared_dict["moe_load"] = moe_load.cpu()
-            logger.debug(
-                f"[ModelRunner] Updated shared_dict['moe_load'] shape={moe_load.shape}"
-            )
+            with npu_stream_switch(moe_load_async_stream()):
+                self.world_size = dist.get_world_size()
+                self.device = local_load.device
+                if self._gather_buffer is None:
+                    shape = (self.world_size, *local_load.shape)
+                    self._gather_buffer = torch.empty(shape,
+                                                      dtype=local_load.dtype,
+                                                      device=self.device)
+                dist.all_gather_into_tensor(self._gather_buffer, local_load)
+                moe_load = self._gather_buffer.permute(1, 0, 2)
+                self.shared_dict["moe_load"] = moe_load.cpu()
+                logger.debug(
+                    f"[ModelRunner] Updated shared_dict['moe_load'] shape={moe_load.shape}"
+                )
         else:
             moe_load = local_load.unsqueeze(1)
             self.shared_dict["moe_load"] = moe_load.cpu()
```

Changes to `vllm_ascend/eplb/utils.py`, which provides the lazily created side stream:

```diff
@@ -18,6 +18,9 @@
 import types
 
 import torch
+import torch_npu
+
+_MOE_LOAD_ASYNC_STREAM = None
 
 
 def get_expert_map(self, layer_id):
@@ -75,3 +78,12 @@ def model_register(model, model_config):
         model.num_moe_layers = config.num_hidden_layers - model.num_dense_layers
     else:
         raise NotImplementedError("EPLB is not supported.")
+
+
+def moe_load_async_stream() -> torch_npu.npu.Stream:
+    # Lazily create one dedicated NPU stream per process and reuse it for
+    # every subsequent heat-collection step.
+    global _MOE_LOAD_ASYNC_STREAM
+    if _MOE_LOAD_ASYNC_STREAM is None:
+        _MOE_LOAD_ASYNC_STREAM = torch_npu.npu.Stream()
+    return _MOE_LOAD_ASYNC_STREAM
```
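For illustration, the helper is a lazy per-process singleton: the first call creates the side stream, and every later heat-collection step reuses the same object instead of allocating a new stream.

```python
from vllm_ascend.eplb.utils import moe_load_async_stream

s1 = moe_load_async_stream()
s2 = moe_load_async_stream()
assert s1 is s2  # created once on first use, then shared by all later calls
```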