[CI] Fix OOM of deepseek-eplb nightly test. (#3884)

### What this PR does / why we need it?
Fix OOM of deepseek-eplb nightly test

- vLLM version: v0.11.0rc3
- vLLM main:
83f478bb19

---------

Signed-off-by: offline0806 <3337230449@qq.com>
Co-authored-by: offline0806 <3337230449@qq.com>
This commit is contained in:
offline893
2025-10-30 10:18:07 +08:00
committed by GitHub
parent dc960e798e
commit 14ca1e5cb2
4 changed files with 4 additions and 4 deletions

View File

@@ -126,7 +126,7 @@ class D2DExpertWeightLoader:
local_expert_to_replace,
buffer_tensor_id)
logger.info(
logger.debug(
f"[EPLB] finished update expert weight for layer: {self.layer_id}")
self.recv_expert_list = []

View File

@@ -77,6 +77,7 @@ class EplbUpdator:
self.cur_iterations += 1
if self.cur_iterations == (self.num_iterations_eplb_update + \
self.num_wait_worker_iterations + self.num_moe_layers):
logger.info("Finish expert parallel load balancing.")
if self.expert_map_record_path is not None:
self.adaptor._export_tensor_to_file(
self.shared_dict["expert_maps"],