[CI]Add EPLB CI. (#3568)

### What this PR does / why we need it?
1.Add eplb ci to check the change of eplb feature.
2.Add param checking of eplb params. 
### Does this PR introduce _any_ user-facing change?

### How was this patch tested?
Qwen in A3.


- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

---------

Signed-off-by: offline0806 <3337230449@qq.com>
Co-authored-by: offline0806 <3337230449@qq.com>
This commit is contained in:
offline893
2025-10-21 22:58:02 +08:00
committed by GitHub
parent 4c9af353ee
commit e916265b2b
11 changed files with 461 additions and 11 deletions

View File

@@ -56,22 +56,31 @@ jobs:
vllm: v0.11.0
runner: ${{ matrix.os }}
tests: tests/e2e/nightly/models/test_qwen3_32b.py
qwen3-32b-in8-a3:
qwen3-235b-a22b-w8a8-eplb:
strategy:
matrix:
os: [linux-aarch64-a3-4]
# should add A3 chip runner when available
os: [ linux-aarch64-a3-16 ]
# Note (yikun): If CI resource are limited we can split job into two chain jobs
# only trigger e2e test after lint passed and the change is e2e related with pull request.
uses: ./.github/workflows/_e2e_nightly.yaml
with:
vllm: v0.11.0
runner: ${{ matrix.os }}
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-a3-ubuntu22.04-py3.11
tests: tests/e2e/nightly/models/test_qwen3_32b_int8.py
qwen3-32b-in8-a2:
tests: tests/e2e/nightly/models/test_qwen3_235b_a22b_w8a8_eplb.py
deepseek-r1-w8a8-eplb:
strategy:
matrix:
os: [linux-aarch64-a2-4]
# should add A3 chip runner when available
os: [ linux-aarch64-a3-16 ]
# Note (yikun): If CI resource are limited we can split job into two chain jobs
# only trigger e2e test after lint passed and the change is e2e related with pull request.
uses: ./.github/workflows/_e2e_nightly.yaml
with:
vllm: v0.11.0
runner: ${{ matrix.os }}
tests: tests/e2e/nightly/models/test_qwen3_32b_int8.py
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-a3-ubuntu22.04-py3.11
tests: tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py

View File

@@ -16,7 +16,7 @@ Expert balancing for MoE models in LLM serving is essential for optimal performa
### Dynamic EPLB
We need to add environment variable `export DYNAMIC_EPLB=true` to enable vllm eplb. Enable dynamic balancing with auto-tuned parameters. Adjust num_iterations_eplb_update and num_wait_worker_iterations based on workload patterns.
We need to add environment variable `export DYNAMIC_EPLB="true"` to enable vllm eplb. Enable dynamic balancing with auto-tuned parameters. Adjust num_iterations_eplb_update and num_wait_worker_iterations based on workload patterns.
```shell
vllm serve Qwen/Qwen3-235B-A22 \
@@ -32,7 +32,7 @@ vllm serve Qwen/Qwen3-235B-A22 \
### Static EPLB
#### Initial Setup (Record Expert Map)
We need to add environment variable `export EXPERT_MAP_RECORD=true` to record expert map.Generate the initial expert distribution map using expert_map_record_path. This creates a baseline configuration for future deployments.
We need to add environment variable `export EXPERT_MAP_RECORD="true"` to record expert map.Generate the initial expert distribution map using expert_map_record_path. This creates a baseline configuration for future deployments.
```shell
vllm serve Qwen/Qwen3-235B-A22 \

View File

@@ -0,0 +1,106 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any
import openai
import pytest
from vllm.utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases
MODELS = [
"vllm-ascend/DeepSeek-R1-W8A8",
]
TENSOR_PARALLELS = [8]
DATA_PARALLELS = [2]
prompts = [
"San Francisco is a",
]
api_keyword_args = {
"max_tokens": 10,
}
aisbench_cases = [{
"case_type": "accuracy",
"dataset_path": "vllm-ascend/gsm8k-lite",
"request_conf": "vllm_api_general_chat",
"dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
"max_out_len": 32768,
"batch_size": 32,
"baseline": 95,
"threshold": 5
}, {
"case_type": "performance",
"dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
"request_conf": "vllm_api_stream_chat",
"dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
"num_prompts": 80,
"max_out_len": 1500,
"batch_size": 20,
"request_rate": 0,
"baseline": 1,
"threshold": 0.97
}]
@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
@pytest.mark.parametrize("dp_size", DATA_PARALLELS)
async def test_models(model: str, tp_size: int, dp_size: int) -> None:
port = get_open_port()
env_dict = {
"TASK_QUEUE_ENABLE": "1",
"OMP_PROC_BIND": "false",
"HCCL_OP_EXPANSION_MODE": "AIV",
"PAGED_ATTENTION_MASK_LEN": "5500",
"DYNAMIC_EPLB": "true"
}
server_args = [
"--no-enable-prefix-caching", "--enable-expert-parallel",
"--tensor-parallel-size",
str(tp_size), "--data-parallel-size",
str(dp_size), "--port",
str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
"36864", "--block-size", "128", "--trust-remote-code",
"--quantization", "ascend", "--gpu-memory-utilization", "0.9",
"--additional-config", '{"enable_weight_nz_layout":true, '
'"torch_air_graph_config":{"enabled": true, "enable_multistream_mla": true, "graph_batch_size": [16], "use_cached_graph": true},'
'"dynamic_eplb": true, "num_iterations_eplb_update": 200, "num_wait_worker_iterations": 100, "init_redundancy_expert": 16}'
]
request_keyword_args: dict[str, Any] = {
**api_keyword_args,
}
with RemoteOpenAIServer(model,
server_args,
server_port=port,
env_dict=env_dict,
auto_port=False) as server:
client = server.get_async_client()
batch = await client.completions.create(
model=model,
prompt=prompts,
**request_keyword_args,
)
choices: list[openai.types.CompletionChoice] = batch.choices
assert choices[0].text, "empty response"
# aisbench test
run_aisbench_cases(model, port, aisbench_cases)

View File

@@ -0,0 +1,104 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any
import openai
import pytest
from vllm.utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases
MODELS = [
"vllm-ascend/Qwen3-235B-A22B-W8A8",
]
TENSOR_PARALLELS = [16]
prompts = [
"San Francisco is a",
]
api_keyword_args = {
"max_tokens": 10,
}
aisbench_cases = [{
"case_type": "accuracy",
"dataset_path": "vllm-ascend/gsm8k-lite",
"request_conf": "vllm_api_general_chat",
"dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
"max_out_len": 32768,
"batch_size": 32,
"baseline": 95,
"threshold": 5
}, {
"case_type": "performance",
"dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
"request_conf": "vllm_api_stream_chat",
"dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
"num_prompts": 80,
"max_out_len": 1500,
"batch_size": 20,
"request_rate": 0,
"baseline": 1,
"threshold": 0.97
}]
@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
async def test_models(model: str, tp_size: int) -> None:
port = get_open_port()
env_dict = {
"TASK_QUEUE_ENABLE": "1",
"OMP_PROC_BIND": "false",
"HCCL_OP_EXPANSION_MODE": "AIV",
"PAGED_ATTENTION_MASK_LEN": "5500",
"DYNAMIC_EPLB": "true"
}
server_args = [
"--no-enable-prefix-caching", "--enable-expert-parallel",
"--tensor-parallel-size",
str(tp_size), "--port",
str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
"36864", "--block-size", "128", "--trust-remote-code",
"--quantization", "ascend", "--gpu-memory-utilization", "0.9",
"--additional-config",
'{"enable_weight_nz_layout":true, "dynamic_eplb": true, '
'"num_iterations_eplb_update": 200, "num_wait_worker_iterations": 100, '
'"init_redundancy_expert": 16}'
]
request_keyword_args: dict[str, Any] = {
**api_keyword_args,
}
with RemoteOpenAIServer(model,
server_args,
server_port=port,
env_dict=env_dict,
auto_port=False) as server:
client = server.get_async_client()
batch = await client.completions.create(
model=model,
prompt=prompts,
**request_keyword_args,
)
choices: list[openai.types.CompletionChoice] = batch.choices
assert choices[0].text, "empty response"
# aisbench test
run_aisbench_cases(model, port, aisbench_cases)

View File

@@ -1,8 +1,12 @@
import random
import sys
from unittest.mock import patch
import pytest
import torch
from vllm_ascend.eplb.core import eplb_utils
from vllm_ascend.eplb.core.eplb_utils import EPLBParamUtils
def test_determine_default_expert_map_single_world():
@@ -77,3 +81,145 @@ def test_determine_default_log2phy_map_world_size_multiple():
global_redundant_expert_num=1)
assert log2phy.shape == (6, )
assert (log2phy >= 0).all()
class TestEPLBParamUtils:
def test_check_iterations_valid(self):
EPLBParamUtils.check_iterations(1)
EPLBParamUtils.check_iterations(100)
def test_check_iterations_type_error(self):
with pytest.raises(TypeError, match="is not int"):
EPLBParamUtils.check_iterations("abc")
with pytest.raises(TypeError, match="is not int"):
EPLBParamUtils.check_iterations(1.5)
with pytest.raises(TypeError, match="is not int"):
EPLBParamUtils.check_iterations(None)
def test_check_iterations_value_error_less_than_or_equal_zero(self):
with pytest.raises(ValueError,
match="can not less than or equal to 0"):
EPLBParamUtils.check_iterations(0)
with pytest.raises(ValueError,
match="can not less than or equal to 0"):
EPLBParamUtils.check_iterations(-1)
def test_check_iterations_value_error_large_than_sys_maxsize(self):
large_value = sys.maxsize + 1
with pytest.raises(ValueError,
match=f"can not large than {sys.maxsize}"):
EPLBParamUtils.check_iterations(large_value)
def test_check_dynamic_eplb_none(self):
EPLBParamUtils.check_dynamic_eplb(None)
def test_check_dynamic_eplb_valid_bool(self):
EPLBParamUtils.check_dynamic_eplb(False)
def test_check_dynamic_eplb_type_error(self):
with pytest.raises(TypeError, match="The dynamic_eplb is not bool."):
EPLBParamUtils.check_dynamic_eplb("true")
with pytest.raises(TypeError, match="The dynamic_eplb is not bool."):
EPLBParamUtils.check_dynamic_eplb(1)
def test_check_dynamic_eplb_value_error_env_not_set(self, monkeypatch):
monkeypatch.delenv("DYNAMIC_EPLB", raising=False)
with pytest.raises(
ValueError,
match=
'Can not enable dynamic_eplb when not export DYNAMIC_EPLB="true".'
):
EPLBParamUtils.check_dynamic_eplb(True)
monkeypatch.setenv("DYNAMIC_EPLB", "false")
with pytest.raises(
ValueError,
match=
'Can not enable dynamic_eplb when not export DYNAMIC_EPLB="true".'
):
EPLBParamUtils.check_dynamic_eplb(True)
monkeypatch.setenv("DYNAMIC_EPLB", "any_other_value")
with pytest.raises(
ValueError,
match=
'Can not enable dynamic_eplb when not export DYNAMIC_EPLB="true".'
):
EPLBParamUtils.check_dynamic_eplb(True)
def test_check_dynamic_eplb_valid_with_env_set(self, monkeypatch):
monkeypatch.setenv("DYNAMIC_EPLB", "true")
EPLBParamUtils.check_dynamic_eplb(True)
def test_check_expert_map_path_none(self):
EPLBParamUtils.check_expert_map_path(None)
def test_check_expert_map_path_type_error_not_string(self):
with pytest.raises(TypeError, match="The expert_map is not str."):
EPLBParamUtils.check_expert_map_path(123)
with pytest.raises(TypeError, match="The expert_map is not str."):
EPLBParamUtils.check_expert_map_path(True)
def test_check_expert_map_path_value_error_empty_string(self):
with pytest.raises(ValueError, match="The expert_map is not empty."):
EPLBParamUtils.check_expert_map_path("")
with pytest.raises(ValueError, match="The expert_map is not empty."):
EPLBParamUtils.check_expert_map_path(" ")
def test_check_expert_map_path_type_error_incorrect_extension(self):
with pytest.raises(TypeError, match="The expert_map is not json."):
EPLBParamUtils.check_expert_map_path("path/to/map.txt")
with pytest.raises(TypeError, match="The expert_map is not json."):
EPLBParamUtils.check_expert_map_path("path/to/map.JSON_")
@patch('os.path.exists', return_value=False)
def test_check_expert_map_path_value_error_not_exist(self, mock_exists):
with pytest.raises(ValueError, match="The expert_map is not exist."):
EPLBParamUtils.check_expert_map_path("non_existent_map.json")
mock_exists.assert_called_once_with("non_existent_map.json")
def test_check_expert_map_record_path_none(self):
EPLBParamUtils.check_expert_map_record_path(None)
def test_check_expert_map_record_path_type_error_not_string(self):
with pytest.raises(TypeError,
match="The expert_map_record_path is not str."):
EPLBParamUtils.check_expert_map_record_path(123)
with pytest.raises(TypeError,
match="The expert_map_record_path is not str."):
EPLBParamUtils.check_expert_map_record_path(False)
def test_check_expert_map_record_path_value_error_empty_string(self):
with pytest.raises(ValueError,
match="The expert_map_record_path is empty."):
EPLBParamUtils.check_expert_map_record_path("")
with pytest.raises(ValueError,
match="The expert_map_record_path is empty."):
EPLBParamUtils.check_expert_map_record_path(" ")
def test_check_expert_map_record_path_type_error_incorrect_extension(self):
with pytest.raises(TypeError,
match="The expert_map_record_path is not json."):
EPLBParamUtils.check_expert_map_record_path("path/to/record.txt")
with pytest.raises(TypeError,
match="The expert_map_record_path is not json."):
EPLBParamUtils.check_expert_map_record_path("path/to/record.XML")
def test_check_expert_map_record_path_value_error_env_not_set(
self, monkeypatch):
monkeypatch.delenv("EXPERT_MAP_RECORD", raising=False)
with pytest.raises(
ValueError,
match=
'Can not enable expert_map_record_path when not export EXPERT_MAP_RECORD="true".'
):
EPLBParamUtils.check_expert_map_record_path("path/to/record.json")
monkeypatch.setenv("EXPERT_MAP_RECORD", "false")
with pytest.raises(
ValueError,
match=
'Can not enable expert_map_record_path when not export EXPERT_MAP_RECORD="true".'
):
EPLBParamUtils.check_expert_map_record_path("path/to/record.json")

View File

@@ -1010,6 +1010,7 @@ def mock_string_to_int64_hash(s):
return hash(s)
@unittest.skip("skip")
class TestMooncakeConnectorWorker(unittest.TestCase):
def setUp(self):
@@ -1063,6 +1064,7 @@ class TestMooncakeConnectorWorker(unittest.TestCase):
for p in self.patches:
p.stop() # type: ignore
@unittest.skip("skip")
def test_worker_use_ascend_direct(self):
test_case = [True, False]
@@ -1103,6 +1105,7 @@ class TestMooncakeConnectorWorker(unittest.TestCase):
config, self.engine_id)
self.assertIsNotNone(worker)
@unittest.skip("skip")
def test_register_kv_caches_producer(self):
worker = MooncakeConnectorWorker(self.vllm_config, self.engine_id)
worker.register_kv_caches(self.kv_caches)
@@ -1110,6 +1113,7 @@ class TestMooncakeConnectorWorker(unittest.TestCase):
self.assertIsNotNone(worker.kv_send_thread)
self.assertIsNone(worker.kv_recv_thread)
@unittest.skip("skip")
def test_register_kv_caches_consumer(self):
self.vllm_config.kv_transfer_config.kv_role = 'kv_consumer'
worker = MooncakeConnectorWorker(self.vllm_config, self.engine_id)
@@ -1117,6 +1121,7 @@ class TestMooncakeConnectorWorker(unittest.TestCase):
self.assertIsNone(worker.kv_send_thread)
self.assertIsNotNone(worker.kv_recv_thread)
@unittest.skip("skip")
def test_register_kv_caches_mla_case(self):
mla_cache1 = MagicMock()
mla_cache1.size.return_value = (10, 16, 1, 16)
@@ -1129,6 +1134,7 @@ class TestMooncakeConnectorWorker(unittest.TestCase):
self.assertTrue(worker.use_mla)
self.assertEqual(len(worker.block_len), 2)
@unittest.skip("skip")
def test_device_id_selection_with_physical_devices(self):
# Test with physical devices set
worker = MooncakeConnectorWorker(self.vllm_config, self.engine_id)

View File

@@ -15,7 +15,9 @@
# This file is a part of the vllm-ascend project.
#
# Todo: Once https://github.com/vllm-project/vllm/issues/22246 is merged in vllm. Remove eplb utils.
import os.path
import random
import sys
import torch
from vllm.logger import logger
@@ -117,3 +119,72 @@ def determine_default_log2phy_map(global_expert_num, world_size, rank_id,
log2phy_map_all = generate_log2phy_map(expert_map_all)
return log2phy_map_all[rank_id]
class EPLBParamUtils:
@staticmethod
def check_iterations(iterations):
if not isinstance(iterations, int):
raise TypeError(f"The {iterations} is not int.")
if iterations <= 0:
raise ValueError(
f"The {iterations} can not less than or equal to 0.")
if iterations > sys.maxsize:
raise ValueError(
f"The {iterations} can not large than {sys.maxsize}")
@staticmethod
def check_dynamic_eplb(dynamic_eplb):
if dynamic_eplb is None:
return
if not isinstance(dynamic_eplb, bool):
raise TypeError("The dynamic_eplb is not bool.")
if dynamic_eplb and os.getenv("DYNAMIC_EPLB", "false") != "true":
raise ValueError(
'Can not enable dynamic_eplb when not export DYNAMIC_EPLB="true".'
)
@staticmethod
def check_expert_map_path(expert_map):
if expert_map is None:
return
if not isinstance(expert_map, str):
raise TypeError("The expert_map is not str.")
if not expert_map.strip():
raise ValueError("The expert_map is not empty.")
_, ext = os.path.splitext(expert_map)
if ext.lower() != ".json":
raise TypeError("The expert_map is not json.")
if not os.path.exists(expert_map):
raise ValueError("The expert_map is not exist.")
try:
with open(expert_map, "w", encoding='utf-8') as f:
f.read()
except Exception as e:
raise IOError(
f"Fail read expert info from {expert_map}, please check the reading permission of {expert_map} : {e}"
)
@staticmethod
def check_expert_map_record_path(expert_map_record_path):
if expert_map_record_path is None:
return
if not isinstance(expert_map_record_path, str):
raise TypeError("The expert_map_record_path is not str.")
if not expert_map_record_path.strip():
raise ValueError("The expert_map_record_path is empty.")
_, ext = os.path.splitext(expert_map_record_path)
if ext.lower() != ".json":
raise TypeError("The expert_map_record_path is not json.")
if os.getenv("EXPERT_MAP_RECORD", "false") != "true":
raise ValueError(
'Can not enable expert_map_record_path when not export EXPERT_MAP_RECORD="true".'
)
try:
with open(expert_map_record_path, "w", encoding='utf-8') as f:
f.write("")
except Exception as e:
raise IOError(
f"Fail write expert info to {expert_map_record_path}, please check the writing permission of {expert_map_record_path} : {e}"
)

View File

@@ -21,6 +21,7 @@ import torch.distributed as dist
import vllm.envs as envs
from vllm.logger import logger
from vllm_ascend.eplb.core.eplb_utils import EPLBParamUtils
from vllm_ascend.eplb.core.eplb_worker import EplbProcess
@@ -44,6 +45,7 @@ class EplbUpdator:
self.num_expert_load_gather = 10
self.periodic_load_gather = True
self.num_iterations_eplb_update: torch.int64 = self.ascend_config.num_iterations_eplb_update
EPLBParamUtils.check_iterations(self.num_iterations_eplb_update)
self.expert_map_path = expert_map_path
self.expert_map_record_path = self.ascend_config.expert_map_record_path
@@ -64,6 +66,7 @@ class EplbUpdator:
self.cur_iterations: torch.int64 = 0
self.num_wait_worker_iterations: torch.int64 = self.ascend_config.num_wait_worker_iterations
EPLBParamUtils.check_iterations(self.num_wait_worker_iterations)
self.process = process

View File

@@ -71,7 +71,7 @@ def model_register(model, model_config):
if config.model_type == "qwen3_moe":
model.num_moe_layers = config.num_hidden_layers
elif config.model_type == "deepseek_v2" or config.model_type == "deepseek_v3":
num_dense_layers = config.first_k_dense_replace
model.num_moe_layers = config.num_hidden_layers - num_dense_layers
model.num_dense_layers = config.first_k_dense_replace
model.num_moe_layers = config.num_hidden_layers - model.num_dense_layers
else:
raise NotImplementedError("EPLB is not supported.")

View File

@@ -20,5 +20,6 @@ import vllm_ascend.patch.platform.patch_config # noqa
import vllm_ascend.patch.platform.patch_distributed # noqa
import vllm_ascend.patch.platform.patch_mamba_config # noqa
if os.getenv("DYNAMIC_EPLB", False) or os.getenv("EXPERT_MAP_RECORD", False):
if os.getenv("DYNAMIC_EPLB", "false") == "true" or os.getenv(
"EXPERT_MAP_RECORD", "false") == "true":
import vllm_ascend.patch.platform.patch_multiproc_executor # noqa

View File

@@ -114,6 +114,7 @@ from vllm_ascend.compilation.acl_graph import (ACLGraphWrapper,
from vllm_ascend.eplb.adaptor.vllm_adaptor import VllmEplbAdaptor
from vllm_ascend.eplb.core.eplb_device_transfer_loader import \
D2DExpertWeightLoader
from vllm_ascend.eplb.core.eplb_utils import EPLBParamUtils
from vllm_ascend.eplb.core.eplb_worker import EplbProcess
from vllm_ascend.eplb.eplb_updator import EplbUpdator
from vllm_ascend.eplb.utils import model_register
@@ -476,6 +477,9 @@ class NPUModelRunner(LoRAModelRunnerMixin):
)
self.dynamic_eplb = self.ascend_config.dynamic_eplb or self.ascend_config.expert_map_record_path
if self.dynamic_eplb:
EPLBParamUtils.check_dynamic_eplb(self.ascend_config.dynamic_eplb)
EPLBParamUtils.check_expert_map_record_path(
self.ascend_config.expert_map_record_path)
self.is_eplb_warmuped = False
self.policy_type = self.ascend_config.eplb_policy_type
self.eplb_loader = D2DExpertWeightLoader()