[Dist][EP] Remove ETP/EP maintained in vllm-ascend (#1681)

### What this PR does / why we need it?
Remove the ETP/EP implementation maintained in the main branch. We drop this because there are currently no relevant scenarios that use ETP; if experts need to be sliced again, we may instead advocate implementing expert tensor parallelism in vLLM itself.

This is part of the #1422 backport.

Fixes https://github.com/vllm-project/vllm-ascend/issues/1396
Fixes https://github.com/vllm-project/vllm-ascend/issues/1154

### Does this PR introduce _any_ user-facing change?
ETP/EP will no longer be maintained in vllm-ascend; use the TP/EP implementation in vLLM instead.
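
For migration, a minimal sketch of the old vs. new way to enable EP, based on the test changes in this commit (a sketch, not an exhaustive migration guide; the exact flags for your deployment may differ):

```python
from vllm import LLM

# Removed by this PR (vllm-ascend specific ETP knob; for illustration only):
# llm = LLM(model="Qwen/Qwen3-30B-A3B",
#           tensor_parallel_size=4,
#           enable_expert_parallel=True,
#           additional_config={"expert_tensor_parallel_size": "4"})

# Going forward: rely on vLLM's own TP/EP switches.
llm = LLM(model="Qwen/Qwen3-30B-A3B",
          tensor_parallel_size=4,
          enable_expert_parallel=True)
```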

### How was this patch tested?
CI passed with the newly added test and existing tests.


- vLLM version: v0.9.2
- vLLM main: fe8a2c544a

Signed-off-by: MengqingCao <cmq0113@163.com>
Commit: 8cfd257992 (parent: a8b316ac5b)
Author: Mengqing Cao
Date: 2025-07-21 09:08:04 +08:00
Committed by: GitHub
24 changed files with 66 additions and 548 deletions


@@ -36,7 +36,7 @@ COMPLETIONS_URL = f"http://{SERVER_HOST}:{SERVER_PORT}/v1/completions"
 # pre-trained model path on Hugging Face.
 # Qwen/Qwen2.5-0.5B-Instruct: accuracy test for DP.
-# Qwen/Qwen3-30B-A3B: accuracy test for EP and ETP.
+# Qwen/Qwen3-30B-A3B: accuracy test for EP.
 # deepseek-ai/DeepSeek-V2-Lite: accuracy test for TP.
 MODEL_NAME = ["Qwen/Qwen3-30B-A3B", "deepseek-ai/DeepSeek-V2-Lite"]
@@ -200,62 +200,3 @@ def test_lm_eval_accuracy_dp(model, max_tokens):
         except subprocess.TimeoutExpired:
             server_proc.kill()
             server_proc.wait()
-
-
-@pytest.mark.parametrize("max_tokens", [10])
-@pytest.mark.parametrize("model", ["Qwen/Qwen3-30B-A3B"])
-def test_lm_eval_accuracy_etp(model, max_tokens):
-    log_file = open("accuracy_etp.log", "a+")
-    cmd = [
-        "vllm", "serve", model, "--max_model_len", "4096",
-        "--tensor_parallel_size", "4", "--enforce_eager",
-        "--enable_expert_parallel", "--additional_config",
-        '{"expert_tensor_parallel_size": "4"}'
-    ]
-    server_proc = subprocess.Popen(cmd,
-                                   stdout=log_file,
-                                   stderr=subprocess.DEVNULL)
-    try:
-        for _ in range(300):
-            try:
-                r = requests.get(HEALTH_URL, timeout=1)
-                if r.status_code == 200:
-                    break
-            except requests.exceptions.RequestException:
-                pass
-            time.sleep(1)
-        else:
-            log_file.flush()
-            log_file.seek(0)
-            log_content = log_file.read()
-            pytest.fail(
-                f"vLLM serve did not become healthy after 300s: {HEALTH_URL}\n"
-                f"==== vLLM Serve Log Start ===\n{log_content}\n==== vLLM Serve Log End ==="
-            )
-
-        prompt = "bejing is a"
-        payload = {
-            "prompt": prompt,
-            "max_tokens": max_tokens,
-            "sampling_params": {
-                "temperature": 0.0,
-                "top_p": 1.0,
-                "seed": 123
-            }
-        }
-        resp = requests.post(COMPLETIONS_URL, json=payload, timeout=30)
-        resp.raise_for_status()
-        data = resp.json()
-
-        generated = data["choices"][0]["text"].strip()
-        expected = "city in china. it is the capital city of"
-        assert generated == expected, f"Expected `{expected}`, got `{generated}`"
-    finally:
-        server_proc.send_signal(signal.SIGINT)
-        try:
-            server_proc.wait(timeout=10)
-        except subprocess.TimeoutExpired:
-            server_proc.kill()
-            server_proc.wait()


@@ -0,0 +1,30 @@
+import pytest
+
+from tests.e2e.conftest import VllmRunner
+from tests.e2e.model_utils import check_outputs_equal
+
+
+@pytest.mark.parametrize("model_name", ["deepseek-ai/DeepSeek-V2-Lite-Chat"])
+def test_e2e_ep_correctness(model_name):
+    example_prompts = [
+        "Hello, my name is",
+        "The president of the United States is",
+        "The capital of France is",
+        "The future of AI is",
+    ]
+    max_tokens = 5
+
+    with VllmRunner(model_name, tensor_parallel_size=2) as vllm_model:
+        tp_output = vllm_model.generate_greedy(example_prompts, max_tokens)
+
+    with VllmRunner(model_name,
+                    tensor_parallel_size=2,
+                    enable_expert_parallel=True) as vllm_model:
+        ep_output = vllm_model.generate_greedy(example_prompts, max_tokens)
+
+    check_outputs_equal(
+        outputs_0_lst=ep_output,
+        outputs_1_lst=tp_output,
+        name_0="ep_output",
+        name_1="tp_output",
+    )
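
(Design note: with greedy decoding, enabling EP on top of the same TP degree should not change the generated tokens, so the pure-TP run serves as the reference and any divergence points at the EP path.)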


@@ -50,7 +50,6 @@ def test_generate_with_allgather():
                         "enabled": True,
                         "chunked_prefill_enabled": False,
                     },
-                    "expert_tensor_parallel_size": 1
                 }) as vllm_model:
         vllm_model.generate(example_prompts, sampling_params)
@@ -74,6 +73,5 @@ def test_generate_with_alltoall():
                         "enabled": True,
                         "chunked_prefill_enabled": False,
                     },
-                    "expert_tensor_parallel_size": 1
                 }) as vllm_model:
         vllm_model.generate(example_prompts, sampling_params)


@@ -123,6 +123,7 @@ def _pangu_torchair_test_fixture(
             distributed_executor_backend="mp",
             enforce_eager=False,
             additional_config=additional_config,
+            enable_expert_parallel=True,
     ) as vllm_model:
         # use greedy sampler to make sure the generated results are fix
         vllm_output = vllm_model.generate_greedy(example_prompts, 5)


@@ -1,208 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from unittest.mock import MagicMock, patch
import pytest
from vllm.distributed.parallel_state import GroupCoordinator
import vllm_ascend
from tests.ut.base import TestBase
from vllm_ascend.distributed.parallel_state import (
destory_ascend_model_parallel, get_ep_group, get_etp_group,
init_ascend_model_parallel, model_parallel_initialized)
class TestParallelState(TestBase):
@patch('vllm_ascend.distributed.parallel_state._EP',
new_callable=lambda: MagicMock(spec=GroupCoordinator))
def test_get_ep_group_when_initialized(self, mock_ep):
# Act
result = get_ep_group()
# Assert
assert isinstance(result, GroupCoordinator)
@patch('vllm_ascend.distributed.parallel_state._EP', None)
def test_get_ep_group_when_not_initialized(self):
# Act & Assert
with pytest.raises(AssertionError) as excinfo:
get_ep_group()
assert "expert model parallel group is not initialized" in str(
excinfo.value)
@patch('vllm_ascend.distributed.parallel_state._ETP',
new_callable=lambda: MagicMock(spec=GroupCoordinator))
def test_get_etp_group_when_initialized(self, mock_etp):
# Act
result = get_etp_group()
# Assert
assert isinstance(result, GroupCoordinator)
@patch('vllm_ascend.distributed.parallel_state._ETP', None)
def test_get_etp_group_when_not_initialized(self):
# Act & Assert
with pytest.raises(AssertionError) as excinfo:
get_etp_group()
assert "expert tensor parallel group is not initialized" in str(
excinfo.value)
@patch('vllm_ascend.distributed.parallel_state._ETP', None)
@patch('vllm_ascend.distributed.parallel_state._EP', None)
def test_model_parallel_initialized_when_both_none(self):
# Act & Assert
assert not model_parallel_initialized()
@patch('vllm_ascend.distributed.parallel_state._ETP',
new_callable=lambda: MagicMock(spec=GroupCoordinator))
@patch('vllm_ascend.distributed.parallel_state._EP', None)
def test_model_parallel_initialized_when_ep_none(self, mock_etp):
# Act & Assert
assert not model_parallel_initialized()
@patch('vllm_ascend.distributed.parallel_state._ETP', None)
@patch('vllm_ascend.distributed.parallel_state._EP',
new_callable=lambda: MagicMock(spec=GroupCoordinator))
def test_model_parallel_initialized_when_etp_none(self, mock_ep):
# Act & Assert
assert not model_parallel_initialized()
@patch('vllm_ascend.distributed.parallel_state._ETP',
new_callable=lambda: MagicMock(spec=GroupCoordinator))
@patch('vllm_ascend.distributed.parallel_state._EP',
new_callable=lambda: MagicMock(spec=GroupCoordinator))
def test_model_parallel_initialized_when_etp_initialized(
self, mock_ep, mock_etp):
# Act & Assert
assert model_parallel_initialized()
@patch('vllm_ascend.distributed.parallel_state._ETP',
new_callable=lambda: MagicMock(spec=GroupCoordinator))
@patch('vllm_ascend.distributed.parallel_state._EP',
new_callable=lambda: MagicMock(spec=GroupCoordinator))
def test_destroy_when_both_exist(self, mock_ep, mock_etp):
# Act
destory_ascend_model_parallel()
# Assert
mock_ep.destroy.assert_called_once()
mock_etp.destroy.assert_called_once()
assert vllm_ascend.distributed.parallel_state._ETP is None
assert vllm_ascend.distributed.parallel_state._EP is None
@patch('vllm_ascend.distributed.parallel_state._ETP', None)
@patch('vllm_ascend.distributed.parallel_state._EP',
new_callable=lambda: MagicMock())
def test_destory_ascend_model_parallel_when_etp_none(self, mock_ep):
# Act
destory_ascend_model_parallel()
# Assert
mock_ep.destroy.assert_called_once()
assert vllm_ascend.distributed.parallel_state._EP is None
assert vllm_ascend.distributed.parallel_state._ETP is None
@patch('vllm_ascend.distributed.parallel_state._ETP',
new_callable=lambda: MagicMock())
@patch('vllm_ascend.distributed.parallel_state._EP', None)
def test_destory_ascend_model_parallel_when_ep_none(self, mock_etp):
# Act
destory_ascend_model_parallel()
# Assert
mock_etp.destroy.assert_called_once()
assert vllm_ascend.distributed.parallel_state._ETP is None
assert vllm_ascend.distributed.parallel_state._EP is None
@patch('vllm_ascend.distributed.parallel_state._ETP', None)
@patch('vllm_ascend.distributed.parallel_state._EP', None)
def test_destory_ascend_model_parallel_when_both_none(self):
# Act
destory_ascend_model_parallel()
# Assert
assert vllm_ascend.distributed.parallel_state._ETP is None
assert vllm_ascend.distributed.parallel_state._EP is None
@patch('torch.distributed.is_initialized', return_value=True)
@patch('torch.distributed.get_world_size', return_value=8)
@patch('vllm_ascend.distributed.parallel_state.get_world_group',
return_value=MagicMock(device_group='npu:0', local_rank=0))
@patch('torch.distributed.get_backend', return_value='hccl')
@patch('vllm_ascend.distributed.parallel_state.init_model_parallel_group')
@patch('vllm_ascend.distributed.parallel_state.model_parallel_initialized',
return_value=False)
def test_init_ascend_model_parallel_normal_case(
self, mock_mp_init, mock_init_group, mock_get_backend,
mock_world_group, mock_get_world_size, mock_is_init):
"""Test normal initialization with default parameters"""
# Act
init_ascend_model_parallel()
# Assert
mock_init_group.assert_any_call([[0, 1, 2, 3, 4, 5, 6, 7]],
0,
'hccl',
group_name="ep")
mock_init_group.assert_any_call([[0]], 0, 'hccl', group_name="etp")
self.assertIsNotNone(vllm_ascend.distributed.parallel_state._EP)
self.assertIsNotNone(vllm_ascend.distributed.parallel_state._ETP)
@patch('vllm_ascend.distributed.parallel_state.model_parallel_initialized',
return_value=True)
def test_init_ascend_model_parallel_skip_if_initialized(
self, mock_mp_init):
"""Test skipping when model parallel already initialized"""
with patch.object(vllm_ascend.distributed.parallel_state,
'_EP') as mock_ep, patch.object(
vllm_ascend.distributed.parallel_state,
'_ETP') as mock_etp:
# Act
init_ascend_model_parallel()
# Assert
mock_ep.assert_not_called()
mock_etp.assert_not_called()
@patch('torch.distributed.is_initialized', return_value=False)
def test_init_ascend_model_parallel_assert_dist_not_init(
self, mock_is_init):
"""Test assertion when distributed not initialized"""
# Act & Assert
with self.assertRaises(AssertionError):
init_ascend_model_parallel()
@patch('torch.distributed.is_initialized', return_value=True)
@patch('torch.distributed.get_world_size', return_value=8)
@patch('vllm_ascend.distributed.parallel_state.get_world_group',
return_value=MagicMock(device_group='npu:0', local_rank=1))
@patch('torch.distributed.get_backend', return_value='hccl')
@patch('vllm_ascend.distributed.parallel_state.init_model_parallel_group')
@patch('vllm_ascend.distributed.parallel_state.model_parallel_initialized',
return_value=False)
def test_init_ascend_model_parallel_custom_params(
self, mock_mp_init, mock_init_group, mock_get_backend,
mock_world_group, mock_get_world_size, mock_is_init):
"""Test initialization with custom parallel sizes"""
# Act
init_ascend_model_parallel(expert_parallel_size=2,
expert_tensor_parallel_size=4,
world_size=8,
backend='hccl')
#Assert
mock_init_group.assert_any_call([[0, 4], [1, 5], [2, 6], [3, 7]],
1,
'hccl',
group_name="ep")
mock_init_group.assert_any_call([[0, 1, 2, 3], [4, 5, 6, 7]],
1,
'hccl',
group_name="etp")


@@ -42,7 +42,6 @@ class TestAscendConfig(TestBase):
         test_vllm_config = VllmConfig()
         # No additional config given, check the default value here.
         ascend_config = init_ascend_config(test_vllm_config)
-        self.assertEqual(ascend_config.expert_tensor_parallel_size, 0)
         self.assertIsNone(ascend_config.expert_map_path)
         torchair_graph_config = ascend_config.torchair_graph_config
@@ -75,12 +74,10 @@ class TestAscendConfig(TestBase):
             "ascend_scheduler_config": {
                 "enabled": True
             },
-            "expert_tensor_parallel_size": 1,
             "expert_map_path": "test_expert_map_path",
             "refresh": True
         }
         ascend_config = init_ascend_config(test_vllm_config)
-        self.assertEqual(ascend_config.expert_tensor_parallel_size, 1)
         self.assertEqual(ascend_config.expert_map_path, "test_expert_map_path")
         torchair_graph_config = ascend_config.torchair_graph_config


@@ -28,7 +28,6 @@ class TestNPUPlatform(TestBase):
         self.mock_vllm_config.speculative_config = None
         self.mock_ascend_config = MagicMock()
-        self.mock_ascend_config.expert_tensor_parallel_size = 0
         self.mock_ascend_config.torchair_graph_config.enabled = False
         self.mock_ascend_config.ascend_scheduler_config.enabled = False
@@ -253,30 +252,6 @@ class TestNPUPlatform(TestBase):
         mock_init_ascend.assert_called_once_with(self.mock_vllm_config)
         mock_check_ascend.assert_called_once()
-
-    @patch("vllm_ascend.utils.is_310p", return_value=False)
-    @patch("vllm_ascend.ascend_config.check_ascend_config")
-    @patch("vllm_ascend.ascend_config.init_ascend_config")
-    def test_check_and_update_config_expert_parallel_enabled(
-            self, mock_init_ascend, mock_check_ascend, mock_is_310p):
-        mock_init_ascend.return_value = self.mock_ascend_config
-        self.mock_vllm_config.parallel_config.enable_expert_parallel = True
-        self.mock_vllm_config.parallel_config.tensor_parallel_size = 2
-        self.mock_vllm_config.parallel_config.world_size_across_dp = 4
-
-        from vllm_ascend import platform
-        importlib.reload(platform)
-        self.platform.check_and_update_config(self.mock_vllm_config)
-
-        self.assertEqual(
-            self.mock_vllm_config.parallel_config.expert_tensor_parallel_size,
-            1)
-        self.assertEqual(
-            self.mock_vllm_config.parallel_config.expert_parallel_size,
-            self.mock_vllm_config.parallel_config.world_size_across_dp,
-        )
-
     @patch("vllm_ascend.utils.is_310p", return_value=False)
     @patch("vllm_ascend.ascend_config.check_ascend_config")
     @patch("vllm_ascend.ascend_config.init_ascend_config")