Mooncake connector: support pipeline parallel & fix PP with flashcomm1 (#4054)

### What this PR does / why we need it?
To support pipeline parallelism (PP) with PD disaggregation, this PR adds PP support to the mooncake connector and fixes bugs that appear when PP is enabled together with other optimizations. It includes the following changes:
- The mooncake connector now supports PP during prefill; PP during decode is not supported yet.
- Fix bugs that occur when both PP and flashcomm1 are enabled.
- Optimize the ascend-scheduler to keep every pipeline stage running a full batch. Previously, the batch sizes of all pipeline stages summed to `max_num_seqs`, so the pipeline was never full; with this change every stage runs with batch_size = `max_num_seqs` (see the sketch after this list). The same change will be contributed to the vLLM scheduler as well.
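
A rough sketch of the batching change, using hypothetical numbers (`max_num_seqs = 16`, `pp_size = 4`); this illustrates only the budget change described above, not the actual scheduler code:
```
# Illustration of the batching fix, not the real ascend-scheduler code.
max_num_seqs, pp_size = 16, 4

# Before: the running-request budget was shared across all in-flight stages,
# so each of the pp_size microbatches averaged max_num_seqs / pp_size requests.
per_stage_before = max_num_seqs // pp_size   # 4

# After: each pipeline stage is budgeted independently, so every stage can
# run a full batch while pp_size batches are in flight.
per_stage_after = max_num_seqs               # 16

print(per_stage_before, per_stage_after)
```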

### Does this PR introduce _any_ user-facing change?
Yes. `pp_size` is added to the mooncake connector's `kv_connector_extra_config`:
```
"kv_connector_extra_config": {
    "use_ascend_direct": true,
    "prefill": {
        "dp_size": 1,
        "tp_size": 4,
        "pp_size": 4
    },
    "decode": {
        "dp_size": 16,
        "tp_size": 1
    }
}
```
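
For reference, the connector reads these values through `kv_transfer_config.get_from_extra_config`, which is also how the `MockVllmConfig` in the test diff below stubs them. A minimal sketch, assuming a hypothetical helper `read_pp_sizes` (only the `get_from_extra_config` call is confirmed by the tests):
```
# Sketch only: mirrors how the tests below mock get_from_extra_config;
# read_pp_sizes itself is a hypothetical helper, not connector API.
def read_pp_sizes(vllm_config):
    ktc = vllm_config.kv_transfer_config
    prefill_cfg = ktc.get_from_extra_config("prefill", {})
    decode_cfg = ktc.get_from_extra_config("decode", {})
    # pp_size defaults to 1 when unset; decode-side PP is not supported yet.
    return prefill_cfg.get("pp_size", 1), decode_cfg.get("pp_size", 1)
```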

### How was this patch tested?

- vLLM version: v0.12.0
- vLLM main: ad32e3e19c

---------

Signed-off-by: chenxiao <Jaychou1620@Gmail.com>
Signed-off-by: Kurumi5210 <Jaychou1620@Gmail.com>
Signed-off-by: Kurumi5210 <jaychou1620@gmail.com>
Signed-off-by: 秋刀鱼 <jaychou1620@Gmail.com>
Co-authored-by: chenxiao <Jaychou1620@Gmail.com>
Co-authored-by: zss <zss@qq.com>
Co-authored-by: zss <3265779424@qq.com>
Commit a82b0fa70e (parent ce5872705e), authored by lidenghui1110 on 2025-12-10 16:01:43 +08:00, committed by GitHub.
5 changed files with 394 additions and 141 deletions.


@@ -19,6 +19,20 @@ fake_engine = types.ModuleType("mooncake.engine")
 fake_engine.TransferEngine = MagicMock()  # type: ignore[attr-defined]
 sys.modules["mooncake.engine"] = fake_engine
 _mock_ascend_config = MagicMock(enable_kv_nz=False)
+_mock_pp_group = MagicMock(rank_in_group=0, world_size=1)
+_mock_tp_group = MagicMock(rank_in_group=0, world_size=4)
+patch('vllm_ascend.distributed.mooncake_connector.get_pp_group',
+      return_value=_mock_pp_group).start()
+patch('vllm_ascend.distributed.mooncake_connector.get_tp_group',
+      return_value=_mock_tp_group).start()
+patch(
+    'vllm_ascend.distributed.mooncake_connector.get_tensor_model_parallel_world_size',
+    return_value=4).start()
+patch(
+    'vllm_ascend.distributed.mooncake_connector.get_tensor_model_parallel_rank',
+    return_value=0).start()
 from vllm_ascend.distributed.mooncake_connector import (  # noqa: E402
     KVCacheRecvingThread, KVCacheSendingThread, KVCacheTaskTracker,
     KVConnectorRole, MooncakeAgentMetadata, MooncakeConnector,
@@ -88,6 +102,7 @@ class TestKVCacheSendingThreadInit(unittest.TestCase):
             'side_channel_host': 'localhost',
             'side_channel_port': 5555,
             'metadata': MagicMock(),
+            'vllm_config': MockVllmConfig(),
             'ready_event': threading.Event(),
             'kv_caches': kv_caches,
             'pcp_rank': 0
@@ -130,6 +145,7 @@ class TestGetAndClearFinishedRequests(unittest.TestCase):
             'prefill_tp_size': 4,
             'local_engine_id': 'engine_1',
             'side_channel_host': 'localhost',
+            'vllm_config': MockVllmConfig(),
             'side_channel_port': 5555,
             'metadata': {
                 "test": "metadata"
@@ -159,27 +175,32 @@ class TestKVCacheSendingThread(unittest.TestCase):
             kv_caches_base_addr=[12345678],
             num_blocks=2,
         )
+        vllm_config = MockVllmConfig()
         host = "127.0.0.1"
         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
             s.bind(('', 0))
-            free_port = s.getsockname()[1]
+            base_port = s.getsockname()[1]
         thread = KVCacheSendingThread(tp_rank=0,
                                       prefill_tp_size=1,
                                       local_engine_id="engine1",
                                       side_channel_host=host,
-                                      side_channel_port=free_port,
+                                      side_channel_port=base_port,
                                       metadata=metadata,
+                                      vllm_config=vllm_config,
                                       ready_event=ready_event,
                                       kv_caches={},
                                       pcp_rank=0)
         thread.start()
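        # The expected listener port offsets base_port by pipeline, tensor and
        # prefill-context rank, giving every (pp_rank, tp_rank, pcp_rank) a
        # distinct side channel: e.g. with base_port=6000 and tp_size=4, pp
        # stage 0 listens on ports 6000-6003 and stage 1 on 6004-6007
        # (assuming pcp_rank == 0).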
+        actual_port = base_port + (thread.pp_rank * thread.tp_size +
+                                   thread.tp_rank +
+                                   thread.pcp_rank * thread.prefill_tp_size)
         self.assertTrue(ready_event.wait(timeout=3),
                         "Server thread startup timeout")
         context = zmq.Context()  # type: ignore
         sock = context.socket(zmq.DEALER)  # type: ignore
-        sock.connect(f"tcp://{host}:{free_port}")
+        sock.connect(f"tcp://{host}:{actual_port}")
         encoder = msgspec.msgpack.Encoder()
         decoder = msgspec.msgpack.Decoder(type=MooncakeAgentMetadata)
@@ -213,6 +234,7 @@ class TestKVCacheRecvingThreadBasic(unittest.TestCase):
         self.thread = KVCacheRecvingThread(
             tp_rank=0,
             tp_size=4,
+            _prefill_pp_size=1,
             engine=self.engine,
             local_engine_id="local_engine",
             local_handshake_port=5555,
@@ -231,7 +253,7 @@ class TestKVCacheRecvingThreadBasic(unittest.TestCase):
             "remote_host": "localhost",
             "remote_handshake_port": 6666,
             "offset": 0,
-            "num_need_pulls": 2,
+            "tp_num_need_pulls": 2,
             "all_task_done": False
         }
         self.thread.add_request(
@@ -242,7 +264,7 @@ class TestKVCacheRecvingThreadBasic(unittest.TestCase):
             remote_host=test_req["remote_host"],
             remote_handshake_port=test_req["remote_handshake_port"],
             offset=test_req["offset"],
-            num_need_pulls=test_req["num_need_pulls"],
+            tp_num_need_pulls=test_req["tp_num_need_pulls"],
             all_task_done=test_req["all_task_done"])
         queued = self.thread.request_queue.get_nowait()
         self.assertEqual(queued["request_id"], "req1")
@@ -265,6 +287,7 @@ class TestSocketManagement(unittest.TestCase):
         self.thread = KVCacheRecvingThread(
             tp_rank=0,
             tp_size=4,
+            _prefill_pp_size=1,
             engine=self.engine,
             local_engine_id="local_engine",
             local_handshake_port=5555,
@@ -315,10 +338,13 @@ class TestCoreFunctionality(unittest.TestCase):
         self.ready_event = threading.Event()
         self.mock_queue = MagicMock()
         self.vllm_config = MockVllmConfig()
-        self.kv_caches: Dict[str, Any] = {}
+        self.kv_caches: Dict[str, Any] = {
+            "layer_0": (MagicMock(), MagicMock())
+        }
         self.thread = KVCacheRecvingThread(
             tp_rank=0,
             tp_size=4,
+            _prefill_pp_size=1,
             engine=self.engine,
             local_engine_id="local_engine",
             local_handshake_port=5555,
@@ -337,7 +363,7 @@ class TestCoreFunctionality(unittest.TestCase):
             "remote_handshake_port": 6666,
             "remote_transfer_port": 7777,
             "offset": 0,
-            "num_need_pulls": 2,
+            "tp_num_need_pulls": 2,
             "all_task_done": False
         }
         self.thread.task_tracker = MagicMock()
@@ -362,12 +388,14 @@ class TestCoreFunctionality(unittest.TestCase):
     @patch.object(KVCacheRecvingThread, '_get_remote_metadata')
     def test_transfer_kv_cache(self, mock_get_meta):
-        self.thread.kv_caches_base_addr["remote_engine"] = {
-            6666: [0x3000, 0x4000]
-        }
-        self.thread._transfer_kv_cache(self.test_req)
+        with patch(
+                'vllm_ascend.distributed.mooncake_connector.get_ascend_config'
+        ) as mock_config:
+            mock_config.return_value.enable_kv_nz = False
+            self.thread.kv_caches_base_addr["remote_engine"] = {
+                6666: [0x3000, 0x4000]
+            }
+            self.thread._transfer_kv_cache(self.test_req)
         self.engine.batch_transfer_sync_read.assert_called_once()
         call_args, call_kwargs = self.engine.batch_transfer_sync_read.call_args
         self.assertEqual(call_args[0], "localhost:7777")
@@ -398,6 +426,7 @@ class TestMetadataHandling(unittest.TestCase):
         self.thread = KVCacheRecvingThread(
             tp_rank=0,
             tp_size=4,
+            _prefill_pp_size=1,
             engine=self.engine,
             local_engine_id="local_engine",
             local_handshake_port=5555,
@@ -461,6 +490,7 @@ class TestMainThreadLoop(unittest.TestCase):
         self.thread = KVCacheRecvingThread(
             tp_rank=0,
             tp_size=4,
+            _prefill_pp_size=1,
             engine=self.engine,
             local_engine_id="local_engine",
             local_handshake_port=5555,
@@ -482,7 +512,7 @@ class TestMainThreadLoop(unittest.TestCase):
             "remote_handshake_port": 6666,
             "remote_transfer_port": 7777,
             "offset": 0,
-            "num_need_pulls": 2,
+            "tp_num_need_pulls": 2,
             "all_task_done": False
         }
@@ -509,6 +539,10 @@ class MockVllmConfig:
         self.parallel_config.tensor_parallel_size = 2
         self.parallel_config.data_parallel_rank = 0
         self.parallel_config.data_parallel_size_local = 1
+        self.parallel_config.pipeline_parallel_size = 1
+        self.parallel_config.data_parallel_rank_local = 0
+        self.model_config.get_num_layers_by_block_type = MagicMock(
+            return_value=32)
         self.cache_config.block_size = 16
         self.kv_transfer_config.kv_port = 5000
         self.kv_transfer_config.kv_role = 'kv_producer'
@@ -516,11 +550,13 @@ class MockVllmConfig:
         self.kv_transfer_config.get_from_extra_config.side_effect = lambda k, d: {
             "prefill": {
                 "tp_size": 2,
-                "dp_size": 1
+                "dp_size": 1,
+                "pp_size": 1
             },
             "decode": {
                 "tp_size": 2,
-                "dp_size": 1
+                "dp_size": 1,
+                "pp_size": 1
             }
         }.get(k, d)
         self.additional_config = {}
@@ -1062,12 +1098,13 @@ class TestMooncakeConnectorWorker(unittest.TestCase):
             patch('torch.Tensor.element_size', return_value=4),
             patch('torch.Tensor.data_ptr', return_value=0x1000),
             patch('math.prod', return_value=128),
-            patch('random.Random'),
             patch(
                 'vllm_ascend.distributed.mooncake_connector.get_tensor_model_parallel_rank',
                 mock_get_tensor_model_parallel_rank),
             patch('vllm_ascend.distributed.mooncake_connector.get_tp_group',
                   mock_get_tp_group),
+            patch('vllm_ascend.distributed.mooncake_connector.get_pp_group',
+                  return_value=_mock_pp_group),
             patch('vllm_ascend.distributed.mooncake_connector.get_ip',
                   mock_get_ip),
             patch(
@@ -1096,8 +1133,6 @@ class TestMooncakeConnectorWorker(unittest.TestCase):
             patch(
                 'vllm_ascend.distributed.mooncake_connector.get_decode_context_model_parallel_world_size',
                 return_value=1),
-            patch('vllm_ascend.distributed.mooncake_connector.get_pcp_group',
-                  return_value=self.mock_pcp_group),
             patch(
                 'vllm_ascend.distributed.mooncake_connector.get_ascend_config',
                 return_value=MagicMock()),
@@ -1146,6 +1181,83 @@ class TestMooncakeConnectorWorker(unittest.TestCase):
         # Default tp_rank is 0, so device_id should be 10
         self.assertIsNotNone(worker.engine)
 
+    def test_get_remote_tp_rank(self):
+
+        def get_tp_rank(prefill_tp_size: int, prefill_pp_size: int,
+                        decode_tp_size: int, num_kv_heads: int,
+                        tp_num_need_pulls: int, is_deepseek_mla: bool):
+            with patch('vllm_ascend.distributed.mooncake_connector.get_ascend_config',
+                       return_value=MagicMock()), \
+                 patch.object(self.vllm_config.kv_transfer_config, 'get_from_extra_config',
+                              side_effect=lambda k, d=None: {
+                                  "prefill": {"tp_size": prefill_tp_size, "dp_size": 1, "pp_size": prefill_pp_size},
+                                  "decode": {"tp_size": decode_tp_size, "dp_size": 1, "pp_size": 1}
+                              }.get(k, d)):
+                self.vllm_config.model_config.hf_config.num_key_value_heads = num_kv_heads
+                self.vllm_config.model_config.is_deepseek_mla = is_deepseek_mla
+                worker = MooncakeConnectorWorker(self.vllm_config,
+                                                 self.engine_id)
+                worker.tp_num_need_pulls = tp_num_need_pulls
+                worker.use_sparse = 0
+                return worker._get_remote_ranks_for_req('test')
+
+        self.assertIn(
+            get_tp_rank(16, 1, 1, 4, 4, False)[0],
+            [[0, 4, 8, 12], [1, 5, 9, 13], [2, 6, 10, 14], [3, 7, 11, 15]])
+        self.assertIn(
+            get_tp_rank(8, 1, 1, 4, 4, False)[0], [[0, 2, 4, 6], [1, 3, 5, 7]])
+        self.assertIn(get_tp_rank(4, 1, 1, 4, 4, False)[0], [[0, 1, 2, 3]])
+        self.assertIn(get_tp_rank(16, 1, 4, 4, 1, False),
+                      [[[0], [4], [8], [12]], [[1], [5], [9], [13]],
+                       [[2], [6], [10], [14]], [[3], [7], [11], [15]]])
+        self.assertIn(get_tp_rank(8, 1, 4, 4, 1, False),
+                      [[[0], [2], [4], [6]], [[1], [3], [5], [7]]])
+        self.assertIn(get_tp_rank(4, 2, 2, 4, 2, False),
+                      [[[0, 1, 4, 5], [2, 3, 6, 7]]])
+        self.assertIn(get_tp_rank(4, 1, 4, 4, 1, False),
+                      [[[0], [1], [2], [3]]])
+        self.assertIn(
+            get_tp_rank(8, 2, 1, 4, 4, False)[0],
+            [[0, 2, 4, 6, 8, 10, 12, 14], [1, 3, 5, 7, 9, 11, 13, 15]])
+        self.assertIn(get_tp_rank(4, 2, 2, 4, 2, False),
+                      [[[0, 1, 4, 5], [2, 3, 6, 7]]])
+        self.assertIn(get_tp_rank(2, 2, 1, 4, 2, False), [[[0, 1, 2, 3]]])
+        self.assertIn(
+            get_tp_rank(4, 4, 2, 8, 2, False),
+            [[[0, 1, 4, 5, 8, 9, 12, 13], [2, 3, 6, 7, 10, 11, 14, 15]]])
+        self.assertIn(
+            get_tp_rank(4, 2, 1, 4, 4, False)[0], [[0, 1, 2, 3, 4, 5, 6, 7]])
+        self.assertIn(
+            get_tp_rank(4, 4, 1, 4, 4, False)[0],
+            [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]])
+        self.assertIn(get_tp_rank(8, 2, 4, 4, 1, False),
+                      [[[0, 8], [2, 10], [4, 12], [6, 14]],
+                       [[1, 9], [3, 11], [5, 13], [7, 15]]])
+        self.assertIn(get_tp_rank(4, 2, 4, 4, 4, False),
+                      [[[0, 4], [1, 5], [2, 6], [3, 7]]])
+        self.assertIn(
+            get_tp_rank(4, 4, 4, 4, 1, False),
+            [[[0, 4, 8, 12], [1, 5, 9, 13], [2, 6, 10, 14], [3, 7, 11, 15]]])
+        self.assertIn(
+            get_tp_rank(16, 1, 1, 1, 1,
+                        True)[0], [[0], [1], [2], [3], [4], [5], [6], [7], [8],
+                                   [9], [10], [11], [12], [13], [14], [15]])
+        self.assertIn(get_tp_rank(4, 1, 4, 1, 1, True), [[[0], [1], [2], [3]]])
+        self.assertIn(
+            get_tp_rank(8, 2, 1, 1, 1, True)[0],
+            [[0, 8], [2, 10], [4, 12], [6, 14], [1, 9], [3, 11], [5, 13],
+             [7, 15]])
+        self.assertIn(
+            get_tp_rank(4, 4, 1, 1, 1, True)[0],
+            [[0, 4, 8, 12], [1, 5, 9, 13], [2, 6, 10, 14], [3, 7, 11, 15]])
+        self.assertIn(
+            get_tp_rank(8, 2, 4, 1, 1, True)[0],
+            [[0, 8], [2, 10], [4, 12], [6, 14], [1, 9], [3, 11], [5, 13],
+             [7, 15]])
+        self.assertIn(
+            get_tp_rank(4, 4, 4, 1, 1, True),
+            [[[0, 4, 8, 12], [1, 5, 9, 13], [2, 6, 10, 14], [3, 7, 11, 15]]])
+
 if __name__ == '__main__':
     unittest.main()
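
For readers tracing `test_get_remote_tp_rank`: in the simplest configurations (non-MLA, prefill `pp_size == 1`), the expected mapping from a decode TP rank to the prefill ranks it pulls KV from follows a stride pattern. The sketch below reproduces a few of the assertions above; it is derived from the test expectations, not from the connector source, and `remote_prefill_ranks` is a hypothetical name:
```
# Reproduces the pp_size == 1, non-MLA expectations from the test above.
# offset selects one of the equivalent KV replicas; the assertIn lists
# enumerate all offsets, here we fix offset=0 (the first option).
def remote_prefill_ranks(prefill_tp, decode_tp, tp_num_need_pulls, offset=0):
    stride = prefill_tp // (decode_tp * tp_num_need_pulls)
    return [[offset + (d * tp_num_need_pulls + i) * stride
             for i in range(tp_num_need_pulls)]
            for d in range(decode_tp)]

assert remote_prefill_ranks(16, 1, 4) == [[0, 4, 8, 12]]      # get_tp_rank(16, 1, 1, 4, 4, False)
assert remote_prefill_ranks(8, 4, 1) == [[0], [2], [4], [6]]  # get_tp_rank(8, 1, 4, 4, 1, False)
```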