[CI][Doc] Optimize multi-node CI (#3565)

### What this PR does / why we need it?
This pull request mainly does the following things:
1. Add a doc for multi-node CI; the main content is the underlying
mechanism and how to contribute
2. Simplify the config YAML to be more developer-friendly
3. Optimize the Mooncake installation script to prevent accidental
failures during installation
4. Fix the workflow to ensure the Kubernetes manifests can be applied
correctly
5. Add a Qwen3-235B-W8A8 disaggregated_prefill test
6. Add a GLM-4.5 multi-DP test
7. Add a 4-node 2P1D (two prefillers, one decoder) disaggregated_prefill test
8. Refactor the nightly tests
### Does this PR introduce _any_ user-facing change?

### How was this patch tested?


- vLLM version: v0.11.0rc3
- vLLM main: 17c540a993

---------

Signed-off-by: wangli <wangli858794774@gmail.com>
Commit: 7f73c28a24 (parent: 292cf339c3)
Author: Li Wang
Committed: 2025-10-25 09:23:47 +08:00 (via GitHub)
21 changed files with 1165 additions and 378 deletions


```diff
@@ -49,6 +49,7 @@ from vllm.utils import get_open_port
 from tests.e2e.model_utils import (TokensTextLogprobs,
                                    TokensTextLogprobsPromptLogprobs)
+from tests.e2e.nightly.multi_node.config.multi_node_config import NodeInfo
 from vllm_ascend.ascend_config import clear_ascend_config

 # TODO: remove this part after the patch is merged into vllm; if we do
 # not explicitly patch here, some of the patches might be ineffective
```
```diff
@@ -115,6 +116,9 @@ class RemoteOpenAIServer:
                  env_dict: Optional[dict[str, str]] = None,
                  seed: Optional[int] = None,
                  auto_port: bool = True,
+                 nodes_info: Optional[list[NodeInfo]] = None,
+                 disaggregated_prefill: Optional[dict] = None,
+                 proxy_port: Optional[int] = None,
                  max_wait_seconds: Optional[float] = None,
                  override_hf_configs: Optional[dict[str, Any]] = None) -> None:
         if isinstance(vllm_serve_args, str):
```
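The `nodes_info` entries only need to expose the fields that the health-check logic further down reads. A minimal sketch of that shape, assuming a plain dataclass (the real definition lives in `tests/e2e/nightly/multi_node/config/multi_node_config.py`, imported above, and may carry more fields):

```python
from dataclasses import dataclass


@dataclass
class NodeInfo:
    """Sketch of the per-node record consumed by the health checks below.

    Only the fields actually read by _wait_for_server_pd are shown; the
    real class in multi_node_config.py may define more.
    """
    ip: str            # node IP reachable from the test runner
    server_port: int   # port the node's vLLM api_server listens on
    headless: bool     # True for worker nodes that expose no API server
```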
```diff
@@ -144,13 +148,23 @@
                 "--hf-overrides",
                 json.dumps(override_hf_configs)
             ]
         self.host = str(server_host)
         self.port = int(server_port)
+        # for multi-node tests
+        self.nodes_info = nodes_info
+        self.disaggregated_prefill = disaggregated_prefill
+        self.cur_index = os.getenv("LWS_WORKER_INDEX", 0)
+        self.proxy_port = proxy_port
         self._start_server(model, vllm_serve_args, env_dict)
         max_wait_seconds = max_wait_seconds or 7200
-        self._wait_for_server(url=self.url_for("health"),
-                              timeout=max_wait_seconds)
+        if self.disaggregated_prefill:
+            assert proxy_port is not None, "for disaggregated_prefill, proxy port must be provided"
+            self._wait_for_server_pd(proxy_port=proxy_port)
+        else:
+            self._wait_for_server(url=self.url_for("health"),
+                                  timeout=max_wait_seconds)

     def __enter__(self):
         return self
```
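Taken together, a 2P1D-style test could stand the fixture up roughly like this. Every concrete value below (model id, IPs, ports, serve args, and the `disaggregated_prefill` payload, whose real shape comes from the test's YAML config) is an illustrative placeholder, not what CI actually uses:

```python
# All values here are illustrative placeholders, not the CI configuration.
nodes = [
    NodeInfo(ip="10.0.0.1", server_port=8000, headless=False),  # prefiller 1
    NodeInfo(ip="10.0.0.2", server_port=8000, headless=False),  # prefiller 2
    NodeInfo(ip="10.0.0.3", server_port=8000, headless=False),  # decoder
]

with RemoteOpenAIServer(
        model="some-org/some-model",              # hypothetical model id
        vllm_serve_args=["--tensor-parallel-size", "8"],
        nodes_info=nodes,
        disaggregated_prefill={"enabled": True},  # placeholder; real payload
                                                  # comes from the YAML config
        proxy_port=10001,
) as server:
    # With disaggregated_prefill set, __init__ polls every non-headless
    # node's /health endpoint and then the proxy's /healthcheck before
    # the body of this block runs.
    ...
```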
```diff
@@ -187,6 +201,21 @@
             if isinstance(client, httpx.Client):
                 client.close()

+    def _wait_for_server_pd(self, proxy_port: int):
+        # Wait for all api_server nodes to be ready
+        assert self.nodes_info is not None, "cluster info must be provided"
+        for node_info in self.nodes_info:
+            if node_info.headless:
+                continue
+            url_health = f"http://{node_info.ip}:{node_info.server_port}/health"
+            self._wait_for_server(url=url_health, timeout=7200)
+
+        # Wait for the proxy to be ready
+        master_node = self.nodes_info[0]
+        url_proxy = f"http://{master_node.ip}:{proxy_port}/healthcheck"
+        self._wait_for_server(url=url_proxy, timeout=7200)
+
     def _wait_for_server(self, *, url: str, timeout: float):
         # run health check
         start = time.time()
```
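The hunk truncates `_wait_for_server` just after the timer starts. For readers unfamiliar with the pattern, a poll loop of this kind typically looks like the following; this is an illustrative standalone sketch, not the method's actual body:

```python
import time

import requests


def wait_for_health(url: str, timeout: float, interval: float = 5.0) -> None:
    """Illustrative health-poll loop: GET the endpoint until it returns
    HTTP 200 or the timeout elapses. Not the actual _wait_for_server body."""
    start = time.time()
    while True:
        try:
            if requests.get(url, timeout=10).status_code == 200:
                return
        except requests.ConnectionError:
            pass  # server not up yet; keep polling
        if time.time() - start > timeout:
            raise RuntimeError(
                f"server at {url} did not become healthy in {timeout}s")
        time.sleep(interval)
```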