[CI] Refactor multi-node CI (#3487)

### What this PR does / why we need it?
Refactor the multi-node CI cases. The goal of this PR is to make multi-node CI
cases easier to add: a developer can now add a multi-node cluster model test
case (including prefill/decode disaggregation) by adding a single new YAML
configuration file.
### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

---------

Signed-off-by: wangli <wangli858794774@gmail.com>
Li Wang
2025-10-17 09:04:31 +08:00
committed by GitHub
parent ccb6fb9ec1
commit 4c4a8458a5
18 changed files with 632 additions and 437 deletions


@@ -0,0 +1,126 @@
# For disaggregated mode, set disaggregated_prefill.enabled: true and configure:
#   prefiller_host_index: indices (into the cluster hosts) of the nodes running prefillers
#   decoder_host_index: indices (into the cluster hosts) of the nodes running decoders
# Suppose we have **4 nodes** running a 2P1D setup (2 Prefillers + 1 Decoder):
# ┌───────────────┬───────────────┬───────────────┬───────────────┐
# │     node0     │     node1     │     node2     │     node3     │
# │  Prefiller #1 │  Prefiller #2 │    Decoder    │    Decoder    │
# └───────────────┴───────────────┴───────────────┴───────────────┘
# For the prefiller nodes, the hosts are node0 and node1.
# For the decoder nodes, there is only 1 decoder instance (dp+tp+ep spans node2
# and node3, where node3 runs in headless mode).
# So the prefiller_host_index is [0, 1], and the decoder_host_index is [2].
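# As YAML, that hypothetical 4-node 2P1D layout would be written as:
#   disaggregated_prefill:
#     enabled: true
#     prefiller_host_index: [0, 1]
#     decoder_host_index: [2]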
test_name: "test DeepSeek-V3 disaggregated_prefill"
model: "vllm-ascend/DeepSeek-V3-W8A8"
num_nodes: 2
npu_per_node: 16
env_common:
  VLLM_USE_MODELSCOPE: true
  OMP_PROC_BIND: false
  OMP_NUM_THREADS: 100
  HCCL_BUFFSIZE: 1024
  SERVER_PORT: 8080
disaggregated_prefill:
  enabled: true
  prefiller_host_index: [0]
  decoder_host_index: [1]
deployment:
  -
    local_index: 0
    master_index: 0
    headless: false
    env_extend:
    server_cmd: >
      vllm serve "vllm-ascend/DeepSeek-V3-W8A8"
      --host 0.0.0.0
      --port $SERVER_PORT
      --data-parallel-size 2
      --data-parallel-size-local 2
      --tensor-parallel-size 8
      --seed 1024
      --enforce-eager
      --enable-expert-parallel
      --max-num-seqs 16
      --max-model-len 8192
      --max-num-batched-tokens 8192
      --quantization ascend
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.9
      --kv-transfer-config
      '{"kv_connector": "MooncakeConnector",
        "kv_role": "kv_producer",
        "kv_port": "30000",
        "engine_id": "0",
        "kv_connector_module_path": "vllm_ascend.distributed.mooncake_connector",
        "kv_connector_extra_config": {
          "prefill": {
            "dp_size": 2,
            "tp_size": 8
          },
          "decode": {
            "dp_size": 2,
            "tp_size": 8
          }
        }
      }'
  -
    local_index: 1
    master_index: 0
    headless: true
    env_extend:
    server_cmd: >
      vllm serve "vllm-ascend/DeepSeek-V3-W8A8"
      --host 0.0.0.0
      --port $SERVER_PORT
      --data-parallel-size 2
      --data-parallel-size-local 2
      --tensor-parallel-size 8
      --seed 1024
      --quantization ascend
      --max-num-seqs 16
      --max-model-len 8192
      --max-num-batched-tokens 8192
      --enable-expert-parallel
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.9
      --additional-config '{"torchair_graph_config":{"enabled":true}}'
      --kv-transfer-config
      '{"kv_connector": "MooncakeConnector",
        "kv_role": "kv_consumer",
        "kv_port": "30200",
        "engine_id": "1",
        "kv_connector_module_path": "vllm_ascend.distributed.mooncake_connector",
        "kv_connector_extra_config": {
          "prefill": {
            "dp_size": 2,
            "tp_size": 8
          },
          "decode": {
            "dp_size": 2,
            "tp_size": 8
          }
        }
      }'
benchmarks:
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs400
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 1
    max_out_len: 2
    batch_size: 1
    baseline: 5
    threshold: 0.97
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/AIME2024
    request_conf: vllm_api_general_chat
    dataset_conf: aime2024/aime2024_gen_0_shot_chat_prompt
    max_out_len: 10
    batch_size: 32
    baseline: 1
    threshold: 1


@@ -0,0 +1,76 @@
test_name: "test Qwen3-235B-A22B multi-dp"
model: "Qwen/Qwen3-235B-A22B"
num_nodes: 2
npu_per_node: 16
env_common:
  VLLM_USE_MODELSCOPE: true
  OMP_PROC_BIND: false
  OMP_NUM_THREADS: 100
  HCCL_BUFFSIZE: 1024
  SERVER_PORT: 8080
deployment:
  -
    local_index: 0
    master_index: 0
    headless: false
    env_extend:
    server_cmd: >
      vllm serve "Qwen/Qwen3-235B-A22B"
      --host 0.0.0.0
      --port $SERVER_PORT
      --data-parallel-size 4
      --data-parallel-size-local 2
      --data-parallel-address $LOCAL_IP
      --data-parallel-rpc-port 13389
      --tensor-parallel-size 8
      --seed 1024
      --enable-expert-parallel
      --max-num-seqs 16
      --max-model-len 8192
      --max-num-batched-tokens 8192
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.9
  -
    local_index: 1
    master_index: 0
    headless: true
    env_extend:
    server_cmd: >
      vllm serve "Qwen/Qwen3-235B-A22B"
      --headless
      --data-parallel-size 4
      --data-parallel-size-local 2
      --data-parallel-start-rank 2
      --data-parallel-address $MASTER_IP
      --data-parallel-rpc-port 13389
      --tensor-parallel-size 8
      --seed 1024
      --max-num-seqs 16
      --max-model-len 8192
      --max-num-batched-tokens 8192
      --enable-expert-parallel
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.9
benchmarks:
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs400
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 1
    max_out_len: 2
    batch_size: 1
    baseline: 5
    threshold: 0.97
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/AIME2024
    request_conf: vllm_api_general_chat
    dataset_conf: aime2024/aime2024_gen_0_shot_chat_prompt
    max_out_len: 10
    batch_size: 32
    baseline: 1
    threshold: 1


@@ -0,0 +1,207 @@
import logging
import os
import subprocess
from typing import Optional

import regex as re
import yaml

from tests.e2e.nightly.multi_node.config.utils import (get_avaliable_port,
                                                       get_cluster_ips,
                                                       get_cur_ip,
                                                       get_net_interface,
                                                       setup_logger)

setup_logger()
logger = logging.getLogger(__name__)

DISAGGREGATED_PREFILL_PROXY_SCRIPT = "examples/disaggregated_prefill_v1/load_balance_proxy_layerwise_server_example.py"


class MultiNodeConfig:

    def __init__(self,
                 model: str,
                 test_name: str,
                 num_nodes: int = 2,
                 npu_per_node: int = 16,
                 server_port: int = 8080,
                 headless: bool = False,
                 disaggregated_prefill: Optional[dict] = None,
                 envs: Optional[dict] = None,
                 server_cmd: str = "",
                 perf_cmd: Optional[str] = None,
                 acc_cmd: Optional[str] = None):
        self.test_name = test_name
        self.model = model
        self.num_nodes = num_nodes
        self.npu_per_node = npu_per_node
        self.envs = envs if envs is not None else {}
        self.server_port = server_port
        # The proxy only exists in disaggregated-prefill mode; clients then
        # target the proxy port instead of the server port.
        self.proxy_port: Optional[int] = None
        if disaggregated_prefill:
            self.proxy_port = get_avaliable_port()
        self.headless = headless
        self.server_cmd = server_cmd
        self.perf_cmd = perf_cmd
        self.acc_cmd = acc_cmd
        assert perf_cmd is not None, "perf_cmd must be provided"
        assert acc_cmd is not None, "acc_cmd must be provided"
        assert server_cmd is not None, "server_cmd must be provided"
        self.cur_index = os.getenv("LWS_WORKER_INDEX", "0")
        self.cur_ip = get_cur_ip()
        self.nic_name = get_net_interface(self.cur_ip)
        self.cluster_ips = get_cluster_ips(num_nodes)
        self.disaggregated_prefill = disaggregated_prefill
        self._init_dist_env()
        self.server_cmd = self._expand_env_vars(self.server_cmd, self.envs)

    def _init_dist_env(self):
        self.envs["HCCL_IF_IP"] = self.cur_ip
        self.envs["GLOO_SOCKET_IFNAME"] = self.nic_name
        self.envs["TP_SOCKET_IFNAME"] = self.nic_name
        self.envs["HCCL_SOCKET_IFNAME"] = self.nic_name
        self.envs["LOCAL_IP"] = self.cur_ip
        self.envs["NIC_NAME"] = self.nic_name
        self.envs["MASTER_IP"] = self.cluster_ips[0]
        ascend_path = "/usr/local/Ascend/ascend-toolkit/latest/python/site-packages"
        self.envs[
            "LD_LIBRARY_PATH"] = f"{ascend_path}:{self.envs.get('LD_LIBRARY_PATH', os.environ.get('LD_LIBRARY_PATH', ''))}"
        # keep the envs keys and values as strings
        str_envs = {k: str(v) for k, v in self.envs.items()}
        self.envs.clear()
        self.envs.update(str_envs)

    @staticmethod
    def _expand_env_vars(cmd: str, env: dict) -> str:
        """Expand environment variables in the command string."""
        cmd = str(cmd)
        pattern = re.compile(r"\$(\w+)|\$\{(\w+)\}")

        def replace_var(match):
            var_name = match.group(1) or match.group(2)
            return str(env.get(var_name, match.group(0)))

        return pattern.sub(replace_var, cmd)
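
    # Illustrative example (added for clarity, not in the original source):
    #   _expand_env_vars("--port $SERVER_PORT", {"SERVER_PORT": 8080})
    #   returns "--port 8080"; variables missing from `env` are left unexpanded.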
    class _ProxyContext:

        def __init__(self, outer, proxy_script):
            self.outer = outer
            self.proxy_script = proxy_script
            self.process = None

        def __enter__(self):
            o = self.outer
            if not o.disaggregated_prefill or not o.is_master:
                logger.info(
                    "Disaggregated prefill not enabled or not master node, skipping proxy launch."
                )
                return self
            prefiller_indices = o.disaggregated_prefill["prefiller_host_index"]
            decoder_indices = o.disaggregated_prefill["decoder_host_index"]
            common_indices = set(prefiller_indices) & set(decoder_indices)
            assert not common_indices, f"Common indices found: {common_indices}"
            assert o.proxy_port is not None, "proxy_port must be set"
            prefiller_ips = [o.cluster_ips[i] for i in prefiller_indices]
            decoder_ips = [o.cluster_ips[i] for i in decoder_indices]
            prefiller_ports_list = [str(o.server_port)] * len(prefiller_ips)
            decoder_ports_list = [str(o.server_port)] * len(decoder_ips)
            proxy_cmd = [
                "python",
                self.proxy_script,
                "--host",
                o.cur_ip,
                "--port",
                str(o.proxy_port),
                "--prefiller-hosts",
                *prefiller_ips,
                "--prefiller-ports",
                *prefiller_ports_list,
                "--decoder-hosts",
                *decoder_ips,
                "--decoder-ports",
                *decoder_ports_list,
            ]
            env = os.environ.copy()
            env.update(o.envs)
            logger.info(f"Launching proxy: {' '.join(proxy_cmd)}")
            self.process = subprocess.Popen(proxy_cmd, env=env)
            o.proxy_process = self.process
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            if self.process:
                logger.info("Terminating proxy server process...")
                try:
                    self.process.terminate()
                    self.process.wait(timeout=5)
                except subprocess.TimeoutExpired:
                    logger.warning(
                        "Proxy process did not terminate, killing it...")
                    self.process.kill()
                logger.info("Proxy server process terminated.")

    def launch_server_proxy(self, proxy_script: str):
        """Return a context manager that launches the proxy server if disaggregated prefill is enabled."""
        return self._ProxyContext(self, proxy_script)
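
    # Illustrative usage (mirrors test_multi_node.py later in this diff):
    #   config = MultiNodeConfig.from_yaml()
    #   with config.launch_server_proxy(DISAGGREGATED_PREFILL_PROXY_SCRIPT):
    #       ...  # the proxy starts only on the master node with disaggregation enabled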
    @classmethod
    def from_yaml(cls, yaml_path: Optional[str] = None):
        if not yaml_path:
            yaml_path = os.getenv(
                "CONFIG_YAML_PATH",
                "tests/e2e/nightly/multi_node/config/models/DeepSeek-V3.yaml")
        with open(yaml_path, 'r') as file:
            config_data = yaml.safe_load(file)
        test_name = config_data.get("test_name", "default_test")
        model = config_data.get("model", "default_model")
        envs = config_data.get("env_common", {})
        num_nodes = config_data.get("num_nodes", 2)
        npu_per_node = config_data.get("npu_per_node", 16)
        disaggregated_prefill = config_data.get("disaggregated_prefill")
        # If disaggregated_prefill is set, __init__ picks an available port for
        # the proxy, and requests target the proxy instead of this port.
        server_port = config_data.get("server_port", 8080)
        deployments = config_data.get("deployment", [])
        assert len(deployments) == num_nodes, \
            f"Number of deployments ({len(deployments)}) must match num_nodes ({num_nodes})"
        # Defaults in case no deployment entry matches this worker's index.
        server_cmd = ""
        headless = False
        for deployment in deployments:
            if deployment.get("local_index") == int(
                    os.getenv("LWS_WORKER_INDEX", "0")):
                envs_extend = deployment.get("env_extend", {})
                if envs_extend:
                    envs.update(envs_extend)
                server_cmd = deployment.get("server_cmd")
                headless = deployment.get("headless", False)
                break
        benchmarks = config_data.get("benchmarks", {})
        assert benchmarks is not None, "benchmarks must be provided"
        perf_cmd = benchmarks["perf"]
        acc_cmd = benchmarks["acc"]
        return cls(model=model,
                   test_name=test_name,
                   num_nodes=num_nodes,
                   npu_per_node=npu_per_node,
                   envs=envs,
                   server_port=server_port,
                   headless=headless,
                   disaggregated_prefill=disaggregated_prefill,
                   server_cmd=server_cmd,
                   perf_cmd=perf_cmd,
                   acc_cmd=acc_cmd)

    @property
    def world_size(self):
        return self.num_nodes * self.npu_per_node

    @property
    def is_master(self):
        return int(self.cur_index) == 0


@@ -0,0 +1,95 @@
import logging
import os
import socket
from contextlib import contextmanager
from typing import Optional

import psutil

# import torch.distributed as dist


@contextmanager
def temp_env(env_dict):
    """Temporarily set environment variables, restoring old values on exit."""
    old_env = {}
    for k, v in env_dict.items():
        old_env[k] = os.environ.get(k)
        os.environ[k] = str(v)
    try:
        yield
    finally:
        for k, v in old_env.items():
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v
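
# Illustrative usage of temp_env (added for clarity, not in the original source):
#   with temp_env({"SERVER_PORT": 8080}):
#       assert os.environ["SERVER_PORT"] == "8080"
#   # the previous value (or absence) of SERVER_PORT is restored on exit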
# @contextmanager
# def dist_group(backend="gloo"):
#     if dist.is_initialized():
#         yield
#         return
#     dist.init_process_group(backend=backend)
#     try:
#         yield
#     finally:
#         dist.destroy_process_group()


def get_cluster_ips(world_size: int = 2) -> list[str]:
    """
    Returns the IP addresses of all nodes in the cluster.
    0: leader
    1~N-1: workers
    """
    leader_dns = os.getenv("LWS_LEADER_ADDRESS")
    if not leader_dns:
        raise RuntimeError("LWS_LEADER_ADDRESS is not set")
    cluster_dns = [leader_dns]
    for i in range(1, world_size):
        cur_dns = f"vllm-0-{i}.vllm.vllm-project"
        cluster_dns.append(cur_dns)
    return [socket.gethostbyname(dns) for dns in cluster_dns]
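
# Illustrative example (the hostname pattern matches the LeaderWorkerSet
# manifest later in this diff): with world_size=2, get_cluster_ips resolves
# [LWS_LEADER_ADDRESS, "vllm-0-1.vllm.vllm-project"] to IPv4 addresses, leader first.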
def get_avaliable_port(start_port: int = 6000, end_port: int = 7000) -> int:
    """Return the first port in [start_port, end_port) that can be bound."""
    for port in range(start_port, end_port):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            try:
                s.bind(("", port))
                return port
            except OSError:
                continue
    raise RuntimeError("No available port found")


def get_cur_ip() -> str:
    """Returns the current machine's IP address."""
    return socket.gethostbyname_ex(socket.gethostname())[2][0]


def get_net_interface(ip: Optional[str] = None) -> Optional[str]:
    """
    Returns the network interface bound to the specified IP.
    If no IP is provided, uses the current machine's IP.
    """
    if ip is None:
        ip = get_cur_ip()
    for iface, addrs in psutil.net_if_addrs().items():
        for addr in addrs:
            if addr.family == socket.AF_INET and addr.address == ip:
                return iface
    return None


def setup_logger():
    """Setup logging configuration."""
    logging.basicConfig(
        level=logging.INFO,
        format="[%(asctime)s] [%(levelname)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )


@@ -0,0 +1,132 @@
apiVersion: leaderworkerset.x-k8s.io/v1
kind: LeaderWorkerSet
metadata:
  name: vllm
  namespace: vllm-project
spec:
  replicas: 1
  leaderWorkerTemplate:
    size: 2
    restartPolicy: RecreateGroupOnPodRestart
    leaderTemplate:
      metadata:
        labels:
          role: leader
      spec:
        containers:
          - name: vllm-leader
            image: m.daocloud.io/quay.io/ascend/cann:8.2.rc1-a3-ubuntu22.04-py3.11
            env:
              - name: WORKSPACE
                value: "/root/workspace"
              # Set the vLLM and vLLM-Ascend versions here; update them on each new release.
              - name: VLLM_VERSION
                value: "v0.11.0"
              - name: VLLM_ASCEND_VERSION
                value: "main"
              - name: MOONCAKE_VERSION
                value: "06cc217504a6f1b0cdaa26b096b985651b262748"
            command:
              - sh
              - -c
              - |
                bash /root/.cache/tests/run.sh
                tail -f /dev/null
            resources:
              limits:
                huawei.com/ascend-1980: "16"
                memory: 512Gi
                ephemeral-storage: 100Gi
              requests:
                huawei.com/ascend-1980: "16"
                ephemeral-storage: 100Gi
                cpu: 125
            ports:
              - containerPort: 8080
            # readinessProbe:
            #   tcpSocket:
            #     port: 8080
            #   initialDelaySeconds: 15
            #   periodSeconds: 10
            volumeMounts:
              - mountPath: /root/.cache
                name: shared-volume
              - mountPath: /usr/local/Ascend/driver/tools
                name: driver-tools
              - mountPath: /dev/shm
                name: dshm
        volumes:
          - name: dshm
            emptyDir:
              medium: Memory
              sizeLimit: 15Gi
          - name: shared-volume
            persistentVolumeClaim:
              claimName: nv-action-vllm-benchmarks-v2
          - name: driver-tools
            hostPath:
              path: /usr/local/Ascend/driver/tools
    workerTemplate:
      spec:
        containers:
          - name: vllm-worker
            image: m.daocloud.io/quay.io/ascend/cann:8.2.rc1-a3-ubuntu22.04-py3.11
            env:
              - name: WORKSPACE
                value: "/root/workspace"
              # Set the vLLM and vLLM-Ascend versions here; update them on each new release.
              - name: VLLM_VERSION
                value: "v0.11.0"
              - name: VLLM_ASCEND_VERSION
                value: "main"
              - name: MOONCAKE_VERSION
                value: "06cc217504a6f1b0cdaa26b096b985651b262748"
            command:
              - sh
              - -c
              - |
                bash /root/.cache/tests/run.sh
                tail -f /dev/null
            resources:
              limits:
                huawei.com/ascend-1980: "16"
                memory: 512Gi
                ephemeral-storage: 100Gi
              requests:
                huawei.com/ascend-1980: "16"
                ephemeral-storage: 100Gi
                cpu: 125
            volumeMounts:
              - mountPath: /root/.cache
                name: shared-volume
              - mountPath: /usr/local/Ascend/driver/tools
                name: driver-tools
              - mountPath: /dev/shm
                name: dshm
        volumes:
          - name: dshm
            emptyDir:
              medium: Memory
              sizeLimit: 15Gi
          - name: shared-volume
            persistentVolumeClaim:
              claimName: nv-action-vllm-benchmarks-v2
          - name: driver-tools
            hostPath:
              path: /usr/local/Ascend/driver/tools
---
apiVersion: v1
kind: Service
metadata:
  name: vllm-leader
  namespace: vllm-project
spec:
  ports:
    - name: http
      port: 8080
      protocol: TCP
      targetPort: 8080
  selector:
    leaderworkerset.sigs.k8s.io/name: vllm
    role: leader
  type: ClusterIP


@@ -0,0 +1,145 @@
#!/bin/bash
set -euo pipefail

export SRC_DIR="$WORKSPACE/source_code"

check_npu_info() {
    echo "====> Check NPU info"
    npu-smi info
    cat "/usr/local/Ascend/ascend-toolkit/latest/$(uname -i)-linux/ascend_toolkit_install.info"
}

check_and_config() {
    echo "====> Configure mirrors and git proxy"
    git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf "https://github.com/"
    pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
    export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
}

checkout_src() {
    echo "====> Checkout source code"
    mkdir -p "$SRC_DIR"
    # vllm-ascend
    if [ ! -d "$SRC_DIR/vllm-ascend" ]; then
        git clone --depth 1 -b "$VLLM_ASCEND_VERSION" https://github.com/vllm-project/vllm-ascend.git "$SRC_DIR/vllm-ascend"
    fi
    # vllm
    if [ ! -d "$SRC_DIR/vllm" ]; then
        git clone -b "$VLLM_VERSION" https://github.com/vllm-project/vllm.git "$SRC_DIR/vllm"
    fi
    # mooncake
    if [ ! -d "$SRC_DIR/Mooncake" ]; then
        git clone -b pooling_async_memecpy_v1 https://github.com/AscendTransport/Mooncake "$SRC_DIR/Mooncake"
    fi
}

install_sys_dependencies() {
    echo "====> Install system dependencies"
    apt-get update -y
    DEP_LIST=()
    while IFS= read -r line; do
        [[ -n "$line" && ! "$line" =~ ^# ]] && DEP_LIST+=("$line")
    done < "$SRC_DIR/vllm-ascend/packages.txt"
    apt-get install -y "${DEP_LIST[@]}" gcc g++ cmake libnuma-dev iproute2
}

install_vllm() {
    echo "====> Install vllm and vllm-ascend"
    VLLM_TARGET_DEVICE=empty pip install -e "$SRC_DIR/vllm"
    pip install -e "$SRC_DIR/vllm-ascend"
    pip install modelscope
    # Install for pytest
    pip install -r "$SRC_DIR/vllm-ascend/requirements-dev.txt"
}

install_mooncake() {
    echo "====> Install mooncake"
    apt-get update -y
    apt-get install -y --no-install-recommends mpich libmpich-dev
    cd "$SRC_DIR/Mooncake"
    bash dependencies.sh --yes
    # Purge any conflicting MPI installs, then reinstall MPICH for the build.
    apt purge mpich libmpich-dev -y
    apt purge openmpi-bin -y
    apt purge openmpi-bin libopenmpi-dev -y
    apt install mpich libmpich-dev -y
    export CPATH=/usr/lib/aarch64-linux-gnu/mpich/include/:$CPATH
    export CPATH=/usr/lib/aarch64-linux-gnu/openmpi/lib:$CPATH
    mkdir build
    cd -
    cd "$SRC_DIR/Mooncake/build"
    cmake ..
    make -j
    make install
    cp mooncake-transfer-engine/src/transport/ascend_transport/hccl_transport/ascend_transport_c/libascend_transport_mem.so /usr/local/Ascend/ascend-toolkit/latest/python/site-packages/
    cp mooncake-transfer-engine/src/libtransfer_engine.so /usr/local/Ascend/ascend-toolkit/latest/python/site-packages/
    cd -
}

kill_npu_processes() {
    # `|| true` keeps `set -e -o pipefail` from aborting when nothing matches.
    pgrep python3 | xargs -r kill -9 || true
    pgrep VLLM | xargs -r kill -9 || true
    sleep 4
}

run_tests() {
    echo "====> Run tests"
    shopt -s nullglob
    declare -A results
    local total=0
    local passed=0
    local failed=0
    local REPORT_FILE="/root/.cache/test_summary.md"
    echo "# Nightly Multi-node Test Summary" > "$REPORT_FILE"
    echo "" >> "$REPORT_FILE"
    echo "| Config File | Result |" >> "$REPORT_FILE"
    echo "|--------------|---------|" >> "$REPORT_FILE"
    for file in tests/e2e/nightly/multi_node/config/models/*.yaml; do
        export CONFIG_YAML_PATH="$file"
        echo "Running test with config: $CONFIG_YAML_PATH"
        if pytest -sv tests/e2e/nightly/multi_node/test_multi_node.py; then
            results["$file"]="✅ PASS"
            passed=$((passed + 1))   # avoid ((passed++)): it fails under set -e when the value is 0
        else
            results["$file"]="❌ FAIL"
            failed=$((failed + 1))
        fi
        total=$((total + 1))
        echo "| \`$file\` | ${results[$file]} |" >> "$REPORT_FILE"
        echo "------------------------------------------"
        kill_npu_processes
    done
    shopt -u nullglob
    echo "" >> "$REPORT_FILE"
    echo "## Summary" >> "$REPORT_FILE"
    echo "- **Total:** $total" >> "$REPORT_FILE"
    echo "- **Passed:** $passed" >> "$REPORT_FILE"
    echo "- **Failed:** $failed" >> "$REPORT_FILE"
    echo
    echo "✅ Markdown report written to: $REPORT_FILE"
}

main() {
    check_npu_info
    check_and_config
    checkout_src
    install_sys_dependencies
    install_vllm
    install_mooncake
    run_tests
}

main "$@"


@@ -0,0 +1,30 @@
from tests.e2e.conftest import RemoteOpenAIServer
from tests.e2e.nightly.multi_node.config.multi_node_config import (
    DISAGGREGATED_PREFILL_PROXY_SCRIPT, MultiNodeConfig)


def test_multi_node() -> None:
    config = MultiNodeConfig.from_yaml()
    env_dict = config.envs
    # perf_cmd = config.perf_cmd
    # acc_cmd = config.acc_cmd
    # In disaggregated-prefill mode, clients talk to the proxy port instead.
    server_port = config.server_port if not config.disaggregated_prefill else config.proxy_port
    server_host = config.cluster_ips[0]
    with config.launch_server_proxy(DISAGGREGATED_PREFILL_PROXY_SCRIPT):
        with RemoteOpenAIServer(
                model=config.model,
                vllm_serve_args=config.server_cmd,
                server_port=server_port,
                server_host=server_host,
                env_dict=env_dict,
                auto_port=False,
                max_wait_seconds=2000,
        ) as remote_server:
            # base_url = remote_server.url_root
            if config.is_master:
                pass
                # TODO: enable perf and acc test
                # subprocess.run(perf_cmd, check=True)
                # subprocess.run(acc_cmd, check=True)
            else:
                remote_server.hang_until_terminated()