[1/N] Refactor nightly test structure (#5479)
### What this PR does / why we need it?
This patch is part of a series of refactoring actions covering the nightly tests: clarifying their directory structure, refactoring the config retrieval logic, and optimizing the workflow. This PR is the first step: it refactors the directory structure of the nightly tests to make it more readable and logical.
- vLLM version: v0.13.0
- vLLM main: 5326c89803
Signed-off-by: wangli <wangli858794774@gmail.com>
@@ -17,7 +17,7 @@ env_common:
 disaggregated_prefill:
   enabled: true
   prefiller_host_index: [0, 1]
-  decoder_host_index: [2]
+  decoder_host_index: [2, 3]
 
 deployment:
   -
@@ -16,7 +16,7 @@ env_common:
 disaggregated_prefill:
   enabled: true
   prefiller_host_index: [0, 1]
-  decoder_host_index: [2]
+  decoder_host_index: [2, 3]
 
 deployment:
   -
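With two decoder hosts, the removed config loader shown below derives the prefiller/decoder split directly from these index lists (see `_init_disaggregated_prefill`). A minimal, self-contained sketch of that arithmetic, assuming the 4-node layout implied by this hunk:

```python
# Sketch of the node split implied by this hunk, assuming a 4-node cluster
# with prefiller_host_index = [0, 1] and decoder_host_index = [2, 3].
num_nodes = 4
decoder_host_index = [2, 3]

decode_start_index = decoder_host_index[0]  # first decoder node -> 2
num_prefillers = decode_start_index         # nodes 0 and 1 serve as prefillers
num_decoders = num_nodes - num_prefillers   # nodes 2 and 3 serve as decoders

assert (num_prefillers, num_decoders) == (2, 2)
```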
@@ -1,285 +0,0 @@
import logging
import os
import subprocess
from dataclasses import dataclass
from typing import Optional

import regex as re
import yaml

from tests.e2e.nightly.multi_node.config.utils import (get_all_ipv4,
                                                        get_avaliable_port,
                                                        get_cluster_ips,
                                                        get_net_interface,
                                                        setup_logger)

setup_logger()
logger = logging.getLogger(__name__)
DISAGGEGATED_PREFILL_PORT = 5333
CONFIG_BASE_PATH = "tests/e2e/nightly/multi_node/config/models/"


@dataclass
class NodeInfo:
    index: int
    ip: str
    server_cmd: str
    headless: bool
    server_port: int

    def __str__(self):
        return (f"NodeInfo:\n"
                f" index={self.index}\n"
                f" ip={self.ip}\n"
                f" headless={self.headless}\n"
                f" server_port={self.server_port}")


class MultiNodeConfig:

    def __init__(self,
                 model: str,
                 test_name: str,
                 nodes_info: list[NodeInfo],
                 npu_per_node: int = 16,
                 server_port: int = 8080,
                 disaggregated_prefill: Optional[dict] = None,
                 envs: Optional[dict] = None,
                 perf_cmd: Optional[str] = None,
                 acc_cmd: Optional[str] = None):
        self.test_name = test_name
        self.model = model
        self.nodes_info = nodes_info
        # We assume the first index of nodes as the master
        # NOTE: this may be different in the scenarios like disaggregated prefill
        # There may be multi groups of nodes, and the master of each group may be different
        self.master_ip = self.nodes_info[0].ip
        self.num_nodes = len(self.nodes_info)
        self.npu_per_node = npu_per_node
        self.server_port = server_port
        self.envs = envs if envs is not None else {}
        self.proxy_port = get_avaliable_port()
        self.perf_cmd = perf_cmd
        self.acc_cmd = acc_cmd

        self.disaggregated_prefill = disaggregated_prefill
        self._init_disaggregated_prefill()

        self._init_dist_env()
        self.server_cmd = self._expand_env_vars(self.node_info.server_cmd,
                                                self.envs)

    @property
    def cur_ip(self):
        return self.nodes_info[self.cur_index].ip

    @property
    def nic_name(self):
        return get_net_interface(self.cur_ip)

    @property
    def node_info(self):
        return self.nodes_info[self.cur_index]

    @property
    def cur_index(self):
        # 1. Try to read worker index from K8s environment variable
        worker_index = os.environ.get("LWS_WORKER_INDEX")
        if worker_index:
            return int(worker_index)

        # 2. Fallback: match local IP against cluster IP list
        cluster_ips = [node.ip for node in self.nodes_info]
        cluster_ip_set = set(cluster_ips)

        cur_ips = get_all_ipv4()

        for ip in cur_ips:
            if ip in cluster_ip_set:
                return cluster_ips.index(ip)

        raise RuntimeError(
            "Could not determine current node index: no matching IP.\n"
            f"Local machine IPs: {cur_ips}\n"
            f"Cluster IPs: {cluster_ips}\n"
            "Please check your config file or network settings.")

    def _init_disaggregated_prefill(self):
        if self.disaggregated_prefill:
            decode_host_index = self.disaggregated_prefill.get(
                "decoder_host_index")
            if not decode_host_index:
                raise RuntimeError("got empty decode_host_index")
            self.decode_start_index: int = decode_host_index[0]
            self.num_prefillers = self.decode_start_index
            self.num_decoders = self.num_nodes - self.num_prefillers

    def _init_dist_env(self):
        self.envs["HCCL_IF_IP"] = self.cur_ip
        self.envs["GLOO_SOCKET_IFNAME"] = self.nic_name
        self.envs["TP_SOCKET_IFNAME"] = self.nic_name
        self.envs["HCCL_SOCKET_IFNAME"] = self.nic_name
        self.envs["LOCAL_IP"] = self.cur_ip
        self.envs["NIC_NAME"] = self.nic_name

        master_ip = self.master_ip
        if self.disaggregated_prefill:
            if self.cur_index < self.decode_start_index:
                # For prefiller nodes, use the default master ip(index==0) as DP master
                master_ip = self.master_ip
            else:
                # For decoder nodes, use the first decoder node as DP master
                master_ip = self.nodes_info[self.decode_start_index].ip

        self.envs["MASTER_IP"] = master_ip
        ascend_path = "/usr/local/Ascend/ascend-toolkit/latest/python/site-packages"
        self.envs[
            "LD_LIBRARY_PATH"] = f"{ascend_path}:{self.envs.get('LD_LIBRARY_PATH', os.environ.get('LD_LIBRARY_PATH', ''))}"

        # keep the envs keys and values as strings
        str_envs = {k: str(v) for k, v in self.envs.items()}
        self.envs.clear()
        self.envs.update(str_envs)

    @staticmethod
    def _expand_env_vars(cmd: str, env: dict) -> str:
        """Expand environment variables in the command string."""
        cmd = str(cmd)
        pattern = re.compile(r"\$(\w+)|\$\{(\w+)\}")

        def replace_var(match):
            var_name = match.group(1) or match.group(2)
            return str(env.get(var_name, match.group(0)))

        return pattern.sub(replace_var, cmd)

    class _ProxyContext:

        def __init__(self, outer, proxy_script):
            self.outer = outer
            self.proxy_script = proxy_script
            self.process = None

        def __enter__(self):
            o = self.outer
            if not o.disaggregated_prefill or not o.is_master:
                logger.info(
                    "Disaggregated prefill not enabled or not master node, skipping proxy launch."
                )
                return self

            prefiller_indices = o.disaggregated_prefill["prefiller_host_index"]
            decoder_indices = o.disaggregated_prefill["decoder_host_index"]

            common_indices = set(prefiller_indices) & set(decoder_indices)
            assert not common_indices, f"Common indices found: {common_indices}"
            assert o.proxy_port is not None, "proxy_port must be set"

            cluster_ips = [node.ip for node in o.nodes_info]
            prefiller_ips = [cluster_ips[i] for i in prefiller_indices]
            decoder_ips = [cluster_ips[i] for i in decoder_indices]
            prefiller_ports_list = [str(o.server_port)] * len(prefiller_ips)
            decoder_ports_list = [str(o.server_port)] * len(decoder_ips)

            proxy_cmd = [
                "python",
                self.proxy_script,
                "--host",
                o.cur_ip,
                "--port",
                str(o.proxy_port),
                "--prefiller-hosts",
                *prefiller_ips,
                "--prefiller-ports",
                *prefiller_ports_list,
                "--decoder-hosts",
                *decoder_ips,
                "--decoder-ports",
                *decoder_ports_list,
            ]

            env = os.environ.copy()
            env.update(o.envs)
            logger.info(f"Launching proxy: {' '.join(proxy_cmd)}")

            self.process = subprocess.Popen(proxy_cmd, env=env)
            o.proxy_process = self.process
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            if self.process:
                logger.info("Terminating proxy server process...")
                try:
                    self.process.terminate()
                    self.process.wait(timeout=5)
                except subprocess.TimeoutExpired:
                    logger.warning(
                        "Proxy process did not terminate, killing it...")
                    self.process.kill()
                logger.info("Proxy server process terminated.")

    def launch_server_proxy(self, proxy_script: str):
        """Return a context manager that launches the proxy server if disaggregated prefill is enabled."""
        return self._ProxyContext(self, proxy_script)

    @classmethod
    def from_yaml(cls, yaml_path: Optional[str] = None):
        if not yaml_path:
            yaml_path = os.getenv("CONFIG_YAML_PATH", "DeepSeek-V3.yaml")
        yaml_path = os.path.join(CONFIG_BASE_PATH, yaml_path)
        with open(yaml_path, 'r') as file:
            config_data = yaml.safe_load(file)
        test_name = config_data.get("test_name", "default_test")
        model = config_data.get("model", "default_model")
        envs = config_data.get("env_common", {})
        num_nodes = config_data.get("num_nodes", 2)
        npu_per_node = config_data.get("npu_per_node", 16)
        disaggregated_prefill = config_data.get("disaggregated_prefill")
        # If disaggregated_prefill is set, override server_port to an available port for proxy running
        server_port = config_data.get("server_port", 8080)

        deployments = config_data.get("deployment", [])
        assert len(deployments) == num_nodes, \
            f"Number of deployments ({len(deployments)}) must match num_nodes ({num_nodes})"
        cluster_ips = config_data.get("cluster_hosts", None)
        if cluster_ips:
            assert len(cluster_ips) == num_nodes, \
                "Must provide cluster_ips for all nodes if it is explicitly specified."
        else:
            logger.info("Resolving cluster IPs via DNS...")
            cluster_ips = get_cluster_ips(num_nodes)
        nodes_info = []

        for index, deployment in enumerate(deployments):
            # after assert len(deployments) == num_nodes, we can assume that this will must have a match
            server_cmd = deployment.get("server_cmd", "")
            headless = "--headless" in server_cmd
            nodes_info.append(
                NodeInfo(ip=cluster_ips[index],
                         index=index,
                         headless=headless,
                         server_port=server_port,
                         server_cmd=server_cmd))

        benchmarks = config_data.get("benchmarks") or {}
        assert benchmarks is not None, "benchmarks must be provided"
        perf_cmd = benchmarks.get("perf")
        acc_cmd = benchmarks.get("acc")

        return cls(model=model,
                   test_name=test_name,
                   npu_per_node=npu_per_node,
                   envs=envs,
                   server_port=server_port,
                   disaggregated_prefill=disaggregated_prefill,
                   nodes_info=nodes_info,
                   perf_cmd=perf_cmd,
                   acc_cmd=acc_cmd)

    @property
    def world_size(self):
        return self.num_nodes * self.npu_per_node

    @property
    def is_master(self):
        return self.cur_index == 0
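The `_expand_env_vars` helper above substitutes `$VAR` and `${VAR}` references in a node's `server_cmd` with values from the merged env dict, leaving unknown variables untouched. A self-contained sketch of that behaviour (using the stdlib `re` module in place of the `regex` package, which accepts the same pattern; the command string and values are illustrative placeholders):

```python
import re


def expand_env_vars(cmd: str, env: dict) -> str:
    """Same substitution rule as MultiNodeConfig._expand_env_vars above."""
    pattern = re.compile(r"\$(\w+)|\$\{(\w+)\}")

    def replace_var(match):
        var_name = match.group(1) or match.group(2)
        # Unknown variables are left as-is, exactly like the original helper.
        return str(env.get(var_name, match.group(0)))

    return pattern.sub(replace_var, str(cmd))


# Placeholder command and env values, for illustration only.
cmd = "vllm serve $MODEL --port ${SERVER_PORT} --host $LOCAL_IP"
env = {"MODEL": "some-model", "SERVER_PORT": 8080, "LOCAL_IP": "10.0.0.1"}
print(expand_env_vars(cmd, env))
# -> vllm serve some-model --port 8080 --host 10.0.0.1
```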
@@ -1,129 +0,0 @@
import logging
import os
import socket
import time
from contextlib import contextmanager
from typing import Optional

import psutil


@contextmanager
def temp_env(env_dict):
    old_env = {}
    for k, v in env_dict.items():
        old_env[k] = os.environ.get(k)
        os.environ[k] = str(v)
    try:
        yield
    finally:
        for k, v in old_env.items():
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v


def dns_resolver(retries: int = 240, base_delay: float = 0.5):
    # We should resolve DNS with retries to avoid transient network issues.
    # When the pod is just started, DNS resolution may fail.
    def resolve(dns: str):
        delay = base_delay
        for attempt in range(retries):
            try:
                return socket.gethostbyname(dns)
            except socket.gaierror:
                if attempt == retries - 1:
                    raise
                time.sleep(delay)
                delay = min(delay * 1.5, 5)

    return resolve


def get_cluster_dns_list(word_size: int) -> list[str]:
    leader_dns = os.getenv("LWS_LEADER_ADDRESS")
    if not leader_dns:
        raise RuntimeError("LWS_LEADER_ADDRESS is not set")

    workers = [f"vllm-0-{i}.vllm.vllm-project" for i in range(1, word_size)]
    return [leader_dns] + workers


def get_cluster_ips(word_size: int = 2) -> list[str]:
    resolver = dns_resolver()
    return [resolver(dns) for dns in get_cluster_dns_list(word_size)]


def get_avaliable_port(start_port: int = 6000, end_port: int = 7000) -> int:
    import socket
    for port in range(start_port, end_port):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            try:
                s.bind(("", port))
                return port
            except OSError:
                continue
    raise RuntimeError("No available port found")


def get_cur_ip(retries: int = 20, base_delay: float = 0.5):
    """
    Returns the pod/machine's primary IP address with retry.
    This is necessary because network interfaces may not be ready
    immediately after container startup.
    """
    delay = base_delay

    for attempt in range(retries):
        try:
            # Best method: UDP trick (doesn't actually send packets)
            with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
                s.connect(("8.8.8.8", 80))
                return s.getsockname()[0]
        except Exception:
            # fallback: hostname resolution
            try:
                return socket.gethostbyname(socket.gethostname())
            except Exception:
                if attempt == retries - 1:
                    raise RuntimeError("Failed to determine local IP address")
                time.sleep(delay)
                delay = min(delay * 1.5, 5)


def get_net_interface(ip: Optional[str] = None) -> Optional[str]:
    """
    Returns specified IP's network interface.
    If no IP is provided, uses the first from hostname -I.
    """
    if ip is None:
        ip = get_cur_ip()

    for iface, addrs in psutil.net_if_addrs().items():
        for addr in addrs:
            if addr.family == socket.AF_INET and addr.address == ip:
                return iface
    return None


def get_all_ipv4():
    """get all the ipv4 address for current node"""
    ipv4s = set()
    hostname = socket.gethostname()

    for info in socket.getaddrinfo(hostname, None, family=socket.AF_INET):
        ipv4s.add(info[4][0])

    ipv4s.add("127.0.0.1")

    return list(ipv4s)


def setup_logger():
    """Setup logging configuration."""
    logging.basicConfig(
        level=logging.INFO,
        format="[%(asctime)s] [%(levelname)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
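The `temp_env` context manager above applies environment overrides only for the duration of a `with` block and restores the previous state afterwards. A short usage sketch, assuming the helper above is in scope (the variable names are illustrative):

```python
import os

os.environ.pop("HCCL_IF_IP", None)  # ensure the key starts out unset

with temp_env({"HCCL_IF_IP": "10.0.0.1", "GLOO_SOCKET_IFNAME": "eth0"}):
    # Inside the block the overrides are visible (always as strings).
    assert os.environ["HCCL_IF_IP"] == "10.0.0.1"

# After the block the previous state is restored: the key is removed again.
assert "HCCL_IF_IP" not in os.environ
```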