[Lint]Style: Convert root, benchmarks, tools and docs to ruff format (#5843)

### What this PR does / why we need it?
This PR fixes linting and formatting issues in the root directory, benchmarks/, tools/, and docs/ to align with the project's Ruff configuration.

This is part of a gradual effort to enable full linting coverage across
the repository. The corresponding paths have been removed from the
exclude list in pyproject.toml.
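
For illustration only, the pyproject.toml change is of the following shape — a minimal sketch assuming the exclude list named these directories directly; the exact entries and any remaining exclusions are not shown in this commit:

```toml
# Hypothetical sketch of the relevant section, not the actual diff in this PR.
# Before this change the affected paths sat in Ruff's exclude list; removing them
# lets Ruff lint and format these directories like the rest of the repository.
[tool.ruff]
exclude = [
    # entries along these lines were dropped (exact spellings are assumptions):
    #   "benchmarks/**",
    #   "tools/**",
    #   "docs/**",
]
```

With the exclusions gone, `ruff format .` and `ruff check .` pick these directories up along with the root-level files.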

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.13.0
- vLLM main: 2f4e6548ef

---------

Signed-off-by: root <root@LAPTOP-VQKDDVMG.localdomain>
Co-authored-by: root <root@LAPTOP-VQKDDVMG.localdomain>
Authored by SILONG ZENG on 2026-01-13 15:29:34 +08:00, committed by GitHub
parent 4b679984de
commit 523e83016b
14 changed files with 425 additions and 531 deletions


@@ -24,42 +24,58 @@ from .aisbench import maybe_download_from_modelscope
 class VllmbenchRunner:
 
     def _run_vllm_bench_task(self):
         vllm_bench_cmd = [
-            'vllm', 'bench', 'serve', '--backend', 'openai-chat',
-            '--trust-remote-code', '--served-model-name',
-            str(self.model_name), '--model', self.model_path, '--tokenizer',
-            self.model_path, '--metric-percentiles', '50,90,99', '--host',
-            self.host_ip, '--port',
-            str(self.port), '--save-result', '--result-filename',
-            self.result_filename, '--endpoint', '/v1/chat/completions',
-            '--ready-check-timeout-sec', '0'
+            "vllm",
+            "bench",
+            "serve",
+            "--backend",
+            "openai-chat",
+            "--trust-remote-code",
+            "--served-model-name",
+            str(self.model_name),
+            "--model",
+            self.model_path,
+            "--tokenizer",
+            self.model_path,
+            "--metric-percentiles",
+            "50,90,99",
+            "--host",
+            self.host_ip,
+            "--port",
+            str(self.port),
+            "--save-result",
+            "--result-filename",
+            self.result_filename,
+            "--endpoint",
+            "/v1/chat/completions",
+            "--ready-check-timeout-sec",
+            "0",
         ]
         self._concat_config_args(vllm_bench_cmd)
         print(f"running vllm_bench cmd: {' '.join(vllm_bench_cmd)}")
-        self.proc: subprocess.Popen = subprocess.Popen(vllm_bench_cmd,
-                                                       stdout=subprocess.PIPE,
-                                                       stderr=subprocess.PIPE,
-                                                       text=True)
+        self.proc: subprocess.Popen = subprocess.Popen(
+            vllm_bench_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
+        )
 
-    def __init__(self,
-                 model_name: str,
-                 port: int,
-                 config: dict,
-                 baseline: float,
-                 threshold: float = 0.97,
-                 model_path: str = "",
-                 host_ip: str = "localhost"):
+    def __init__(
+        self,
+        model_name: str,
+        port: int,
+        config: dict,
+        baseline: float,
+        threshold: float = 0.97,
+        model_path: str = "",
+        host_ip: str = "localhost",
+    ):
         self.model_name = model_name
         self.model_path = model_path
         if not self.model_path:
             self.model_path = maybe_download_from_modelscope(model_name)
-            assert self.model_path is not None, \
-                f"Failed to download model: model={self.model_path}"
+            assert self.model_path is not None, f"Failed to download model: model={self.model_path}"
         self.port = port
         self.host_ip = host_ip
-        curr_time = datetime.now().strftime('%Y%m%d%H%M%S')
+        curr_time = datetime.now().strftime("%Y%m%d%H%M%S")
         self.result_filename = f"result_vllm_bench_{curr_time}.json"
         self.config = config
         self.baseline = baseline
@@ -96,19 +112,14 @@ class VllmbenchRunner:
         stdout, stderr = self.proc.communicate()
         if self.proc.returncode != 0:
-            logging.error(
-                f"vllm bench command failed, return code: {self.proc.returncode}"
-            )
+            logging.error(f"vllm bench command failed, return code: {self.proc.returncode}")
             logging.error(f"Standard output: {stdout}")
             logging.error(f"Standard error: {stderr}")
-            raise RuntimeError(
-                f"vllm bench command execution failed: {stderr}")
+            raise RuntimeError(f"vllm bench command execution failed: {stderr}")
 
-        logging.info(
-            f"vllm bench command completed, return code: {self.proc.returncode}"
-        )
+        logging.info(f"vllm bench command completed, return code: {self.proc.returncode}")
         if stdout:
-            lines = stdout.split('\n')
+            lines = stdout.split("\n")
             last_lines = lines[-100:] if len(lines) > 100 else lines
             logging.info(f"Last {len(last_lines)} lines of standard output:")
             for line in last_lines:
@@ -119,36 +130,28 @@ class VllmbenchRunner:
     def _get_result(self):
         result_file = os.path.join(os.getcwd(), self.result_filename)
         print("Getting performance results from file: ", result_file)
-        with open(result_file, 'r', encoding='utf-8') as f:
+        with open(result_file, encoding="utf-8") as f:
             self.result = json.load(f)
 
     def _performance_verify(self):
         self._get_result()
         output_throughput = self.result["output_throughput"]
-        assert float(
-            output_throughput
-        ) >= self.baseline * self.threshold, f"Performance verification failed. The current Output Token Throughput is {output_throughput} token/s, which is not greater than or equal to {self.threshold} * baseline {self.baseline}."
+        assert float(output_throughput) >= self.baseline * self.threshold, (
+            "Performance verification failed. "
+            f"The current Output Token Throughput is {output_throughput} token/s, "
+            f"which is not greater than or equal to {self.threshold} * baseline {self.baseline}."
+        )
 
 
-def run_vllm_bench_case(model_name,
-                        port,
-                        config,
-                        baseline,
-                        threshold=0.97,
-                        model_path="",
-                        host_ip="localhost"):
+def run_vllm_bench_case(model_name, port, config, baseline, threshold=0.97, model_path="", host_ip="localhost"):
     try:
-        with VllmbenchRunner(model_name,
-                             port,
-                             config,
-                             baseline,
-                             threshold,
-                             model_path=model_path,
-                             host_ip=host_ip) as vllm_bench:
+        with VllmbenchRunner(
+            model_name, port, config, baseline, threshold, model_path=model_path, host_ip=host_ip
+        ) as vllm_bench:
             vllm_bench_result = vllm_bench.result
     except Exception as e:
         print(e)
         error_msg = f"vllm_bench run failed, reason is {e}"
         logging.error(error_msg)
-        assert False, f"vllm_bench run failed, reason is {e}"
+        raise RuntimeError(error_msg) from e
     return vllm_bench_result