Files
xc-llm-ascend/.github/workflows/scripts/ci_utils.py
meihanc ab9cd2e305 [CI] Add CI summary log (#7202)
### What this PR does / why we need it?
This PR adds a new CI log summarizer, `ci_log_summary.py`, and wires it
into unit-test and e2e workflows so failed jobs publish a structured
failure summary to the GitHub step summary.
Examples:
- `python3 .github/workflows/scripts/ci_log_summary.py --log-file
/tmp/unit-test.log --mode ut --step-name "Unit test"`
- `python3 .github/workflows/scripts/ci_log_summary.py --run-id
23127187822 --format json`

A maintenance note is added to `ci_utils.py` to clarify that the `START`
/ `PASSED` / `FAILED (exit code X)` log lines are parsed by
`ci_log_summary.py`, so any future format changes must be coordinated
with the corresponding summarizer regexes.
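For illustration, patterns along these lines would match the log lines `ci_utils.py` emits. The names `START_RE` and `END_RE` and the exact expressions are assumptions for this sketch, not the actual regexes in `ci_log_summary.py`, which presumably also has to strip or tolerate the ANSI color codes wrapped around each line.

```python
import re

# Hypothetical patterns matching the lines run_tests() prints in ci_utils.py.
# The real regexes live in ci_log_summary.py and may differ.
START_RE = re.compile(r"\[(\d+)/(\d+)\] START (?P<name>\S+)")
END_RE = re.compile(
    r"\[(\d+)/(\d+)\] (?P<status>PASSED|FAILED \(exit code (?P<code>-?\d+)\)) "
    r"(?P<name>\S+) \((?P<secs>\d+)s\)"
)

line = "[3/12] FAILED (exit code 1) tests/ut/test_example.py (42s)"
match = END_RE.search(line)
if match:
    print(match.group("name"), match.group("status"), match.group("secs"))
```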

🤖 Generated with [Codex]<noreply@openai.com>
- vLLM version: v0.16.0
- vLLM main:
4034c3d32e
---------
Signed-off-by: Meihan-chen <jcccx.cmh@gmail.com>
Signed-off-by: meihanc <jcccx.cmh@gmail.com>
Co-authored-by: Codex <noreply@openai.com>
2026-03-19 09:32:06 +08:00

104 lines
3.2 KiB
Python

import subprocess
import time
from dataclasses import dataclass


class _Color:
    HEADER = "\033[95m"
    GREEN = "\033[92m"
    RED = "\033[91m"
    RESET = "\033[0m"


@dataclass
class TestFile:
    name: str
    estimated_time: float = 60
    is_skipped: bool = False


@dataclass
class TestRecord:
    name: str
    passed: bool
    elapsed: float
    estimated: float

    def to_dict(self) -> dict:
        return {
            "name": self.name,
            "passed": self.passed,
            "elapsed": self.elapsed,
            "estimated": self.estimated,
        }
def run_tests(
    files: list[TestFile],
    continue_on_error: bool = False,
) -> tuple[int, list[TestRecord]]:
    """
    Run each TestFile with pytest and collect timing results.

    NOTE:
        The emitted START / PASSED / FAILED log lines are parsed by
        ci_log_summary.py to recover per-test invocation boundaries.
        Keep this output format stable, or update the corresponding
        regexes in that summarizer together with any change here.

    Args:
        files: Tests to run (skipped entries should already be filtered out).
        continue_on_error: If True, keep running after a failure.

    Returns:
        (exit_code, records); exit_code is 0 on full success, -1 otherwise.
    """
    records: list[TestRecord] = []
    all_passed = True
    total_start = time.perf_counter()
    for i, test in enumerate(files):
        print(f"\n{'.' * 60}", flush=True)
        # NOTE: ci_log_summary.py depends on this START line format
        # when splitting suite-level logs into per-test runs.
        print(
            f"{_Color.HEADER}[{i + 1}/{len(files)}] START {test.name}{_Color.RESET}",
            flush=True,
        )
        start = time.perf_counter()
        result = subprocess.run(
            ["pytest", "-sv", "--durations=0", "--color=yes", test.name]
        )
        elapsed = time.perf_counter() - start
        passed = result.returncode == 0
        records.append(
            TestRecord(
                name=test.name,
                passed=passed,
                elapsed=elapsed,
                estimated=test.estimated_time,
            )
        )
        color = _Color.GREEN if passed else _Color.RED
        status = "PASSED" if passed else f"FAILED (exit code {result.returncode})"
        # NOTE: ci_log_summary.py depends on this PASSED / FAILED (exit code X)
        # line format for suite end detection.
        print(
            f"{color}[{i + 1}/{len(files)}] {status} {test.name} ({elapsed:.0f}s){_Color.RESET}",
            flush=True,
        )
        if not passed:
            all_passed = False
            if not continue_on_error:
                break
    total_elapsed = time.perf_counter() - total_start
    passed_count = sum(1 for r in records if r.passed)
    print(f"\n{'=' * 60}")
    color = _Color.GREEN if all_passed else _Color.RED
    print(
        f"{color}Summary: {passed_count}/{len(files)} passed "
        f"({total_elapsed:.2f}s total){_Color.RESET}"
    )
    print("=" * 60)
    for r in records:
        icon = f"{_Color.GREEN}✓{_Color.RESET}" if r.passed else f"{_Color.RED}✗{_Color.RESET}"
        print(f"  {icon} {r.name} ({r.elapsed:.0f}s)")
    print(flush=True)
    return (0 if all_passed else -1), records
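For context, a suite runner could invoke `run_tests` roughly as follows; the import path, the file names, and the estimated times are illustrative placeholders, not taken from the repository.

```python
import sys

from ci_utils import TestFile, run_tests  # assumed import path

# Placeholder test list; real workflows build this from the repo's test layout.
FILES = [
    TestFile(name="tests/ut/test_scheduler.py", estimated_time=90),
    TestFile(name="tests/ut/test_sampler.py"),
    TestFile(name="tests/ut/test_platform.py", is_skipped=True),
]

if __name__ == "__main__":
    # run_tests expects skipped entries to be filtered out by the caller.
    exit_code, records = run_tests(
        [f for f in FILES if not f.is_skipped],
        continue_on_error=True,
    )
    # records can be serialized for downstream tooling, e.g. a timing report.
    print([r.to_dict() for r in records])
    sys.exit(0 if exit_code == 0 else 1)
```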