[Lint] Style: Convert test/ to ruff format (Batch #5) (#6747)

### What this PR does / why we need it?

Converts the test files below under `tests/e2e/singlecard/` to ruff format, as batch #5 of the lint migration:

| File Path |
| :--- |
| `tests/e2e/singlecard/compile/backend.py` |
| `tests/e2e/singlecard/compile/test_graphex_norm_quant_fusion.py` |
| `tests/e2e/singlecard/compile/test_graphex_qknorm_rope_fusion.py` |
| `tests/e2e/singlecard/compile/test_norm_quant_fusion.py` |
| `tests/e2e/singlecard/model_runner_v2/test_basic.py` |
| `tests/e2e/singlecard/test_aclgraph_accuracy.py` |
| `tests/e2e/singlecard/test_aclgraph_batch_invariant.py` |
| `tests/e2e/singlecard/test_aclgraph_mem.py` |
| `tests/e2e/singlecard/test_async_scheduling.py` |
| `tests/e2e/singlecard/test_auto_fit_max_mode_len.py` |
| `tests/e2e/singlecard/test_batch_invariant.py` |
| `tests/e2e/singlecard/test_camem.py` |
| `tests/e2e/singlecard/test_completion_with_prompt_embeds.py` |
| `tests/e2e/singlecard/test_cpu_offloading.py` |
| `tests/e2e/singlecard/test_guided_decoding.py` |
| `tests/e2e/singlecard/test_ilama_lora.py` |
| `tests/e2e/singlecard/test_llama32_lora.py` |
| `tests/e2e/singlecard/test_models.py` |
| `tests/e2e/singlecard/test_multistream_overlap_shared_expert.py` |
| `tests/e2e/singlecard/test_quantization.py` |
| `tests/e2e/singlecard/test_qwen3_multi_loras.py` |
| `tests/e2e/singlecard/test_sampler.py` |
| `tests/e2e/singlecard/test_vlm.py` |
| `tests/e2e/singlecard/test_xlite.py` |
| `tests/e2e/singlecard/utils.py` |
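The exact invocation is not recorded in the PR; assuming the repository's standard Ruff configuration, the batch can be reproduced with `ruff format tests/e2e/singlecard/` and verified in check-only mode with `ruff format --check tests/e2e/singlecard/` (both are standard Ruff commands; the file list above simply scopes the change).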

### Does this PR introduce _any_ user-facing change?

No. This is a formatting-only change to test code.

### How was this patch tested?

- vLLM version: v0.15.0
- vLLM main: 9562912cea

---------

Signed-off-by: MrZ20 <2609716663@qq.com>
Author: SILONG ZENG
Date: 2026-02-24 15:50:00 +08:00
Committed by: GitHub
Parent: 747484cb64
Commit: 62ea664aa7
26 changed files with 859 additions and 1052 deletions

```diff
@@ -45,9 +45,7 @@ def test_minicpm(model) -> None:
     ]
     max_tokens = 5
-    with VllmRunner(model,
-                    max_model_len=512,
-                    gpu_memory_utilization=0.7) as runner:
+    with VllmRunner(model, max_model_len=512, gpu_memory_utilization=0.7) as runner:
         runner.generate_greedy(example_prompts, max_tokens)
@@ -56,19 +54,12 @@ def test_whisper(model) -> None:
     prompts = ["<|startoftranscript|><|en|><|transcribe|><|notimestamps|>"]
     audios = [AudioAsset("mary_had_lamb").audio_and_sample_rate]
-    sampling_params = SamplingParams(temperature=0.2,
-                                     max_tokens=10,
-                                     stop_token_ids=None)
+    sampling_params = SamplingParams(temperature=0.2, max_tokens=10, stop_token_ids=None)
-    with VllmRunner(model,
-                    max_model_len=448,
-                    max_num_seqs=5,
-                    dtype="bfloat16",
-                    block_size=128,
-                    gpu_memory_utilization=0.9) as runner:
-        outputs = runner.generate(prompts=prompts,
-                                  audios=audios,
-                                  sampling_params=sampling_params)
+    with VllmRunner(
+        model, max_model_len=448, max_num_seqs=5, dtype="bfloat16", block_size=128, gpu_memory_utilization=0.9
+    ) as runner:
+        outputs = runner.generate(prompts=prompts, audios=audios, sampling_params=sampling_params)
         assert outputs is not None, "Generated outputs should not be None."
         assert len(outputs) > 0, "Generated outputs should not be empty."
```
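These collapses follow ruff format's Black-compatible layout rule: a call whose arguments fit within the configured line length is joined onto one line, unless its last argument carries a "magic" trailing comma. (The collapsed `VllmRunner(...)` line above exceeds 88 characters, which suggests the project configures a larger line length; the configuration itself is not shown in this PR.) A minimal sketch of the rule, using vLLM's real `SamplingParams` but otherwise hypothetical code:

```python
# Hypothetical illustration (not from this PR) of ruff format's layout rule.
from vllm import SamplingParams

# No trailing comma after the last argument: ruff format joins the call
# onto a single line whenever it fits the configured line length.
params = SamplingParams(temperature=0.2, max_tokens=10, stop_token_ids=None)

# A "magic" trailing comma after the last argument: ruff format keeps the
# call expanded, one argument per line, even though it would fit on one.
params = SamplingParams(
    temperature=0.2,
    max_tokens=10,
    stop_token_ids=None,
)
```

None of the calls removed in the hunks above had trailing commas, which is why ruff format was free to collapse them.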