[Lint] Style: Convert tests/ to ruff format (Batch #5) (#6747)

### What this PR does / why we need it?

Batch #5 of the lint migration: converts the following files under `tests/e2e/singlecard/` to ruff formatting. No functional changes are intended.

| File Path |
| :--- |
| `tests/e2e/singlecard/compile/backend.py` |
| `tests/e2e/singlecard/compile/test_graphex_norm_quant_fusion.py` |
| `tests/e2e/singlecard/compile/test_graphex_qknorm_rope_fusion.py` |
| `tests/e2e/singlecard/compile/test_norm_quant_fusion.py` |
| `tests/e2e/singlecard/model_runner_v2/test_basic.py` |
| `tests/e2e/singlecard/test_aclgraph_accuracy.py` |
| `tests/e2e/singlecard/test_aclgraph_batch_invariant.py` |
| `tests/e2e/singlecard/test_aclgraph_mem.py` |
| `tests/e2e/singlecard/test_async_scheduling.py` |
| `tests/e2e/singlecard/test_auto_fit_max_mode_len.py` |
| `tests/e2e/singlecard/test_batch_invariant.py` |
| `tests/e2e/singlecard/test_camem.py` |
| `tests/e2e/singlecard/test_completion_with_prompt_embeds.py` |
| `tests/e2e/singlecard/test_cpu_offloading.py` |
| `tests/e2e/singlecard/test_guided_decoding.py` |
| `tests/e2e/singlecard/test_ilama_lora.py` |
| `tests/e2e/singlecard/test_llama32_lora.py` |
| `tests/e2e/singlecard/test_models.py` |
| `tests/e2e/singlecard/test_multistream_overlap_shared_expert.py` |
| `tests/e2e/singlecard/test_quantization.py` |
| `tests/e2e/singlecard/test_qwen3_multi_loras.py` |
| `tests/e2e/singlecard/test_sampler.py` |
| `tests/e2e/singlecard/test_vlm.py` |
| `tests/e2e/singlecard/test_xlite.py` |
| `tests/e2e/singlecard/utils.py` |
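
For reference, a batch conversion like this is normally produced by running ruff's formatter over the target directory; the sketch below is illustrative only, assuming ruff is installed and picks up the repository's existing configuration (e.g., from `pyproject.toml`):

```bash
# Reformat the target test directory in place.
ruff format tests/e2e/singlecard/

# Confirm the result is clean, mirroring what a CI lint job would check.
ruff format --check tests/e2e/singlecard/
```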

### Does this PR introduce _any_ user-facing change?

No. This is a formatting-only change to test code.

### How was this patch tested?

- vLLM version: v0.15.0
- vLLM main: 9562912cea

---------

Signed-off-by: MrZ20 <2609716663@qq.com>
Author: SILONG ZENG
Date: 2026-02-24 15:50:00 +08:00
Committed by: GitHub
Parent: 747484cb64
Commit: 62ea664aa7
26 changed files with 859 additions and 1052 deletions

Changed file shown below: `tests/e2e/singlecard/test_auto_fit_max_mode_len.py`

```diff
@@ -42,6 +42,7 @@ def new_kv_cache_spec(
         attention_chunk_size=attention_chunk_size,
     )
 
+
 def test_auto_fit_max_model_len():
     """Test that max_model_len=-1 auto-fits to available NPU memory."""
     # Create config with original_max_model_len=-1 to trigger auto-fit
@@ -59,9 +60,7 @@ def test_auto_fit_max_model_len():
 
     # With enough memory, max_model_len stays at the derived max
     large_available_memory = mem_per_block_per_layer * 2 * 1024  # plenty of memory
-    _kv_cache_configs = get_kv_cache_configs(
-        vllm_config, [kv_cache_specs], [large_available_memory]
-    )
+    _kv_cache_configs = get_kv_cache_configs(vllm_config, [kv_cache_specs], [large_available_memory])
     assert vllm_config.model_config.max_model_len == 1024
 
     # Reset for next test
@@ -73,9 +72,7 @@ def test_auto_fit_max_model_len():
     # Need memory for at least max_model_len tokens
     # 32 blocks worth of memory for 2 layers = can fit 32*16=512 tokens
     limited_memory = mem_per_block_per_layer * 2 * 32
-    _kv_cache_configs = get_kv_cache_configs(
-        vllm_config, [kv_cache_specs], [limited_memory]
-    )
+    _kv_cache_configs = get_kv_cache_configs(vllm_config, [kv_cache_specs], [limited_memory])
     # Should be reduced to fit in memory
     assert vllm_config.model_config.max_model_len < 1024
     assert vllm_config.model_config.max_model_len > 0
@@ -94,7 +91,5 @@ def test_auto_fit_max_model_len_not_triggered():
     }
 
     # This should work normally without auto-fit
-    _kv_cache_configs = get_kv_cache_configs(
-        vllm_config, [kv_cache_specs], [mem_per_block_per_layer * 2 * 32]
-    )
+    _kv_cache_configs = get_kv_cache_configs(vllm_config, [kv_cache_specs], [mem_per_block_per_layer * 2 * 32])
     assert vllm_config.model_config.max_model_len == 16
```
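
The same check can be run against just the file shown in the diff above (path taken from the PR's file list); this is an illustrative sketch, not part of the PR itself:

```bash
# An already-converted file should report no pending changes,
# confirming the formatting is idempotent and CI's lint check passes.
ruff format --check tests/e2e/singlecard/test_auto_fit_max_mode_len.py
```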