### What this PR does / why we need it?
**Scope of Changes**: a formatting-only cleanup of the e2e tests listed below. Argument lists and call sites are rewrapped to one argument per line with trailing commas; no functional changes. A minimal sketch of the applied style follows the table.
| File Path |
| :--- |
| `tests/e2e/310p/multicard/test_vl_model_multicard.py` |
| `tests/e2e/310p/singlecard/test_vl_model_singlecard.py` |
| `tests/e2e/310p/test_utils.py` |
| `tests/e2e/conftest.py` |
| `tests/e2e/model_utils.py` |
| `tests/e2e/models/conftest.py` |
| `tests/e2e/models/test_lm_eval_correctness.py` |
| `tests/e2e/multicard/2-cards/spec_decode/test_spec_decode.py` |
| `tests/e2e/multicard/2-cards/test_aclgraph_capture_replay.py` |
| `tests/e2e/multicard/2-cards/test_data_parallel.py` |
| `tests/e2e/multicard/2-cards/test_disaggregated_encoder.py` |
| `tests/e2e/multicard/2-cards/test_expert_parallel.py` |
| `tests/e2e/multicard/2-cards/test_external_launcher.py` |
| `tests/e2e/multicard/2-cards/test_full_graph_mode.py` |
| `tests/e2e/multicard/2-cards/test_ilama_lora_tp2.py` |
| `tests/e2e/multicard/2-cards/test_offline_inference_distributed.py` |
| `tests/e2e/multicard/2-cards/test_offline_weight_load.py` |
| `tests/e2e/multicard/2-cards/test_pipeline_parallel.py` |
| `tests/e2e/multicard/2-cards/test_prefix_caching.py` |
| `tests/e2e/multicard/2-cards/test_quantization.py` |
| `tests/e2e/multicard/2-cards/test_qwen3_moe.py` |
| `tests/e2e/multicard/2-cards/test_qwen3_moe_routing_replay.py` |
| `tests/e2e/multicard/2-cards/test_qwen3_performance.py` |
| `tests/e2e/multicard/2-cards/test_shared_expert_dp.py` |
| `tests/e2e/multicard/2-cards/test_single_request_aclgraph.py` |
| `tests/e2e/multicard/2-cards/test_sp_pass.py` |
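As a minimal sketch of the style applied across these files (illustrative, mirroring the representative diff further below; `server_args` is taken from the reformatted test):

```python
# Before: several arguments packed per line (old layout)
server_args = [
    "--no-enable-prefix-caching", "--tensor-parallel-size", "1",
    "--data-parallel-size",
    str(dp_size), "--port",
    str(port), "--trust-remote-code", "--gpu-memory-utilization", "0.9"
]

# After: one argument per line, with a trailing comma
server_args = [
    "--no-enable-prefix-caching",
    "--tensor-parallel-size",
    "1",
    "--data-parallel-size",
    str(dp_size),
    "--port",
    str(port),
    "--trust-remote-code",
    "--gpu-memory-utilization",
    "0.9",
]
```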
### Does this PR introduce _any_ user-facing change?
No. Only test files are touched, and the changes are purely stylistic.
### How was this patch tested?
- vLLM version: v0.15.0
- vLLM main: 9562912cea
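No explicit test command is recorded above; a minimal local check of one of the reformatted suites (a hypothetical invocation, assuming the standard vllm-ascend e2e environment and the two NPU cards this suite expects) could look like:

```python
# Hypothetical sanity check: run one reformatted e2e suite via pytest's API.
# Assumes the vllm-ascend test environment (this suite expects 2 cards).
import sys

import pytest

# pytest.main returns an exit code; propagate it like the pytest CLI would.
sys.exit(
    pytest.main([
        "tests/e2e/multicard/2-cards/test_single_request_aclgraph.py",
        "-v",
    ]))
```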
Signed-off-by: MrZ20 <2609716663@qq.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
Representative diff (from `tests/e2e/multicard/2-cards/test_single_request_aclgraph.py`):

```diff
@@ -39,8 +39,7 @@ api_keyword_args = {
 @pytest.mark.asyncio
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("dp_size", DATA_PARALLELS)
-async def test_models_single_request_aclgraph_dp2(model: str,
-                                                  dp_size: int) -> None:
+async def test_models_single_request_aclgraph_dp2(model: str, dp_size: int) -> None:
     port = get_open_port()
     env_dict = {
         "TASK_QUEUE_ENABLE": "1",
@@ -48,36 +47,51 @@ async def test_models_single_request_aclgraph_dp2(model: str,
     }
     if model == "vllm-ascend/DeepSeek-V2-Lite-W8A8":
         server_args = [
-            "--no-enable-prefix-caching", "--tensor-parallel-size", "1",
+            "--no-enable-prefix-caching",
+            "--tensor-parallel-size",
+            "1",
             "--data-parallel-size",
-            str(dp_size), "--quantization", "ascend", "--max-model-len",
-            "1024", "--port",
-            str(port), "--trust-remote-code", "--gpu-memory-utilization", "0.9"
+            str(dp_size),
+            "--quantization",
+            "ascend",
+            "--max-model-len",
+            "1024",
+            "--port",
+            str(port),
+            "--trust-remote-code",
+            "--gpu-memory-utilization",
+            "0.9",
         ]
     else:
         server_args = [
-            "--no-enable-prefix-caching", "--tensor-parallel-size", "1",
+            "--no-enable-prefix-caching",
+            "--tensor-parallel-size",
+            "1",
             "--data-parallel-size",
-            str(dp_size), "--port",
-            str(port), "--trust-remote-code", "--gpu-memory-utilization", "0.9"
+            str(dp_size),
+            "--port",
+            str(port),
+            "--trust-remote-code",
+            "--gpu-memory-utilization",
+            "0.9",
         ]
     request_keyword_args: dict[str, Any] = {
         **api_keyword_args,
     }
-    with RemoteOpenAIServer(model,
-                            vllm_serve_args=server_args,
-                            server_port=port,
-                            env_dict=env_dict,
-                            auto_port=False) as server:
+    with RemoteOpenAIServer(
+        model, vllm_serve_args=server_args, server_port=port, env_dict=env_dict, auto_port=False
+    ) as server:
         client = server.get_async_client()
 
         try:
-            batch = await asyncio.wait_for(client.completions.create(
-                model=model,
-                prompt=prompts,
-                **request_keyword_args,
-            ),
-                                           timeout=10.0)
+            batch = await asyncio.wait_for(
+                client.completions.create(
+                    model=model,
+                    prompt=prompts,
+                    **request_keyword_args,
+                ),
+                timeout=10.0,
+            )
         except asyncio.TimeoutError:
             pytest.fail("Model did not return response within 10 seconds")
 
```