Files
xc-llm-ascend/tools/send_request.py
jiangyunfan1 48854aef5c [TEST]Add sending request with and without chat (#5286)
### What this PR does / why we need it?
This PR adds methods for sending chat and non-chat requests; we need
them to cover many subsequent test cases.

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?
By running the tests.

- vLLM version: release/v0.13.0
- vLLM main:
ad32e3e19c

---------

Signed-off-by: jiangyunfan1 <jiangyunfan1@h-partners.com>
2025-12-26 18:04:17 +08:00

from typing import Any

import requests


def send_v1_completions(prompt, model, server, request_args=None):
    """Send a non-chat request to /v1/completions and assert the reply is non-empty."""
    data: dict[str, Any] = {"model": model, "prompt": prompt}
    if request_args:
        # Allow callers to pass extra body fields, e.g. sampling parameters.
        data.update(request_args)
    url = server.url_for("v1", "completions")
    response = requests.post(url, json=data)
    print(f"Status Code: {response.status_code}")
    response_json = response.json()
    print(f"Response json: {response_json}")
    response_text = response_json["choices"][0]["text"]
    print(f"Response: {response_text}")
    assert response_text, "empty response"
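
# Illustrative call (not part of the original file): `server` is assumed to
# be a running-server test fixture such as vLLM's RemoteOpenAIServer, whose
# url_for() builds endpoint URLs, and `request_args` forwards extra body
# fields, e.g.:
#
#     send_v1_completions("Hello", "<model-name>", server,
#                         request_args={"max_tokens": 16, "temperature": 0.0})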

def send_v1_chat_completions(prompt, model, server, request_args=None):
    """Send a chat request to /v1/chat/completions and assert the reply is non-empty."""
    data: dict[str, Any] = {
        "model": model,
        "messages": [{
            "role": "user",
            "content": prompt,
        }],
    }
    if request_args:
        # Allow callers to pass extra body fields, e.g. sampling parameters.
        data.update(request_args)
    url = server.url_for("v1", "chat", "completions")
    response = requests.post(url, json=data)
    print(f"Status Code: {response.status_code}")
    response_json = response.json()
    print(f"Response json: {response_json}")
    response_text = response_json["choices"][0]["message"]["content"]
    print(f"Response: {response_text}")
    assert response_text, "empty response"
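
A minimal pytest sketch of how these helpers might be driven. The import paths, the server fixture, and the model name are assumptions for illustration, not defined in this file; RemoteOpenAIServer is the vLLM test utility that launches an OpenAI-compatible server and exposes url_for().

import pytest

from tests.utils import RemoteOpenAIServer  # assumed import path

from tools.send_request import (  # assumed module path for this file
    send_v1_chat_completions,
    send_v1_completions,
)

MODEL = "Qwen/Qwen2.5-0.5B-Instruct"  # placeholder model


@pytest.fixture(scope="module")
def server():
    # Launch the server once per module and tear it down afterwards.
    with RemoteOpenAIServer(MODEL, ["--max-model-len", "4096"]) as remote:
        yield remote


def test_send_requests(server):
    # Non-chat request against /v1/completions.
    send_v1_completions("Hello, my name is", MODEL, server,
                        request_args={"max_tokens": 16, "temperature": 0.0})
    # Chat request against /v1/chat/completions.
    send_v1_chat_completions("Say hello.", MODEL, server,
                             request_args={"max_tokens": 16})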