[CI] test chunked prefill more (#5798)
This commit is contained in:
1
.github/workflows/pr-test.yml
vendored
1
.github/workflows/pr-test.yml
vendored
@@ -123,6 +123,7 @@ jobs:
|
||||
timeout-minutes: 10
|
||||
run: |
|
||||
cd test/srt
|
||||
python3 -m unittest test_bench_one_batch.TestBenchOneBatch.test_bs1_small
|
||||
python3 -m unittest test_bench_one_batch.TestBenchOneBatch.test_bs1_default
|
||||
|
||||
- name: Benchmark online latency
|
||||
|
||||
@@ -54,20 +54,21 @@ Please consult the documentation below and [server_args.py](https://github.com/s
|
||||
|
||||
| Arguments | Description | Defaults |
|
||||
|----------|-------------|---------|
|
||||
| `model_path` | Path to the model that will be served. | None |
|
||||
| `tokenizer_path` | Defaults to the `model_path`. | None |
|
||||
| `model_path` | The path of the model weights. This can be a local folder or a Hugging Face repo ID. | None |
|
||||
| `tokenizer_path` | The path of the tokenizer. Defaults to the `model_path`. | None |
|
||||
| `tokenizer_mode` | See [different mode](https://huggingface.co/docs/transformers/en/main_classes/tokenizer). | `auto` |
|
||||
| `load_format` | The format the weights are loaded in. | `auto` |
|
||||
| `trust_remote_code` | If `true`, will use locally cached config files, otherwise use remote configs in HuggingFace. | `False` |
|
||||
| `dtype` | Dtype used for the model. | `bfloat16` |
|
||||
| `kv_cache_dtype` | Dtype of the kv cache. | `dtype` |
|
||||
| `context_length` | The number of tokens our model can process *including the input*. Note that extending the default might lead to strange behavior. | None |
|
||||
| `load_format` | The format of the model weights to load. | `auto` |
|
||||
| `trust_remote_code` | Whether or not to allow for custom models defined on the Hub in their own modeling files. | `False` |
|
||||
| `dtype` | Dtype used for the model. | `auto` |
|
||||
| `kv_cache_dtype` | Dtype of the kv cache. | `auto` |
|
||||
| `context_length` | The model's maximum context length. Defaults to None (will use the value from the model's config.json instead). Note that extending the default might lead to strange behavior. | None |
|
||||
| `device` | The device we put the model. | None |
|
||||
| `chat_template` | The chat template to use. See [multi-modal templates](https://docs.sglang.ai/backend/openai_api_vision.ipynb#Chat-Template). **Make sure the correct `chat_template` is passed, or performance degradation may occur!!!!** | None |
|
||||
| `device` | The device we put the model. | None |
|
||||
| `served_model_name` | Override the model name returned by the v1/models endpoint in OpenAI API server.| None |
|
||||
| `is_embedding` | Set to `true` to perform [embedding](./openai_api_embeddings.ipynb) / [encode](https://docs.sglang.ai/backend/native_api#Encode-(embedding-model)) and [reward](https://docs.sglang.ai/backend/native_api#Classify-(reward-model)) tasks. | `False` |
|
||||
| `revision` | Adjust if a specific version of the model should be used. | None |
|
||||
| `skip_tokenizer_init` | Set to `true` to provide the tokens to the engine and get the output tokens directly, typically used in RLHF. See [example](https://github.com/sgl-project/sglang/blob/main/examples/runtime/token_in_token_out/). | `False` |
|
||||
| `json_model_override_args` | Override model config with the provided JSON. | `"{}"` |
|
||||
| `json_model_override_args` | A dictionary in JSON string format used to override default model configurations. | `"{}"` |
|
||||
| `disable_fast_image_processor` | Adopt base image processor instead of fast image processor (which is by default). See [details](https://huggingface.co/docs/transformers/main/en/main_classes/image_processor#image-processor). | `False` |
|
||||
|
||||
## Serving: HTTP & API
|
||||
@@ -188,17 +189,6 @@ Please consult the documentation below and [server_args.py](https://github.com/s
|
||||
| `speculative_eagle_topk` | The number of top candidates we keep for verification at each step for [Eagle](https://arxiv.org/html/2406.16858v1). | None |
|
||||
| `speculative_token_map` | Optional, the path to the high frequency token list of [FR-Spec](https://arxiv.org/html/2502.14856v1), used for accelerating [Eagle](https://arxiv.org/html/2406.16858v1). | None |
|
||||
|
||||
## Double Sparsity
|
||||
|
||||
| Arguments | Description | Defaults |
|
||||
|----------|-------------|---------|
|
||||
| `enable_double_sparsity` | Enables [double sparsity](https://arxiv.org/html/2408.07092v2) which increases throughput. | `False` |
|
||||
| `ds_channel_config_path` | The double sparsity config. See [a guide on how to generate the config for your model](https://github.com/andy-yang-1/DoubleSparse/tree/main/config). | None |
|
||||
| `ds_heavy_channel_num` | Number of channel indices to keep for each layer. | `32` |
|
||||
| `ds_heavy_token_num` | Number of tokens used for attention during decode. Skip sparse decoding if `min_seq_len` in batch is less than this number. | `256` |
|
||||
| `ds_heavy_channel_type` | The type of heavy channels. Options are `q`, `k` or `qk`. | `qk` |
|
||||
| `ds_sparse_decode_threshold` | Don't apply sparse decoding if `max_seq_len` in batch < this threshold. | `4096` |
|
||||
|
||||
## Debug options
|
||||
|
||||
*Note: We recommend to stay with the defaults and only use these options for debugging for best possible performance.*
|
||||
|
||||
@@ -975,7 +975,7 @@ class ModelRunner:
|
||||
after_mem = get_available_gpu_memory(self.device, self.gpu_id)
|
||||
logger.info(
|
||||
f"Capture cuda graph end. Time elapsed: {time.time() - tic:.2f} s. "
|
||||
f"avail mem={after_mem:.2f} GB. mem usage={(before_mem - after_mem):.2f} GB."
|
||||
f"mem usage={(before_mem - after_mem):.2f} GB. avail mem={after_mem:.2f} GB."
|
||||
)
|
||||
|
||||
def apply_torch_tp(self):
|
||||
|
||||
@@ -426,7 +426,7 @@ class ServerArgs:
|
||||
parser.add_argument(
|
||||
"--skip-tokenizer-init",
|
||||
action="store_true",
|
||||
help="If set, skip init tokenizer and pass input_ids in generate request",
|
||||
help="If set, skip init tokenizer and pass input_ids in generate request.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--enable-tokenizer-batch-encode",
|
||||
@@ -565,6 +565,7 @@ class ServerArgs:
|
||||
"name, a tag name, or a commit id. If unspecified, will use "
|
||||
"the default version.",
|
||||
)
|
||||
|
||||
# Memory and scheduling
|
||||
parser.add_argument(
|
||||
"--mem-fraction-static",
|
||||
|
||||
@@ -6,11 +6,56 @@ python3 -m sglang.test.send_one
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import dataclasses
|
||||
import json
|
||||
|
||||
import requests
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class BenchArgs:
|
||||
host: str = "localhost"
|
||||
port: int = 30000
|
||||
batch_size: int = 1
|
||||
temperature: float = 0.0
|
||||
max_new_tokens: int = 512
|
||||
frequency_penalty: float = 0.0
|
||||
presence_penalty: float = 0.0
|
||||
json: bool = False
|
||||
return_logprob: bool = False
|
||||
prompt: str = (
|
||||
"Human: Give me a fully functional FastAPI server. Show the python code.\n\nAssistant:"
|
||||
)
|
||||
image: bool = False
|
||||
stream: bool = False
|
||||
|
||||
@staticmethod
|
||||
def add_cli_args(parser: argparse.ArgumentParser):
|
||||
parser.add_argument("--host", type=str, default=BenchArgs.host)
|
||||
parser.add_argument("--port", type=int, default=BenchArgs.port)
|
||||
parser.add_argument("--batch-size", type=int, default=BenchArgs.batch_size)
|
||||
parser.add_argument("--temperature", type=float, default=BenchArgs.temperature)
|
||||
parser.add_argument(
|
||||
"--max-new-tokens", type=int, default=BenchArgs.max_new_tokens
|
||||
)
|
||||
parser.add_argument(
|
||||
"--frequency-penalty", type=float, default=BenchArgs.frequency_penalty
|
||||
)
|
||||
parser.add_argument(
|
||||
"--presence-penalty", type=float, default=BenchArgs.presence_penalty
|
||||
)
|
||||
parser.add_argument("--json", action="store_true")
|
||||
parser.add_argument("--return-logprob", action="store_true")
|
||||
parser.add_argument("--prompt", type=str, default=BenchArgs.prompt)
|
||||
parser.add_argument("--image", action="store_true")
|
||||
parser.add_argument("--stream", action="store_true")
|
||||
|
||||
@classmethod
|
||||
def from_cli_args(cls, args: argparse.Namespace):
|
||||
attrs = [attr.name for attr in dataclasses.fields(cls)]
|
||||
return cls(**{attr: getattr(args, attr) for attr in attrs})
|
||||
|
||||
|
||||
def send_one_prompt(args):
|
||||
if args.image:
|
||||
args.prompt = (
|
||||
@@ -20,20 +65,42 @@ def send_one_prompt(args):
|
||||
else:
|
||||
image_data = None
|
||||
|
||||
response = requests.post(
|
||||
"http://localhost:30000/generate",
|
||||
json={
|
||||
"text": args.prompt,
|
||||
"image_data": image_data,
|
||||
"sampling_params": {
|
||||
"temperature": args.temperature,
|
||||
"max_new_tokens": args.max_new_tokens,
|
||||
"frequency_penalty": args.frequency_penalty,
|
||||
"presence_penalty": args.presence_penalty,
|
||||
},
|
||||
"return_logprob": args.return_logprob,
|
||||
"stream": args.stream,
|
||||
prompt = args.prompt
|
||||
|
||||
if args.json:
|
||||
prompt = (
|
||||
"Human: What is the capital of France and how is that city like. "
|
||||
"Give me 3 trivial information about that city. "
|
||||
"Write in a format of json.\nAssistant:"
|
||||
)
|
||||
json_schema = "$$ANY$$"
|
||||
json_schema = (
|
||||
'{"type": "object", "properties": {"population": {"type": "integer"}}}'
|
||||
)
|
||||
else:
|
||||
json_schema = None
|
||||
|
||||
if args.batch_size > 1:
|
||||
prompt = [prompt] * args.batch_size
|
||||
|
||||
json_data = {
|
||||
"text": prompt,
|
||||
"image_data": image_data,
|
||||
"sampling_params": {
|
||||
"temperature": args.temperature,
|
||||
"max_new_tokens": args.max_new_tokens,
|
||||
"frequency_penalty": args.frequency_penalty,
|
||||
"presence_penalty": args.presence_penalty,
|
||||
"json_schema": json_schema,
|
||||
"stop": ["Question", "Assistant:", "<|separator|>", "<|eos|>"],
|
||||
},
|
||||
"return_logprob": args.return_logprob,
|
||||
"stream": args.stream,
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"http://{args.host}:{args.port}/generate",
|
||||
json=json_data,
|
||||
stream=args.stream,
|
||||
)
|
||||
|
||||
@@ -47,6 +114,9 @@ def send_one_prompt(args):
|
||||
else:
|
||||
ret = response.json()
|
||||
|
||||
if args.batch_size > 1:
|
||||
ret = ret[0]
|
||||
|
||||
latency = ret["meta_info"]["e2e_latency"]
|
||||
|
||||
if "spec_verify_ct" in ret["meta_info"]:
|
||||
@@ -68,21 +138,7 @@ def send_one_prompt(args):
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--temperature", type=float, default=0.0)
|
||||
parser.add_argument("--max-new-tokens", type=int, default=512)
|
||||
parser.add_argument("--frequency-penalty", type=float, default=0.0)
|
||||
parser.add_argument("--presence-penalty", type=float, default=0.0)
|
||||
parser.add_argument("--return-logprob", action="store_true")
|
||||
parser.add_argument(
|
||||
"--prompt",
|
||||
type=str,
|
||||
default="Human: Give me a fully functional FastAPI server. Show the python code.\n\nAssistant:",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--image",
|
||||
action="store_true",
|
||||
)
|
||||
parser.add_argument("--stream", action="store_true")
|
||||
BenchArgs.add_cli_args(parser)
|
||||
args = parser.parse_args()
|
||||
|
||||
send_one_prompt(args)
|
||||
|
||||
@@ -732,6 +732,44 @@ def run_bench_one_batch(model, other_args):
|
||||
return output_throughput
|
||||
|
||||
|
||||
def run_bench_offline_throughput(model, other_args):
|
||||
command = [
|
||||
"python3",
|
||||
"-m",
|
||||
"sglang.bench_offline_throughput",
|
||||
"--num-prompts",
|
||||
"1",
|
||||
"--dataset-name",
|
||||
"random",
|
||||
"--random-input-len",
|
||||
"256",
|
||||
"--random-output-len",
|
||||
"256",
|
||||
"--model-path",
|
||||
model,
|
||||
*[str(x) for x in other_args],
|
||||
]
|
||||
|
||||
print(f"{command=}")
|
||||
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
try:
|
||||
stdout, stderr = process.communicate()
|
||||
output = stdout.decode()
|
||||
error = stderr.decode()
|
||||
print(f"Output: {output}", flush=True)
|
||||
print(f"Error: {error}", flush=True)
|
||||
|
||||
output_throughput = -1
|
||||
for line in output.split("\n"):
|
||||
if "Last generation throughput (tok/s):" in line:
|
||||
output_throughput = float(line.split(":")[-1])
|
||||
finally:
|
||||
kill_process_tree(process.pid)
|
||||
|
||||
return output_throughput
|
||||
|
||||
|
||||
def lcs(X, Y):
|
||||
m = len(X)
|
||||
n = len(Y)
|
||||
|
||||
@@ -26,7 +26,7 @@ class TestDummyGrok1(CustomTestCase):
|
||||
)
|
||||
|
||||
if is_in_ci():
|
||||
assert output_throughput > 0, f"{output_throughput=}"
|
||||
self.assertGreater(output_throughput, 0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -64,7 +64,7 @@ class TestVLMModels(CustomTestCase):
|
||||
model = "openai_compatible"
|
||||
tp = 1
|
||||
tasks = "mmmu_val"
|
||||
batch_size = 1
|
||||
batch_size = 2
|
||||
log_suffix = "openai_compatible"
|
||||
os.makedirs(output_path, exist_ok=True)
|
||||
|
||||
@@ -125,6 +125,9 @@ class TestVLMModels(CustomTestCase):
|
||||
"--chat-template",
|
||||
model.chat_template,
|
||||
"--trust-remote-code",
|
||||
"--cuda-graph-max-bs",
|
||||
"32",
|
||||
"--enable-multimodal",
|
||||
"--mem-fraction-static",
|
||||
str(self.parsed_args.mem_fraction_static), # Use class variable
|
||||
],
|
||||
@@ -171,7 +174,7 @@ if __name__ == "__main__":
|
||||
"--mem-fraction-static",
|
||||
type=float,
|
||||
help="Static memory fraction for the model",
|
||||
default=0.6,
|
||||
default=0.8,
|
||||
)
|
||||
|
||||
# Parse args intended for unittest
|
||||
|
||||
@@ -3,16 +3,28 @@ import unittest
|
||||
from sglang.test.test_utils import (
|
||||
DEFAULT_MODEL_NAME_FOR_TEST,
|
||||
DEFAULT_MOE_MODEL_NAME_FOR_TEST,
|
||||
DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
|
||||
CustomTestCase,
|
||||
is_in_ci,
|
||||
run_bench_offline_throughput,
|
||||
run_bench_one_batch,
|
||||
write_github_step_summary,
|
||||
)
|
||||
|
||||
# We use `run_bench_offline_throughput`` instead of `run_bench_one_batch` for most cases
|
||||
# because `run_bench_offline_throughput`` has overlap scheduler.
|
||||
|
||||
|
||||
class TestBenchOneBatch(CustomTestCase):
|
||||
def test_bs1_default(self):
|
||||
|
||||
def test_bs1_small(self):
|
||||
output_throughput = run_bench_one_batch(
|
||||
DEFAULT_SMALL_MODEL_NAME_FOR_TEST, ["--cuda-graph-max-bs", "2"]
|
||||
)
|
||||
self.assertGreater(output_throughput, 50)
|
||||
|
||||
def test_bs1_default(self):
|
||||
output_throughput = run_bench_offline_throughput(
|
||||
DEFAULT_MODEL_NAME_FOR_TEST, ["--cuda-graph-max-bs", "2"]
|
||||
)
|
||||
|
||||
@@ -24,26 +36,26 @@ class TestBenchOneBatch(CustomTestCase):
|
||||
self.assertGreater(output_throughput, 135)
|
||||
|
||||
def test_moe_tp2_bs1(self):
|
||||
output_throughput = run_bench_one_batch(
|
||||
output_throughput = run_bench_offline_throughput(
|
||||
DEFAULT_MOE_MODEL_NAME_FOR_TEST, ["--tp", "2", "--cuda-graph-max-bs", "2"]
|
||||
)
|
||||
|
||||
if is_in_ci():
|
||||
write_github_step_summary(
|
||||
f"### test_moe_tp2_bs1\n"
|
||||
f"### test_moe_tp2_bs1 (Mixtral-8x7B)\n"
|
||||
f"output_throughput: {output_throughput:.2f} token/s\n"
|
||||
)
|
||||
self.assertGreater(output_throughput, 125)
|
||||
|
||||
def test_torch_compile_tp2_bs1(self):
|
||||
output_throughput = run_bench_one_batch(
|
||||
output_throughput = run_bench_offline_throughput(
|
||||
DEFAULT_MODEL_NAME_FOR_TEST,
|
||||
["--tp", "2", "--enable-torch-compile", "--cuda-graph-max-bs", "2"],
|
||||
)
|
||||
|
||||
if is_in_ci():
|
||||
write_github_step_summary(
|
||||
f"### test_torch_compile_tp2_bs1\n"
|
||||
f"### test_torch_compile_tp2_bs1 (Mixtral-8x7B)\n"
|
||||
f"output_throughput: {output_throughput:.2f} token/s\n"
|
||||
)
|
||||
self.assertGreater(output_throughput, 220)
|
||||
|
||||
@@ -5,13 +5,13 @@ import requests
|
||||
|
||||
from sglang.srt.utils import kill_process_tree
|
||||
from sglang.test.few_shot_gsm8k import run_eval as run_eval_few_shot_gsm8k
|
||||
from sglang.test.send_one import BenchArgs, send_one_prompt
|
||||
from sglang.test.test_utils import (
|
||||
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
|
||||
DEFAULT_URL_FOR_TEST,
|
||||
CustomTestCase,
|
||||
is_in_ci,
|
||||
popen_launch_server,
|
||||
run_bench_one_batch,
|
||||
write_github_step_summary,
|
||||
)
|
||||
|
||||
@@ -48,22 +48,23 @@ class TestDeepseekV3(CustomTestCase):
|
||||
metrics = run_eval_few_shot_gsm8k(args)
|
||||
print(f"{metrics=}")
|
||||
|
||||
self.assertGreater(metrics["accuracy"], 0.935)
|
||||
if is_in_ci():
|
||||
write_github_step_summary(
|
||||
f"### test_gsm8k (deepseek-v3)\n" f'{metrics["accuracy"]=:.3f}\n'
|
||||
)
|
||||
self.assertGreater(metrics["accuracy"], 0.935)
|
||||
|
||||
def test_bs_1_speed(self):
|
||||
args = BenchArgs(port=int(self.base_url.split(":")[-1]), max_new_tokens=2048)
|
||||
acc_length, speed = send_one_prompt(args)
|
||||
|
||||
class TestBenchOneBatch(CustomTestCase):
|
||||
def test_bs1(self):
|
||||
output_throughput = run_bench_one_batch(
|
||||
FULL_DEEPSEEK_V3_MODEL_PATH,
|
||||
["--trust-remote-code", "--tp", "8", "--cuda-graph-max-bs", "2"],
|
||||
)
|
||||
print(f"{output_throughput=:.2f} token/s")
|
||||
print(f"{speed=:.2f}")
|
||||
|
||||
if is_in_ci():
|
||||
write_github_step_summary(
|
||||
f"### test_bs1 (deepseek-v3)\n" f"{output_throughput=:.2f} token/s\n"
|
||||
f"### test_bs_1_speed (deepseek-v3)\n" f"{speed=:.2f} token/s\n"
|
||||
)
|
||||
self.assertGreater(output_throughput, 70)
|
||||
self.assertGreater(speed, 75)
|
||||
|
||||
|
||||
class TestDeepseekV3MTP(CustomTestCase):
|
||||
@@ -80,13 +81,13 @@ class TestDeepseekV3MTP(CustomTestCase):
|
||||
"--speculative-draft",
|
||||
"lmsys/DeepSeek-V3-0324-NextN",
|
||||
"--speculative-num-steps",
|
||||
"5",
|
||||
"3",
|
||||
"--speculative-eagle-topk",
|
||||
"4",
|
||||
"2",
|
||||
"--speculative-num-draft-tokens",
|
||||
"8",
|
||||
"4",
|
||||
"--mem-fraction-static",
|
||||
"0.6",
|
||||
"0.7",
|
||||
]
|
||||
cls.process = popen_launch_server(
|
||||
cls.model,
|
||||
@@ -113,19 +114,34 @@ class TestDeepseekV3MTP(CustomTestCase):
|
||||
)
|
||||
metrics = run_eval_few_shot_gsm8k(args)
|
||||
print(f"{metrics=}")
|
||||
self.assertGreater(metrics["accuracy"], 0.94)
|
||||
|
||||
server_info = requests.get(self.base_url + "/get_server_info")
|
||||
avg_spec_accept_length = server_info.json()["avg_spec_accept_length"]
|
||||
print(f"{avg_spec_accept_length=}")
|
||||
self.assertGreater(avg_spec_accept_length, 3.2)
|
||||
|
||||
if is_in_ci():
|
||||
write_github_step_summary(
|
||||
f"### test_gsm8k (deepseek-v3)\n"
|
||||
f"### test_gsm8k (deepseek-v3 mtp)\n"
|
||||
f'{metrics["accuracy"]=:.3f}\n'
|
||||
f"{avg_spec_accept_length=:.2f}\n"
|
||||
)
|
||||
self.assertGreater(metrics["accuracy"], 0.935)
|
||||
self.assertGreater(avg_spec_accept_length, 2.9)
|
||||
|
||||
def test_bs_1_speed(self):
|
||||
args = BenchArgs(port=int(self.base_url.split(":")[-1]), max_new_tokens=2048)
|
||||
acc_length, speed = send_one_prompt(args)
|
||||
|
||||
print(f"{acc_length=:.2f} {speed=:.2f}")
|
||||
|
||||
if is_in_ci():
|
||||
write_github_step_summary(
|
||||
f"### test_bs_1_speed (deepseek-v3 mtp)\n"
|
||||
f"{acc_length=:.2f}\n"
|
||||
f"{speed=:.2f} token/s\n"
|
||||
)
|
||||
self.assertGreater(acc_length, 2.9)
|
||||
self.assertGreater(speed, 105)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -26,6 +26,8 @@ class TestMLA(CustomTestCase):
|
||||
"--enable-torch-compile",
|
||||
"--cuda-graph-max-bs",
|
||||
"2",
|
||||
"--chunked-prefill-size",
|
||||
"256",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ class TestMLADeepseekV3(CustomTestCase):
|
||||
def setUpClass(cls):
|
||||
cls.model = "lmsys/sglang-ci-dsv3-test"
|
||||
cls.base_url = DEFAULT_URL_FOR_TEST
|
||||
other_args = ["--trust-remote-code"]
|
||||
other_args = ["--trust-remote-code", "--chunked-prefill-size", "256"]
|
||||
if torch.cuda.is_available() and torch.version.cuda:
|
||||
other_args.extend(["--enable-torch-compile", "--cuda-graph-max-bs", "2"])
|
||||
cls.process = popen_launch_server(
|
||||
|
||||
@@ -13,23 +13,11 @@ from sglang.test.test_utils import (
|
||||
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
|
||||
DEFAULT_URL_FOR_TEST,
|
||||
CustomTestCase,
|
||||
is_in_ci,
|
||||
popen_launch_server,
|
||||
run_bench_one_batch,
|
||||
)
|
||||
|
||||
|
||||
class TestTorchNativeAttnBackend(CustomTestCase):
|
||||
def test_latency(self):
|
||||
output_throughput = run_bench_one_batch(
|
||||
DEFAULT_MODEL_NAME_FOR_TEST,
|
||||
["--attention-backend", "torch_native"],
|
||||
)
|
||||
|
||||
if is_in_ci():
|
||||
# Torch native backend is expected to be slower
|
||||
self.assertGreater(output_throughput, 40)
|
||||
|
||||
def test_mmlu(self):
|
||||
model = DEFAULT_MODEL_NAME_FOR_TEST
|
||||
base_url = DEFAULT_URL_FOR_TEST
|
||||
|
||||
@@ -1,23 +1,29 @@
|
||||
import unittest
|
||||
|
||||
from sglang.test.test_utils import CustomTestCase, is_in_ci, run_bench_one_batch
|
||||
from sglang.test.test_utils import (
|
||||
DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
|
||||
CustomTestCase,
|
||||
is_in_ci,
|
||||
run_bench_offline_throughput,
|
||||
)
|
||||
|
||||
|
||||
class TestTorchTP(CustomTestCase):
|
||||
def test_torch_native_llama(self):
|
||||
output_throughput = run_bench_one_batch(
|
||||
"meta-llama/Meta-Llama-3-8B",
|
||||
output_throughput = run_bench_offline_throughput(
|
||||
DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
|
||||
[
|
||||
"--tp",
|
||||
"2",
|
||||
"--json-model-override-args",
|
||||
'{"architectures": ["TorchNativeLlamaForCausalLM"]}',
|
||||
# This cannot run anymore with the new torch version.
|
||||
# "--json-model-override-args",
|
||||
# '{"architectures": ["TorchNativeLlamaForCausalLM"]}',
|
||||
"--disable-cuda-graph",
|
||||
],
|
||||
)
|
||||
|
||||
if is_in_ci():
|
||||
assert output_throughput > 0, f"{output_throughput=}"
|
||||
self.assertGreater(output_throughput, 0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -15,13 +15,13 @@ from sglang.test.test_utils import (
|
||||
CustomTestCase,
|
||||
is_in_ci,
|
||||
popen_launch_server,
|
||||
run_bench_one_batch,
|
||||
run_bench_offline_throughput,
|
||||
)
|
||||
|
||||
|
||||
class TestTritonAttnBackend(CustomTestCase):
|
||||
def test_latency(self):
|
||||
output_throughput = run_bench_one_batch(
|
||||
output_throughput = run_bench_offline_throughput(
|
||||
DEFAULT_MODEL_NAME_FOR_TEST,
|
||||
[
|
||||
"--attention-backend",
|
||||
@@ -32,6 +32,8 @@ class TestTritonAttnBackend(CustomTestCase):
|
||||
],
|
||||
)
|
||||
|
||||
print(f"{output_throughput=}")
|
||||
|
||||
if is_in_ci():
|
||||
self.assertGreater(output_throughput, 153)
|
||||
|
||||
|
||||
Reference in New Issue
Block a user