Fix bench latency benchmark (#1225)
Changed files:
  .github/workflows/e2e-test.yml (vendored) | 5 +++++
@@ -38,6 +38,11 @@ jobs:
           cd test/srt
           python3 -m unittest test_serving_throughput.TestServingThroughput.test_default
 
+      - name: Benchmark Serving Latency
+        timeout-minutes: 10
+        run: |
+          python3 -m sglang.bench_latency --model meta-llama/Meta-Llama-3.1-8B-Instruct --batch-size 1 --input 128 --output 8
+
       - name: Benchmark Serving Throughput (w/o RadixAttention)
         timeout-minutes: 10
         run: |
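The new CI step exercises the standalone latency benchmark that this commit fixes, with a single request of 128 prompt tokens and 8 decoded tokens. For reference, a minimal sketch of reproducing the same run locally (assuming sglang is installed and the Llama 3.1 weights are accessible):

# Minimal sketch: reproduce the CI latency benchmark locally.
# Assumes `sglang` is installed and the model weights can be downloaded.
import subprocess

subprocess.run(
    [
        "python3", "-m", "sglang.bench_latency",
        "--model", "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "--batch-size", "1",  # a single request, so the numbers reflect pure latency
        "--input", "128",     # 128 prompt tokens per request
        "--output", "8",      # decode 8 output tokens
    ],
    check=True,
)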
@@ -200,16 +200,14 @@ def extend(reqs, model_runner):
         tree_cache=None,
     )
     batch.prepare_for_extend(model_runner.model_config.vocab_size)
-    output = model_runner.forward(batch, ForwardMode.EXTEND)
-    next_token_ids = batch.sample(output.next_token_logits)
-    return next_token_ids, output.next_token_logits, batch
+    sample_output, logits_output = model_runner.forward(batch, ForwardMode.EXTEND)
+    return sample_output.batch_next_token_ids, logits_output.next_token_logits, batch
 
 
 def decode(input_token_ids, batch, model_runner):
     batch.prepare_for_decode(input_token_ids.cpu().numpy())
-    output = model_runner.forward(batch, ForwardMode.DECODE)
-    next_token_ids = batch.sample(output.next_token_logits)
-    return next_token_ids, output.next_token_logits
+    sample_output, logits_output = model_runner.forward(batch, ForwardMode.DECODE)
+    return sample_output.batch_next_token_ids, logits_output.next_token_logits
 
 
 @torch.inference_mode()
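The hunk above (evidently in the sglang.bench_latency script, given the commit title and the CI command) adapts the benchmark to a changed model-runner forward() contract: forward() now samples internally and returns a (sample_output, logits_output) pair, so the explicit batch.sample() call is gone. A minimal sketch of the two calling conventions; SampleOutput and LogitsOutput are hypothetical stand-ins for sglang's real output types:

# Sketch of the API change the benchmark adapts to. SampleOutput and
# LogitsOutput are hypothetical stand-ins for sglang's real types.
from dataclasses import dataclass
import torch

@dataclass
class SampleOutput:
    batch_next_token_ids: torch.Tensor  # token ids sampled inside the runner

@dataclass
class LogitsOutput:
    next_token_logits: torch.Tensor     # logits at the sampled positions

def old_style(model_runner, batch, mode):
    # Before: forward() returned a single logits object and the caller sampled.
    output = model_runner.forward(batch, mode)
    next_token_ids = batch.sample(output.next_token_logits)
    return next_token_ids, output.next_token_logits

def new_style(model_runner, batch, mode):
    # After: forward() samples internally and returns both pieces.
    sample_output, logits_output = model_runner.forward(batch, mode)
    return sample_output.batch_next_token_ids, logits_output.next_token_logits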