Fix lora bench (#6302)

This commit is contained in:
Qiaolin Yu
2025-05-15 13:09:55 -04:00
committed by GitHub
parent f194e14fb7
commit cd8d4b9dfc
2 changed files with 3 additions and 3 deletions

View File

@@ -170,6 +170,7 @@ async def benchmark(
prompt_len=test_prompt_len,
output_len=test_output_len,
lora_name="dummy", # the lora_name argument will not be used
+image_data=None,
extra_request_body=extra_request_body,
)
test_output = await request_func(request_func_input=test_input)
@@ -194,6 +195,7 @@ async def benchmark(
prompt_len=prompt_len,
output_len=output_len,
lora_name="dummy",
+image_data=None,
extra_request_body=extra_request_body,
)
tasks.append(

View File

@@ -170,9 +170,7 @@ class LoRAManager:
dim=0,
out=self.cuda_graph_batch_info.seg_indptr[1 : bs + 1],
)
-self.cuda_graph_batch_info.max_len = int(
-    torch.max(self.cuda_graph_batch_info.seg_lens[:bs])
-)
+self.cuda_graph_batch_info.max_len = 1
for i, lora_path in enumerate(forward_batch.lora_paths):
self.cuda_graph_batch_info.weight_indices[i] = (