# forked from EngineX-Cambricon/enginex-mlu370-vllm
import torch
|
|
import torch_mlu
|
|
import torch_mlu_ops as tmo
|
|
from common import benchmark_forward, save_to_csv
|
|
import argparse
|
|
from tabulate import tabulate
|
|
import os
|
|
import random
|
|
|
|
# Benchmark configurations: one shared tensor geometry, exercised with the
# packed and unpacked context layouts, each twice, for fp16 and bf16 inputs.
e2e_time_param_dict_list = [
    {
        "max_batch": 20,
        "batch": 10,
        "cache_mem_len": 1024,
        "max_context_len": 512,
        "max_seq_offset": 20,
        "head_num_q": 32,
        "head_num_kv": 32,
        "head_size": 128,
        "packed": packed_flag,
        "input_dtype": [torch.float16, torch.bfloat16],
    }
    for packed_flag in (True, False, True, False)
]
|
|
|
|
def main():
    """Benchmark ``tmo.reshape_linear_cache`` over every configuration in
    ``e2e_time_param_dict_list`` and report hardware / end-to-end latency.

    Results are printed as a grid table and, with ``--csv``, also written to
    a CSV file named after this script under the ``-o`` output folder.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--repeat_times', type=int, default=10, help='repeat times for testing')
    parser.add_argument('--csv', action='store_true', help='write the report data to csv')
    parser.add_argument('-o', type=str, help='specify the output folder name under --csv mode')

    args = parser.parse_args()
    # FIX: header typo "input_dytpe" -> "input_dtype" (printed table + CSV column name).
    titles = ["batch", "max_context_len", "head_num_q", "head_num_kv", "head_size", "packed", "input_dtype", "hardware_time(us)", "e2e_latency(us)"]
    contents = []
    for params_dict in e2e_time_param_dict_list:
        max_batch = params_dict["max_batch"]
        batch = params_dict["batch"]
        cache_mem_len = params_dict["cache_mem_len"]
        max_context_len = params_dict["max_context_len"]
        max_seq_offset = params_dict["max_seq_offset"]
        head_num_q = params_dict["head_num_q"]
        head_num_kv = params_dict["head_num_kv"]
        head_size = params_dict["head_size"]
        packed = params_dict["packed"]
        input_dtype_list = params_dict["input_dtype"]
        for dtype in input_dtype_list:
            # Skip bf16 configurations on hardware without bf16 support.
            if dtype == torch.bfloat16 and not torch_mlu.mlu.is_bf16_supported():
                continue
            # randint with high == low + 1 yields a constant: every batch gets
            # the same context length / sequence offset.
            context_lens = torch.randint(size=(batch, ), low=max_context_len,
                                         high=max_context_len+1,
                                         dtype=torch.int32, device='mlu')
            context_seq_offsets = torch.randint(size=(batch, ), low=max_seq_offset, high=max_seq_offset+1,
                                                dtype=torch.int32, device='mlu')
            cache_seq_offsets = torch.randint(size=(batch, ), low=-1,
                                              high=(cache_mem_len - max_context_len) // 3 + 1,
                                              dtype=torch.int32, device='mlu')

            # Exclusive prefix sum of context lengths with a leading 0, so
            # cu_context_lens[i] is the start offset of sequence i in the
            # packed layout; cu_context_lens[-1] is the packed total length.
            cu_context_lens = torch.cumsum(context_lens, dim=-1)
            cu_context_lens = torch.nn.functional.pad(cu_context_lens, (1,0), "constant", 0).to(torch.int32)
            total_seqlen = cu_context_lens[-1]
            total_heads = head_num_q + 2 * head_num_kv
            if packed:
                # Packed layout: all sequences concatenated along dim 0.
                context = torch.randn((total_seqlen, total_heads, head_size),
                                      dtype=torch.float, device='mlu')
            else:
                # Padded layout: one row per batch, padded to max length + offset.
                context = torch.randn((batch, max_context_len + max_seq_offset, total_heads, head_size),
                                      dtype=torch.float, device='mlu')
            cache = torch.randn((2, max_batch, head_num_kv, cache_mem_len, head_size), dtype=torch.float, device='mlu')
            context = context.to(dtype)
            cache = cache.to(dtype)
            # Heads are laid out as [Q | K | V] along the head dimension.
            key = context[..., head_num_q : head_num_q + head_num_kv, :]
            value = context[..., head_num_q + head_num_kv : head_num_q + 2 * head_num_kv, :]
            key_cache = cache[0]
            value_cache = cache[1]

            # Map each active batch to a distinct slot of the (larger) cache.
            # (Removed a dead `cache_bs_id = None` that was immediately overwritten.)
            cache_bs_id = torch.IntTensor(random.sample(range(max_batch), batch)).mlu()

            # The two modes differ only in the lengths tensor and the per-batch
            # context offsets; packed mode takes cumulative lengths and no
            # offsets — merged the previously duplicated benchmark calls.
            seq_lens = cu_context_lens if packed else context_lens
            seq_offsets = None if packed else context_seq_offsets
            hardware_time, e2e_time = benchmark_forward(tmo.reshape_linear_cache,
                                                        key, value,
                                                        key_cache, value_cache,
                                                        seq_lens, max_context_len,
                                                        packed, seq_offsets,
                                                        cache_bs_id, cache_seq_offsets,
                                                        repeats=args.repeat_times)

            content = [f"{batch}", f"{max_context_len}", f"{head_num_q}", f"{head_num_kv}", f"{head_size}", f"{packed}", f"{dtype}", f"{hardware_time}", f"{e2e_time}"]
            contents.append(content)
    table = [titles] + contents
    print(tabulate(table, headers="firstrow", tablefmt="grid"))

    if args.csv:
        # CSV report is named after this script file, written under args.o.
        current_file_path = __file__
        _, file_name = os.path.split(current_file_path)
        save_to_csv(table, args.o, file_name)
|
|
|
|
|
|
# Script entry point: run the benchmark only when executed directly.
if __name__ == "__main__":
    main()
|