# forked from EngineX-Cambricon/enginex-mlu370-vllm
import torch
import torch_mlu
import torch_mlu_ops as tmo
from common import benchmark_forward, save_to_csv
import argparse
from tabulate import tabulate
import os

# for e2e time test
#
# Every sweep case shares the same GQA layout (8 query heads over 1 KV head,
# head_size 128, causal masking, softmax_scale 1e-6) and differs only in the
# (seq_q, seq_kv) lengths below.
_E2E_SEQ_PAIRS = [
    (32768, 32768),
    (16384, 16384),
    (8192, 24576),
    (4096, 28672),
    (4096, 32768),
]

e2e_time_param_dict_list = [
    {
        "batch": 1,
        "seq_q": sq,
        "seq_kv": skv,
        "head_num": 8,
        "head_num_kv": 1,
        "head_size": 128,
        "use_causal": True,
        "softmax_scale": 1e-6,
        "input_dtype": [torch.float16, torch.bfloat16],
    }
    for sq, skv in _E2E_SEQ_PAIRS
]

|
def main():
    """Benchmark ``tmo.flash_attention`` over the e2e parameter sweep.

    Command-line flags:
        --repeat_times: number of timed repetitions per case (default 10).
        --csv: additionally write the report table to a CSV file.
        -o: output folder name used when --csv is given.

    Prints a grid table with per-case hardware time and end-to-end latency.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--repeat_times', type=int, default=10, help='repeat times for testing')
    parser.add_argument('--csv', action='store_true', help='write the report data to csv')
    parser.add_argument('-o', type=str, help='specify the output folder name under --csv mode')
    args = parser.parse_args()

    device = 'mlu'
    titles = ["batch", "seq_q", "seq_kv", "head_num", "head_num_kv", "head_size",
              "use_causal", "input_dtype", "hardware_time(us)", "e2e_latency(us)"]
    contents = []
    for params_dict in e2e_time_param_dict_list:
        batch = params_dict["batch"]
        seq_q = params_dict["seq_q"]
        seq_kv = params_dict["seq_kv"]
        head_num = params_dict["head_num"]
        head_num_kv = params_dict["head_num_kv"]
        head_size = params_dict["head_size"]
        use_causal = params_dict["use_causal"]
        softmax_scale = params_dict["softmax_scale"]
        input_dtype_list = params_dict["input_dtype"]
        for dtype in input_dtype_list:
            # Skip bf16 cases on hardware without bf16 support.
            if dtype == torch.bfloat16 and not torch_mlu.mlu.is_bf16_supported():
                continue
            if seq_q == seq_kv:
                # Packed QKV: dim 2 is [q heads | k heads | v heads].
                qkv = torch.randn(batch, seq_q, head_num + 2 * head_num_kv, head_size).to(device).to(dtype)
                q = qkv[:, :, : head_num, :]
                k = qkv[:, :, head_num : head_num + head_num_kv, :]
                # BUGFIX: upper bound was `head_num + head_num * 2`, which only
                # produced the right slice by accident (Python clamps
                # out-of-range slice bounds).  V is the last head_num_kv heads.
                v = qkv[:, :, head_num + head_num_kv : head_num + 2 * head_num_kv, :]
            elif seq_q < seq_kv:
                q = torch.randn(batch, seq_q, head_num, head_size).to(device).to(dtype)
                # Packed KV: dim 2 is [k heads | v heads].
                kv = torch.randn(batch, seq_kv, head_num_kv * 2, head_size).to(device).to(dtype)
                k = kv[:, :, : head_num_kv, :]
                v = kv[:, :, head_num_kv :, :]
            else:
                # Previously q/k/v from the prior iteration would silently be
                # reused here (or a NameError raised on the first case).
                raise ValueError(f"unsupported configuration: seq_q ({seq_q}) > seq_kv ({seq_kv})")
            hardware_time, e2e_time = benchmark_forward(tmo.flash_attention,
                                                        q=q,
                                                        k=k,
                                                        v=v,
                                                        out=None,
                                                        cu_seq_lens_q=None,
                                                        cu_seq_lens_kv=None,
                                                        alibi_slope=None,
                                                        attn_bias=None,
                                                        max_seq_len_q=seq_q,
                                                        max_seq_len_kv=seq_kv,
                                                        softmax_scale=softmax_scale,
                                                        is_causal=use_causal,
                                                        window_size_left=-1,
                                                        window_size_right=-1,
                                                        compute_dtype=dtype,
                                                        return_lse=False,
                                                        block_tables=None,
                                                        k_cache_quant_scale=None,
                                                        v_cache_quant_scale=None,
                                                        repeats=args.repeat_times)
            contents.append([f"{batch}", f"{seq_q}", f"{seq_kv}", f"{head_num}",
                             f"{head_num_kv}", f"{head_size}", f"{use_causal}",
                             f"{dtype}", f"{hardware_time}", f"{e2e_time}"])
    table = [titles] + contents
    print(tabulate(table, headers="firstrow", tablefmt="grid"))

    if args.csv:
        # Name the CSV after this script file.
        _, file_name = os.path.split(__file__)
        save_to_csv(table, args.o, file_name)
if __name__ == "__main__":
    main()