import argparse
import os

import torch
import torch_mlu
import torch_mlu_ops as tmo
from tabulate import tabulate

from common import benchmark_forward, save_to_csv

# Benchmark workloads for tmo.apply_rotary. Each dict describes one shape /
# variant combination; "input_dtype" lists the dtypes to benchmark for it.
e2e_time_param_dict_list = [
    {"batch": 1, "seq_len": 1024, "head_num": 32, "head_size": 128,
     "rotary_dim": 128, "interleaved": False, "discrete": True,
     "dynamic_ntk": False, "input_dtype": [torch.float16, torch.bfloat16]},
    {"batch": 1, "seq_len": 1024, "head_num": 40, "head_size": 128,
     "rotary_dim": 64, "interleaved": True, "discrete": False,
     "dynamic_ntk": False, "input_dtype": [torch.float16, torch.bfloat16]},
    {"batch": 1, "seq_len": 1024, "head_num": 52, "head_size": 128,
     "rotary_dim": 128, "interleaved": False, "discrete": True,
     "dynamic_ntk": False, "input_dtype": [torch.float16, torch.bfloat16]},
    {"batch": 1, "seq_len": 1024, "head_num": 64, "head_size": 128,
     "rotary_dim": 128, "interleaved": False, "discrete": True,
     "dynamic_ntk": False, "input_dtype": [torch.float16, torch.bfloat16]},
    {"batch": 1, "seq_len": 1024, "head_num": 25, "head_size": 64,
     "rotary_dim": 64, "interleaved": False, "discrete": True,
     "dynamic_ntk": False, "input_dtype": [torch.float16, torch.bfloat16]},
    {"batch": 1, "seq_len": 1024, "head_num": 64, "head_size": 96,
     "rotary_dim": 96, "interleaved": False, "discrete": True,
     "dynamic_ntk": False, "input_dtype": [torch.float16, torch.bfloat16]},
    {"batch": 4, "seq_len": 1, "head_num": 80, "head_size": 128,
     "rotary_dim": 128, "interleaved": False, "discrete": True,
     "dynamic_ntk": False, "input_dtype": [torch.float16, torch.bfloat16]},
]


def main():
    """Benchmark ``tmo.apply_rotary`` over ``e2e_time_param_dict_list``.

    Prints a table of hardware time and end-to-end latency per configuration,
    and optionally saves it as CSV (``--csv`` with output folder ``-o``).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--repeat_times', type=int, default=10,
                        help='repeat times for testing')
    parser.add_argument('--csv', action='store_true',
                        help='write the report data to csv')
    parser.add_argument('-o', type=str,
                        help='specify the output folder name under --csv mode')
    args = parser.parse_args()

    device = 'mlu'
    titles = ["batch", "seq_len", "head_num", "head_size", "rotary_dim",
              "interleaved", "discrete", "dynamic_ntk", "input_dtype",
              "hardware_time(us)", "e2e_latency(us)"]
    contents = []
    for params_dict in e2e_time_param_dict_list:
        batch = params_dict["batch"]
        seq_len = params_dict["seq_len"]
        head_num = params_dict["head_num"]
        head_size = params_dict["head_size"]
        # full/partial rotary embedding
        rotary_dim = params_dict["rotary_dim"]
        # cross/fold layout
        interleaved = params_dict["interleaved"]
        # discrete position ids
        discrete = params_dict["discrete"]
        # dynamic NTK scaling (per-batch sin/cos caches)
        dynamic_ntk = params_dict["dynamic_ntk"]
        for dtype in params_dict["input_dtype"]:
            # Skip bf16 cases on hardware without bf16 support.
            if dtype == torch.bfloat16 and not torch_mlu.mlu.is_bf16_supported():
                continue
            # Allocate directly on the device in the target dtype to avoid
            # the extra host float32 allocation + cast + copy that
            # randn(...).to(device).to(dtype) would incur.
            # Layout: [batch, seq_len, head_num, head_size]
            input = torch.randn(batch, seq_len, head_num, head_size,
                                device=device, dtype=dtype)
            # dynamic NTK uses a per-batch cache; otherwise one shared cache.
            if dynamic_ntk:
                cache_shape = (batch, seq_len, rotary_dim)
            else:
                cache_shape = (seq_len, rotary_dim)
            sin_cache = torch.randn(*cache_shape, device=device, dtype=dtype)
            cos_cache = torch.randn(*cache_shape, device=device, dtype=dtype)
            if discrete:
                pos_ids = torch.randint(0, seq_len, (batch * seq_len,),
                                        device=device, dtype=torch.int32)
            else:
                pos_ids = None
            hardware_time, e2e_time = benchmark_forward(
                tmo.apply_rotary, input, sin_cache, cos_cache, pos_ids, None,
                interleaved, discrete, dynamic_ntk, seq_len,
                repeats=args.repeat_times)
            contents.append([str(batch), str(seq_len), str(head_num),
                             str(head_size), str(rotary_dim),
                             str(interleaved), str(discrete),
                             str(dynamic_ntk), str(dtype),
                             str(hardware_time), str(e2e_time)])

    table = [titles] + contents
    print(tabulate(table, headers="firstrow", tablefmt="grid"))
    if args.csv:
        # Name the CSV report after this script file.
        file_name = os.path.basename(__file__)
        save_to_csv(table, args.o, file_name)


if __name__ == "__main__":
    main()