import torch
import torch_mlu
import torch_mlu_ops as tmo
from common import *
import argparse
from tabulate import tabulate
import os


def _make_case(num_batch, seq_len, num_expert, topk,
               num_expert_group, topk_group, normalize):
    """Build one benchmark-parameter dict for tmo.moe_softmax_topk.

    All cases use bfloat16 router logits; main() falls back to fp16 at
    run time when the device lacks bf16 support.
    """
    return {
        "num_batch": num_batch,
        "seq_len": seq_len,
        "num_expert": num_expert,
        "topk": topk,
        "num_expert_group": num_expert_group,
        "topk_group": topk_group,
        "normalize": normalize,
        "input_dtype": torch.bfloat16,
    }


# Benchmark matrix, generated instead of 23 hand-written dicts.
e2e_time_param_dict_list = (
    # 32 experts, top-5, no expert grouping, no weight normalization.
    [_make_case(1, s, 32, 5, -1, -1, False)
     for s in (1, 32, 72, 1024, 2048, 4096, 8192, 32768)]
    # 32 experts, top-5, normalized weights, varied batch sizes.
    + [_make_case(b, s, 32, 5, -1, -1, True)
       for b, s in ((1, 1), (2, 16), (2, 36), (8, 128),
                    (16, 128), (4, 1024), (2, 4096), (16, 2048))]
    # 160 experts split into 8 groups, top-3 groups, top-6 experts.
    + [_make_case(1, s, 160, 6, 8, 3, False)
       for s in (1, 16, 64, 1024, 2048, 8192, 32768)]
)


def _parse_args():
    """Parse command-line options for the benchmark run."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--repeat_times', type=int, default=10,
                        help='repeat times for testing')
    parser.add_argument('--csv', action='store_true',
                        help='write the report data to csv')
    parser.add_argument('-o', type=str,
                        help='specify the output folder name under --csv mode')
    return parser.parse_args()


def _benchmark_case(params_dict, band_width, repeat_times):
    """Run one moe_softmax_topk case and return a report row (list of str).

    Row layout matches the `titles` header in main(): the case parameters
    followed by hardware time, end-to-end latency, and IO efficiency.
    """
    num_batch = params_dict["num_batch"]
    seq_len = params_dict["seq_len"]
    num_expert = params_dict["num_expert"]
    topk = params_dict["topk"]
    num_expert_group = params_dict["num_expert_group"]
    topk_group = params_dict["topk_group"]
    normalize = params_dict["normalize"]
    dtype = params_dict["input_dtype"]

    # Fall back to fp16 on devices without bf16 support.
    if dtype == torch.bfloat16 and not torch_mlu.mlu.is_bf16_supported():
        dtype = torch.half

    # Router logits; renamed from `input` to avoid shadowing the builtin.
    logits = torch.randn(num_batch, seq_len, num_expert,
                         dtype=dtype, device='mlu')

    # Grouped-expert cases run without a mask, so skip the (previously
    # wasted) allocation. NOTE(review): the mask batch dim is 1 (not
    # num_batch) and it shares the logits dtype -- confirm against the
    # tmo.moe_softmax_topk API.
    if num_expert_group > 1:
        mask = None
    else:
        mask = torch.randint(0, 2, (1, seq_len, num_expert),
                             dtype=dtype, device='mlu')
    normed_by = "softmax_logit"

    # Output-sized tensors used only for the IO-byte estimate below.
    # NOTE(review): these shapes omit seq_len; if the op's real output is
    # (num_batch, seq_len, topk), the IO estimate undercounts -- verify.
    reduce_weight = torch.empty(num_batch, topk,
                                dtype=torch.float, device='mlu')
    expert_id = torch.empty(num_batch, topk,
                            dtype=torch.int32, device='mlu')

    hardware_time, e2e_time = benchmark_forward(
        tmo.moe_softmax_topk, logits, topk, normalize, num_expert_group,
        topk_group, mask, normed_by, repeats=repeat_times)

    io_bytes = (logits.element_size() * logits.nelement()
                + reduce_weight.element_size() * reduce_weight.nelement()
                + expert_id.element_size() * expert_id.nelement())
    io_eff = io_bytes / hardware_time / band_width

    return [f"{num_batch}", f"{seq_len}", f"{num_expert}", f"{topk}",
            f"{num_expert_group}", f"{topk_group}", f"{normalize}",
            f"{dtype}", f"{hardware_time}", f"{e2e_time}", f"{io_eff}"]


def main():
    """Benchmark tmo.moe_softmax_topk over the parameter matrix.

    Prints a grid table of per-case timings and, with --csv, also saves
    the table via save_to_csv (from `common`).
    """
    args = _parse_args()
    titles = ["num_batch", "seq_len", "num_expert", "topk",
              "num_expert_group", "topk_group", "normalize", "input_dtype",
              "hardware_time(us)", "e2e_latency(us)", "IO efficiency"]
    band_width = get_band_width()
    contents = [_benchmark_case(params, band_width, args.repeat_times)
                for params in e2e_time_param_dict_list]
    table = [titles] + contents
    print(tabulate(table, headers="firstrow", tablefmt="grid"))
    if args.csv:
        _, file_name = os.path.split(__file__)
        save_to_csv(table, args.o, file_name)


if __name__ == "__main__":
    main()