# Benchmark script for tmo.moe_active (MoE activation) on MLU devices.
# Source snapshot: 2026-02-04 17:39:32 +08:00 — 118 lines, 6.5 KiB, Python.
import torch
import torch_mlu
import torch_mlu_ops as tmo
from common import *
import argparse
from tabulate import tabulate
import os
import random
def _e2e_case(batch, seq_len, *, is_gated=False, is_ep=True):
    """Build one benchmark configuration; only batch/seq_len/gating/EP vary.

    All cases share inner_size=1024, gelu activation, bias enabled, and a
    bfloat16 input dtype list.
    """
    return {"batch": batch, "seq_len": seq_len, "inner_size": 1024,
            "act_mode": "gelu", "is_gated": is_gated, "has_bias": True,
            "is_ep": is_ep, "input_dtype": [torch.bfloat16]}


# Benchmark sweep: one gated + expert-parallel case, a seq_len sweep at
# batch 1, then a batch sweep at seq_len 1 (decode-like shapes).
e2e_time_param_dict_list = (
    [_e2e_case(1, 1024, is_gated=True),
     _e2e_case(1, 4096, is_ep=False),
     _e2e_case(1, 8192),
     _e2e_case(1, 32768)]
    + [_e2e_case(b, 1) for b in (1, 16, 32, 64, 128, 256, 512)]
)
def gen_data(num_expert,
             total_tokens,
             inner_size,
             output_stride,
             dtype,
             is_gated,
             has_bias,
             is_ep):
    """Generate random MLU-resident inputs for benchmarking tmo.moe_active.

    Args:
        num_expert: total number of experts in the model.
        total_tokens: number of tokens (batch * seq_len).
        inner_size: hidden size of the activation output.
        output_stride: row stride (in elements) of the output tensor.
        dtype: torch dtype for input/bias/output tensors.
        is_gated: gated activations read two interleaved chunks of
            inner_size, so the input channel dim doubles.
        has_bias: whether to generate a per-expert bias tensor.
        is_ep: expert parallelism — pick a random contiguous expert slice
            [start_expert_id, start_expert_id + expert_size) instead of all
            experts.

    Returns:
        (input, bias, token_count, cusum_token_count, output,
         start_expert_id, expert_size); bias is None when has_bias is False.
    """
    ci = inner_size * (1 + is_gated)
    act_input = torch.randn(total_tokens, ci, dtype=dtype, device='mlu')
    cusum_token_count, token_count = generate_token_count(num_expert, total_tokens)
    # BUG FIX: the original called output.as_strided(...) and discarded the
    # result — as_strided returns a new view and does not modify in place, so
    # output_stride was silently ignored. Allocate backing storage large
    # enough for the requested row stride, then take the strided view.
    storage = torch.empty(total_tokens * output_stride, dtype=dtype, device='mlu')
    output = storage.as_strided((total_tokens, inner_size), (output_stride, 1))
    start_expert_id = random.randint(0, num_expert - 1) if is_ep else 0
    expert_size = random.randint(1, num_expert - start_expert_id) if is_ep else num_expert
    bias = torch.randn(num_expert, ci, dtype=dtype, device='mlu') if has_bias else None
    return act_input, bias, token_count, cusum_token_count, output, start_expert_id, expert_size
def main():
    """Run the moe_active benchmark sweep and report a table (optionally CSV).

    For each configuration in e2e_time_param_dict_list: generate random data
    with a random expert count / expert slice, time tmo.moe_active, and
    compute IO efficiency against the device bandwidth from get_band_width().
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--repeat_times', type=int, default=10, help='repeat times for testing')
    parser.add_argument('--csv', action='store_true', help='write the report data to csv')
    parser.add_argument('-o', type=str, help='specify the output folder name under --csv mode')
    args = parser.parse_args()

    titles = ["input_shape", "act_mode", "is_gated", "has_bias", "expert_num", "start_expert_id",
              "expert_size", "input_dtype", "hardware_time(us)", "e2e_latency(us)", "IO efficiency"]
    contents = []
    bd = get_band_width()
    for params_dict in e2e_time_param_dict_list:
        batch = params_dict["batch"]
        seq_len = params_dict["seq_len"]
        inner_size = params_dict["inner_size"]
        act_mode = params_dict["act_mode"]
        is_gated = params_dict["is_gated"]
        input_dtype_list = params_dict["input_dtype"]
        has_bias = params_dict["has_bias"]
        is_ep = params_dict["is_ep"]
        for dtype in input_dtype_list:
            # Fall back to half precision when the device lacks bf16 support.
            if dtype == torch.bfloat16 and not torch_mlu.mlu.is_bf16_supported():
                dtype = torch.half
            # BUG FIX: original had a duplicated assignment
            # (expert_num = expert_num = random.randint(...)).
            expert_num = random.randint(1, 256)
            input, bias, token_count, cusum_token_count, output, start_expert_id, expert_size = \
                gen_data(expert_num, batch * seq_len, inner_size, inner_size, dtype, is_gated, has_bias, is_ep)
            # Only the active expert slice contributes bias traffic.
            real_bias = bias[start_expert_id:start_expert_id + expert_size] if has_bias else None
            hardware_time, e2e_time = benchmark_forward(tmo.moe_active,
                                                        input,
                                                        act_mode,
                                                        is_gated,
                                                        output,
                                                        real_bias,
                                                        cusum_token_count.mlu() if has_bias or is_ep else None,
                                                        start_expert_id,
                                                        expert_size,
                                                        repeats=args.repeat_times)
            # Bytes moved: input read + output write. Output is inner_size
            # wide vs. input's inner_size * (1 + is_gated), hence the
            # (2 - 0.5 * is_gated) factor on the input bytes.
            io_bytes = input.element_size() * input.nelement() * (2 - 0.5 * is_gated)
            # BUG FIX: the original wrote `a + b + (c) if cond else 0`; the
            # conditional expression binds looser than `+`, so the WHOLE sum
            # collapsed to 0 when neither has_bias nor is_ep. It also read
            # real_bias.element_size() whenever has_bias or is_ep, crashing
            # on has_bias=False, is_ep=True (real_bias is None there).
            if has_bias:
                io_bytes += real_bias.element_size() * real_bias.nelement()
            if has_bias or is_ep:
                io_bytes += cusum_token_count.element_size() * cusum_token_count.nelement()
            io_eff = io_bytes / hardware_time / bd
            content = [f"{batch,seq_len,inner_size}", f"{act_mode}", f"{is_gated}", f"{has_bias}", f"{expert_num}",
                       f"{start_expert_id}", f"{expert_size}", f"{dtype}", f"{hardware_time}", f"{e2e_time}", f"{io_eff}"]
            contents.append(content)

    table = [titles] + contents
    print(tabulate(table, headers="firstrow", tablefmt="grid"))
    if args.csv:
        # Name the CSV after this script file.
        _, file_name = os.path.split(__file__)
        save_to_csv(table, args.o, file_name)
# Script entry point: run the benchmark sweep when executed directly.
if __name__ == "__main__":
    main()