# forked from EngineX-Cambricon/enginex-mlu370-vllm
import torch
|
|
import torch_mlu
|
|
import torch_mlu_ops as tmo
|
|
from common import *
|
|
import argparse
|
|
from tabulate import tabulate
|
|
import os
|
|
import random
|
|
|
|
params_dict = [
|
|
{"token_num": 1, "hidden_size": 8192, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": True, "dtype": torch.bfloat16},
|
|
{"token_num": 16, "hidden_size": 8192, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": True, "dtype": torch.bfloat16},
|
|
{"token_num": 128, "hidden_size": 8192, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": True, "dtype": torch.bfloat16},
|
|
{"token_num": 490, "hidden_size": 8192, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": True, "dtype": torch.bfloat16},
|
|
{"token_num": 512, "hidden_size": 8192, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": True, "dtype": torch.bfloat16},
|
|
{"token_num": 525, "hidden_size": 8192, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": True, "dtype": torch.bfloat16},
|
|
{"token_num": 2048, "hidden_size": 8192, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": True, "dtype": torch.bfloat16},
|
|
{"token_num": 4096, "hidden_size": 8192, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": True, "dtype": torch.bfloat16},
|
|
{"token_num": 8192, "hidden_size": 8192, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": True, "dtype": torch.bfloat16},
|
|
{"token_num": 32768, "hidden_size": 8192, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": True, "dtype": torch.bfloat16},
|
|
|
|
{"token_num": 1, "hidden_size": 1024, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": False, "dtype": torch.bfloat16},
|
|
{"token_num": 16, "hidden_size": 1024, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": False, "dtype": torch.bfloat16},
|
|
{"token_num": 128, "hidden_size": 1024, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": False, "dtype": torch.bfloat16},
|
|
{"token_num": 490, "hidden_size": 1024, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": False, "dtype": torch.bfloat16},
|
|
{"token_num": 512, "hidden_size": 1024, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": False, "dtype": torch.bfloat16},
|
|
{"token_num": 525, "hidden_size": 1024, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": False, "dtype": torch.bfloat16},
|
|
{"token_num": 2048, "hidden_size": 1024, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": False, "dtype": torch.bfloat16},
|
|
{"token_num": 4096, "hidden_size": 1024, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": False, "dtype": torch.bfloat16},
|
|
{"token_num": 8192, "hidden_size": 1024, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": False, "dtype": torch.bfloat16},
|
|
{"token_num": 32768, "hidden_size": 1024, "expert_num": 32, "topk": 5,
|
|
"has_gather_idx": False, "dtype": torch.bfloat16},
|
|
]
|
|
|
|
def main():
|
|
parser = argparse.ArgumentParser()
|
|
parser.add_argument('--repeat_times', type=int, default=10, help='repeat times for testing')
|
|
parser.add_argument('--csv', action='store_true', help='write the report data to csv')
|
|
parser.add_argument('-o', type=str, help='specify the output folder name under --csv mode')
|
|
|
|
args = parser.parse_args()
|
|
device = 'mlu'
|
|
titles = ["token_num", "hidden_size", "expert_num", "topk", "has_gather_idx", "dtype",
|
|
"hardware_time(us)", "e2e_latency(us)", "IO efficiency"]
|
|
contents = []
|
|
bd = get_band_width()
|
|
for param in params_dict:
|
|
token_num, hidden_size, expert_num, topk, has_gather_idx, dtype = param.values()
|
|
if dtype == torch.bfloat16 and not torch_mlu.mlu.is_bf16_supported():
|
|
dtype = torch.half
|
|
if "MLU3" in torch.mlu.get_device_name():
|
|
has_gather_idx = False
|
|
expand_token_num = token_num * topk
|
|
input_shape = (token_num if has_gather_idx else expand_token_num, hidden_size)
|
|
input = torch.randn(input_shape).to(device).to(dtype)
|
|
scale = torch.randn(expert_num, hidden_size).to(device).to(torch.float32)
|
|
|
|
avg, rem = expand_token_num // expert_num, expand_token_num % expert_num
|
|
m_list = [avg + (i < rem) for i in range(expert_num)]
|
|
token_count = torch.tensor(m_list, dtype=torch.int32, device='mlu')
|
|
|
|
if has_gather_idx:
|
|
gather_idx = torch.arange(0, token_num).repeat([topk])
|
|
gather_idx = gather_idx[torch.randperm(gather_idx.size(0))].to(torch.int32).mlu()
|
|
else:
|
|
gather_idx = None
|
|
|
|
hardware_time, e2e_time = benchmark_forward(tmo.moe_quantize,
|
|
input,
|
|
scale,
|
|
None,
|
|
token_count,
|
|
gather_idx,
|
|
None,
|
|
None,
|
|
None,
|
|
True,
|
|
repeats=args.repeat_times)
|
|
expand_num = topk if has_gather_idx else 1
|
|
io_bytes = (input.element_size() + 1) * input.nelement() * expand_num
|
|
io_eff = io_bytes / hardware_time / bd
|
|
content = [f"{token_num}", f"{hidden_size}", f"{expert_num}",
|
|
f"{topk}", f"{has_gather_idx}", f"{dtype}",
|
|
f"{hardware_time}", f"{e2e_time}", f"{io_eff}"]
|
|
contents.append(content)
|
|
table = [titles] + contents
|
|
print(tabulate(table, headers="firstrow", tablefmt="grid"))
|
|
|
|
if args.csv:
|
|
current_file_path = __file__
|
|
_, file_name = os.path.split(current_file_path)
|
|
save_to_csv(table, args.o, file_name)
|
|
|
|
if __name__=="__main__":
|
|
main()
|