# forked from EngineX-Cambricon/enginex-mlu370-vllm
import torch
import torch_mlu  # Cambricon MLU torch extension; used for torch_mlu.mlu.is_bf16_supported()
import torch_mlu_ops as tmo  # fused MLU kernels; this script benchmarks tmo.moe_combine_result
# NOTE(review): star import — supplies get_band_width, benchmark_forward,
# generate_token_count and save_to_csv used below; TODO confirm and prefer
# explicit imports.
from common import *
import argparse
from tabulate import tabulate
import os
# All benchmark configs share the same MoE topology; only the batch size
# (num_tokens) is swept.  The shared fields live in one base dict and each
# entry overrides num_tokens.  NOTE: the "dtype" list object is shared across
# entries; it is only ever read.
_E2E_BASE_PARAMS = {
    "num_expert": 32,
    "topk": 5,
    "start_expert_id": 0,
    "expert_size": 32,
    "has_residual": False,
    "hidden_size": 8192,
    "dtype": [torch.bfloat16],
}

_E2E_NUM_TOKENS_SWEEP = (16, 128, 490, 525, 2048, 4096, 8192, 32768)

e2e_time_param_dict_list = [
    dict(_E2E_BASE_PARAMS, num_tokens=n) for n in _E2E_NUM_TOKENS_SWEEP
]
def gen_case(num_tokens,
             topk,
             hidden_size,
             num_expert,
             expert_size,
             has_bias,
             has_residual,
             dtype,
             device):
    """Generate random inputs for the moe_combine_result benchmark.

    Args:
        num_tokens: number of tokens in the batch.
        topk: experts selected per token.
        hidden_size: hidden dimension of the activations.
        num_expert: total number of experts in the model.
        expert_size: number of experts held by this rank.
        has_bias: whether to generate a per-expert bias tensor.
        has_residual: whether to generate a per-token residual tensor.
        dtype: activation dtype (combine weights stay float32).
        device: torch device string, e.g. 'mlu' or 'cpu'.

    Returns:
        Tuple (input, reduce_weight, gather_ids, residual, bias,
        cusum_token_count); residual/bias/cusum_token_count are None when
        not requested.
    """
    # Renamed from `input` to avoid shadowing the builtin: expanded
    # (num_tokens * topk, hidden_size) activations to be combined.
    inp = torch.randn((num_tokens * topk, hidden_size), dtype=dtype, device=device)
    # Per-token combine weights are kept in fp32 regardless of activation dtype.
    reduce_weight = torch.randn((num_tokens, topk), dtype=torch.float32, device=device)
    # Random permutation mapping each expanded row to a (token, k) slot.
    gather_ids = torch.randperm(num_tokens * topk, dtype=torch.int32, device=device)

    bias = torch.randn((num_expert, hidden_size), dtype=dtype, device=device) if has_bias else None
    residual = torch.randn((num_tokens, hidden_size), dtype=dtype, device=device) if has_residual else None

    # Cumulative per-expert token counts are only needed when a bias is
    # applied or when this rank holds a strict subset of the experts.
    cusum_token_count = None
    if has_bias or expert_size < num_expert:
        # generate_token_count comes from `common`; presumably returns a
        # cumulative histogram with num_expert + 1 entries (get_io_bytes
        # indexes up to start_expert_id + expert_size) -- TODO confirm.
        cusum_token_count, _ = generate_token_count(num_expert, num_tokens * topk)
        cusum_token_count = cusum_token_count.to(device=device)

    return inp, reduce_weight, gather_ids, residual, bias, cusum_token_count
def get_io_bytes(num_tokens,
                 topk,
                 hidden_size,
                 num_expert,
                 expert_size,
                 start_expert_id,
                 has_bias,
                 has_residual,
                 dtype,
                 cusum_token_count,
                 gather_ids):
    """Estimate total bytes moved (read + written) by moe_combine_result.

    Used to compute the achieved-bandwidth coefficient
    io_bytes / hardware_time / bandwidth in main().

    Args:
        cusum_token_count: cumulative per-expert token counts (or None when
            every expanded row is read); indexed at start_expert_id and
            start_expert_id + expert_size, so it must have at least
            num_expert + 1 entries when expert_size == num_expert.
        gather_ids: int32 permutation of the expanded rows; only used for
            filtering when cusum_token_count is not None.

    Returns:
        Estimated bytes moved (int, or Python number).
    """
    # Generalized from the original hard-coded `4 if float32 else 2`:
    # element_size() is correct for any dtype (fp16/bf16 -> 2, fp32 -> 4,
    # fp64 -> 8, fp8 -> 1) while preserving the original values for the
    # dtypes this script sweeps.
    dtype_size = torch.empty((), dtype=dtype).element_size()

    io_bytes = 0
    if cusum_token_count is not None:
        # Only rows owned by experts in
        # [start_expert_id, start_expert_id + expert_size) are read.
        lo = cusum_token_count[start_expert_id]
        hi = cusum_token_count[start_expert_id + expert_size]
        in_range = (gather_ids >= lo) & (gather_ids < hi)
        io_bytes += int(torch.count_nonzero(in_range).item()) * hidden_size * dtype_size
    else:
        io_bytes += num_tokens * topk * hidden_size * dtype_size

    if has_bias:
        # One bias row per local expert.
        io_bytes += expert_size * hidden_size * dtype_size

    if has_residual:
        # One residual row per token.
        io_bytes += num_tokens * hidden_size * dtype_size

    # One 4-byte word per expanded row. NOTE(review): gather_ids (int32) and
    # reduce_weight (fp32) are both 4 B/row; only one of the two is counted
    # here -- matches the original accounting, TODO confirm intent.
    io_bytes += num_tokens * topk * 4
    # Output: one combined row per token.
    io_bytes += num_tokens * hidden_size * dtype_size
    return io_bytes
def main() -> None:
    """Benchmark tmo.moe_combine_result over e2e_time_param_dict_list on the
    MLU device and print a grid report (optionally also saved to CSV).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--repeat_times', type=int, default=10, help='repeat times for testing')
    parser.add_argument('--csv', action='store_true', help='write the report data to csv')
    parser.add_argument('-o', type=str, help='specify the output folder name under --csv mode')

    args = parser.parse_args()
    device = 'mlu'
    # Report columns. io_coeff = io_bytes / hardware_time / bandwidth, i.e.
    # the fraction of peak memory bandwidth the kernel achieves.
    titles = ["num_tokens", "num_expert", "topk", "start_expert_id", "expert_size", \
              "hidden_size", "has_residual", "dtype", "hardware_time(us)", "e2e_latency(us)", "io_coeff"]
    contents = []
    # get_band_width comes from `common`; presumably the device's peak memory
    # bandwidth in units consistent with io_bytes/us -- TODO confirm units.
    bandwidth = get_band_width()
    for params_dict in e2e_time_param_dict_list:
        num_tokens = params_dict["num_tokens"]
        num_expert = params_dict["num_expert"]
        topk = params_dict["topk"]
        start_expert_id = params_dict["start_expert_id"]
        expert_size = params_dict["expert_size"]
        has_residual = params_dict["has_residual"]
        hidden_size = params_dict["hidden_size"]
        dtype_list = params_dict["dtype"]
        for dtype in dtype_list:
            # Skip bf16 configs on hardware without bf16 support.
            if dtype == torch.bfloat16 and not torch_mlu.mlu.is_bf16_supported():
                continue
            # has_bias is hard-coded False for this benchmark (5th arg).
            inputs = gen_case(num_tokens,
                              topk,
                              hidden_size,
                              num_expert,
                              expert_size,
                              False,
                              has_residual,
                              dtype,
                              device)
            input = inputs[0]
            reduce_weight = inputs[1]
            gather_ids = inputs[2]
            residual = inputs[3]
            bias = inputs[4]
            cusum_token_count = inputs[5]

            io_bytes = get_io_bytes(num_tokens,
                                    topk,
                                    hidden_size,
                                    num_expert,
                                    expert_size,
                                    start_expert_id,
                                    False,
                                    has_residual,
                                    dtype,
                                    cusum_token_count,
                                    gather_ids)

            # benchmark_forward comes from `common`; presumably returns
            # (device kernel time, end-to-end latency) in microseconds,
            # matching the column titles -- TODO confirm.
            hardware_time, e2e_time = benchmark_forward(tmo.moe_combine_result, input, reduce_weight,
                                                        gather_ids,residual, cusum_token_count,
                                                        start_expert_id, expert_size,
                                                        repeats=args.repeat_times)
            io_coeff = io_bytes / hardware_time / bandwidth
            content = [f"{num_tokens}", f"{num_expert}", f"{topk}", f"{start_expert_id}", \
                       f"{expert_size}", f"{hidden_size}", f"{has_residual}", f"{dtype}", \
                       f"{hardware_time}", f"{e2e_time}", f"{io_coeff}"]
            contents.append(content)
    table = [titles] + contents
    print(tabulate(table, headers="firstrow", tablefmt="grid"))

    if args.csv:
        # Name the CSV after this script file; save_to_csv comes from `common`.
        current_file_path = __file__
        _, file_name = os.path.split(current_file_path)
        save_to_csv(table, args.o, file_name)
if __name__ == "__main__":
    main()