# forked from EngineX-Cambricon/enginex-mlu370-vllm
import argparse
import os
import random

import torch
import torch_mlu
import torch_mlu_ops as tmo
from tabulate import tabulate

from common import benchmark_forward, save_to_csv

# Parameter sets for the end-to-end timing sweep: identical tensor shapes,
# exercised once with KV-cache quantization and once without.
e2e_time_param_dict_list = [
    {
        "num_tokens": 1024,
        "num_block": 500,
        "block_size": 6,
        "head_num_q": 32,
        "head_num_kv": 32,
        "head_size": 128,
        "quantize": quantize_flag,
        "input_dtype": [torch.float16, torch.bfloat16],
    }
    for quantize_flag in (True, False)
]
|
|
|
|
def main():
    """Benchmark the MLU paged-KV-cache reshape/quantize kernels.

    For every parameter set in ``e2e_time_param_dict_list`` and every dtype in
    its ``input_dtype`` list, builds random packed-QKV tensors and paged cache
    buffers, times either ``tmo.reshape_paged_cache`` (quantize=False) or
    ``tmo.quant_to_paged_cache`` (quantize=True) with ``benchmark_forward``,
    prints the results as a grid table, and optionally writes them to CSV.
    """
    # NOTE(review): the benchmark is skipped entirely on MLU3xx devices; the
    # fork name mentions mlu370, so confirm this guard is not inverted.
    if 'MLU3' in torch.mlu.get_device_name():
        return  # plain return instead of exit(): main() is the script's last action

    parser = argparse.ArgumentParser()
    parser.add_argument('--repeat_times', type=int, default=10, help='repeat times for testing')
    parser.add_argument('--csv', action='store_true', help='write the report data to csv')
    parser.add_argument('-o', type=str, help='specify the output folder name under --csv mode')
    args = parser.parse_args()

    # Report column headers (fixed typo: "input_dytpe" -> "input_dtype").
    titles = ["num_tokens", "num_block", "block_size", "head_num_q", "head_num_kv",
              "head_size", "input_dtype", "quantize", "hardware_time(us)", "e2e_latency(us)"]
    contents = []
    for params_dict in e2e_time_param_dict_list:
        num_tokens = params_dict["num_tokens"]
        num_blocks = params_dict["num_block"]
        block_size = params_dict["block_size"]
        head_num_q = params_dict["head_num_q"]
        head_num_kv = params_dict["head_num_kv"]
        head_size = params_dict["head_size"]
        quantize = params_dict["quantize"]
        input_dtype_list = params_dict["input_dtype"]
        for dtype in input_dtype_list:
            # Packed QKV tensor: [num_tokens, q_heads + 2*kv_heads, head_size];
            # K and V are sliced out of the head dimension.
            qkv = torch.randn(num_tokens, head_num_q + 2 * head_num_kv, head_size, dtype=dtype).mlu()
            key = qkv[:, head_num_q : head_num_q + head_num_kv, :]
            value = qkv[:, head_num_q + head_num_kv : head_num_q + 2 * head_num_kv, :]

            key_cache = torch.randn(num_blocks, head_num_kv, block_size, head_size, dtype=dtype).mlu()
            value_cache = torch.randn(num_blocks, head_num_kv, block_size, head_size, dtype=dtype).mlu()

            # One distinct cache slot per token, sampled without replacement.
            num_slots = num_blocks * block_size
            slot_mapping = random.sample(range(num_slots), num_tokens)
            slot_mapping = torch.tensor(slot_mapping, dtype=torch.int).mlu()
            # Presumably -1 marks a padding/skipped token for the kernel —
            # TODO confirm against the tmo kernel contract.
            slot_mapping[-1] = -1

            if quantize:
                # Per-slot FP32 quantization scales for the quantized cache path.
                k_cache_quant_scale = torch.randn(num_blocks, head_num_kv, block_size).to('mlu').to(torch.float32)
                v_cache_quant_scale = torch.randn(num_blocks, head_num_kv, block_size).to('mlu').to(torch.float32)
                hardware_time, e2e_time = benchmark_forward(tmo.quant_to_paged_cache,
                                                            key, value,
                                                            key_cache, value_cache,
                                                            k_cache_quant_scale,
                                                            v_cache_quant_scale,
                                                            slot_mapping,
                                                            repeats=args.repeat_times)
            else:
                hardware_time, e2e_time = benchmark_forward(tmo.reshape_paged_cache,
                                                            key, value,
                                                            key_cache, value_cache,
                                                            slot_mapping,
                                                            repeats=args.repeat_times)

            contents.append([f"{num_tokens}", f"{num_blocks}", f"{block_size}",
                             f"{head_num_q}", f"{head_num_kv}", f"{head_size}",
                             f"{dtype}", f"{quantize}", f"{hardware_time}", f"{e2e_time}"])

    table = [titles] + contents
    print(tabulate(table, headers="firstrow", tablefmt="grid"))

    if args.csv:
        # The CSV report is named after this script file.
        _, file_name = os.path.split(__file__)
        save_to_csv(table, args.o, file_name)
|
|
# Script entry point: run the benchmark only when executed directly.
if __name__ == "__main__":
    main()