add pkgs
examples/gptj/build.py
@@ -0,0 +1,489 @@
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
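"""Build script for GPT-J TensorRT engines.

Loads GPT-J weights from a Hugging Face checkpoint (or an AWQ-quantized
checkpoint) and builds one engine per tensor-parallel rank, serially or in
parallel.
"""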

import argparse
import json
import os
import time

import tensorrt as trt
import torch
import torch.multiprocessing as mp
from transformers import AutoModelForCausalLM
from weight import get_scaling_factors, load_from_awq_gpt_j, load_from_hf_gpt_j

import xtrt_llm
from xtrt_llm.builder import Builder
from xtrt_llm.logger import logger
from xtrt_llm.mapping import Mapping
from xtrt_llm.models import (weight_only_groupwise_quantize,
                             weight_only_quantize)
from xtrt_llm.network import net_guard
from xtrt_llm.plugin.plugin import ContextFMHAType
from xtrt_llm.quantization import QuantMode

MODEL_NAME = "gptj"

# Loaded checkpoint state; populated by parse_arguments() when --model_dir is
# given (hf_gpt holds the HF or AWQ model, awq_gptj_config its config.json).
hf_gpt = None
awq_gptj_config = None


def get_engine_name(model, dtype, tp_size, rank):
    return '{}_{}_tp{}_rank{}.engine'.format(model, dtype, tp_size, rank)
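# For example, get_engine_name('gptj', 'float16', 2, 0) returns
# 'gptj_float16_tp2_rank0.engine'.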


def serialize_engine(engine, path):
    logger.info(f'Serializing engine to {path}...')
    tik = time.time()
    engine.serialize(path)
    tok = time.time()
    t = time.strftime('%H:%M:%S', time.gmtime(tok - tik))
    logger.info(f'Engine serialized. Total time: {t}')


def parse_arguments(args):
    parser = argparse.ArgumentParser()
    parser.add_argument('--world_size',
                        type=int,
                        default=1,
                        help='World size; only tensor parallelism is supported for now')
    parser.add_argument(
        '--model_dir',
        type=str,
        default=None,
        help='The path to the HF GPT-J model / checkpoints to read weights from')
    parser.add_argument('--dtype',
                        type=str,
                        default='float16',
                        choices=['float16', 'float32'])
    parser.add_argument('--logits_dtype',
                        type=str,
                        default='float32',
                        choices=['float16', 'float32'])
    parser.add_argument(
        '--timing_cache',
        type=str,
        default='model.cache',
        help=
        'The path to read the timing cache from; ignored if the file does not exist'
    )
    parser.add_argument('--log_level', type=str, default='info')
    parser.add_argument('--vocab_size', type=int, default=50401)
    parser.add_argument('--n_layer', type=int, default=28)
    parser.add_argument('--n_positions', type=int, default=2048)
    parser.add_argument('--n_embd', type=int, default=4096)
    parser.add_argument('--n_head', type=int, default=16)
    parser.add_argument('--hidden_act', type=str, default='gelu')
    parser.add_argument('--rotary_dim', type=int, default=64)
    parser.add_argument('--max_batch_size', type=int, default=256)
    parser.add_argument('--max_input_len', type=int, default=200)
    parser.add_argument('--max_output_len', type=int, default=200)
    parser.add_argument('--max_beam_width', type=int, default=1)
    parser.add_argument('--use_gpt_attention_plugin',
                        nargs='?',
                        const='float16',
                        type=str,
                        default=False,
                        choices=['float16', 'float32'])
    parser.add_argument('--use_gemm_plugin',
                        nargs='?',
                        const='float16',
                        type=str,
                        default=False,
                        choices=['float16', 'float32'])
    parser.add_argument('--use_weight_only_quant_matmul_plugin',
                        nargs='?',
                        const='float16',
                        type=str,
                        default=False,
                        choices=['float16'])
    parser.add_argument('--use_layernorm_plugin',
                        nargs='?',
                        const='float16',
                        type=str,
                        default=False,
                        choices=['float16', 'float32'])
    parser.add_argument('--parallel_build', default=False, action='store_true')
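    # Note: the plugin flags above use nargs='?' with const='float16', so
    # passing e.g. --use_gemm_plugin with no value selects 'float16', while
    # omitting the flag leaves it False (plugin disabled).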
    parser.add_argument('--enable_context_fmha',
                        default=False,
                        action='store_true')
    parser.add_argument('--enable_context_fmha_fp32_acc',
                        default=False,
                        action='store_true')
    parser.add_argument('--gpus_per_node', type=int, default=8)
    parser.add_argument(
        '--output_dir',
        type=str,
        default='gpt_outputs',
        help=
        'The path to save the serialized engine files, timing cache file and model configs'
    )
    parser.add_argument('--remove_input_padding',
                        default=False,
                        action='store_true')
    parser.add_argument('--enable_fp8', default=False, action='store_true')
    parser.add_argument(
        '--quantized_fp8_model_path',
        type=str,
        default=None,
        help='Path of a quantized model checkpoint in .npz format')
    parser.add_argument(
        '--fp8_kv_cache',
        default=False,
        action="store_true",
        help=
        'By default, we use the model dtype for the KV cache. fp8_kv_cache chooses FP8 quantization for the KV cache instead'
    )
    parser.add_argument(
        '--use_inflight_batching',
        action="store_true",
        default=False,
        help="Activates inflight batching mode of gptAttentionPlugin.")
    parser.add_argument(
        '--enable_two_optimization_profiles',
        default=False,
        action='store_true',
        help=
        "Enables two optimization profiles during the engine build, one for the context phase and one for the generation phase. By default (and always for inflight batching), only one optimization profile is used."
    )
    parser.add_argument(
        '--paged_kv_cache',
        action="store_true",
        default=False,
        help=
        'By default we use a contiguous KV cache. Setting this flag enables a paged KV cache instead'
    )
    parser.add_argument('--tokens_per_block',
                        type=int,
                        default=64,
                        help='Number of tokens per block in the paged KV cache')
    parser.add_argument(
        '--max_num_tokens',
        type=int,
        default=None,
        help='Define the max number of tokens supported by the engine')
    parser.add_argument(
        '--per_group',
        default=False,
        action="store_true",
        help=
        'By default, we use a single static scaling factor to scale weights in the int4 range. '
        'per_group chooses at run time, and for each group, a custom scaling factor. '
        'The flag is built for GPTQ/AWQ quantization.')
    parser.add_argument(
        '--use_weight_only',
        default=False,
        action="store_true",
        help='Quantize weights for the various GEMMs to INT4/INT8. '
        'See --weight_only_precision to set the precision')
    parser.add_argument(
        '--weight_only_precision',
        const='int8',
        type=str,
        nargs='?',
        default='int8',
        choices=['int8', 'int4'],
        help=
        'Define the precision for the weights when using weight-only quantization. '
        'You must also use --use_weight_only for that argument to have an impact.'
    )
    parser.add_argument(
        '--strongly_typed',
        default=False,
        action="store_true",
        help=
        'This option was introduced with TRT 9.1.0.1+ and will significantly reduce the engine build time for FP8.'
    )
    args = parser.parse_args(args)

    logger.set_level(args.log_level)

    if not args.remove_input_padding:
        if args.use_gpt_attention_plugin:
            logger.warning(
                "It is recommended to specify --remove_input_padding when using the GPT attention plugin"
            )

    if args.model_dir is not None:
        global hf_gpt
        if args.use_weight_only and args.weight_only_precision == 'int4' and args.per_group:
            logger.info(f'Loading AWQ GPTJ model from {args.model_dir}...')
            global awq_gptj_config
            with open(args.model_dir + "/config.json",
                      encoding='utf-8') as config_file:
                awq_gptj_config = json.load(config_file)
            args.n_embd = awq_gptj_config['n_embd']
            args.n_head = awq_gptj_config['n_head']
            args.n_layer = awq_gptj_config['n_layer']
            args.n_positions = awq_gptj_config['n_positions']
            args.vocab_size = awq_gptj_config['vocab_size']
            if args.vocab_size % 64 != 0:
                args.vocab_size = int(
                    (awq_gptj_config['vocab_size'] + 63) / 64) * 64
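                # Round up to the next multiple of 64: e.g. GPT-J's native
                # vocab size of 50400 becomes 50432 (788 * 64).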
                print(
                    "vocab_size is {}, to use AWQ we pad it to {}.".format(
                        awq_gptj_config['vocab_size'], args.vocab_size))
            hf_gpt = torch.load(args.model_dir + "/gptj_quantized.pth")
        else:
            logger.info(f'Loading HF GPTJ model from {args.model_dir}...')
            hf_gpt = AutoModelForCausalLM.from_pretrained(args.model_dir)
            args.n_embd = hf_gpt.config.n_embd
            args.n_head = hf_gpt.config.n_head
            args.n_layer = hf_gpt.config.n_layer
            args.n_positions = hf_gpt.config.n_positions
            args.vocab_size = hf_gpt.config.vocab_size

    assert not (args.use_weight_only and args.weight_only_precision
                == 'int8'), "INT8 weight-only quantization is not supported."

    assert not (args.use_weight_only and args.weight_only_precision == 'int4'
                and not args.per_group
                ), "Only AWQ is supported for INT4 weight-only quantization."

    if args.use_weight_only:
        args.quant_mode = QuantMode.use_weight_only(
            args.weight_only_precision == 'int4')
    else:
        args.quant_mode = QuantMode(0)
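    # quant_mode is a set of flags; the FP8 branches below extend it via
    # set_fp8_kv_cache() / set_fp8_qdq() rather than replacing it.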

    if args.fp8_kv_cache:
        assert (
            args.use_gpt_attention_plugin
        ), "You have to use the GPT attention plugin when the FP8 KV cache is enabled"
        args.quant_mode = args.quant_mode.set_fp8_kv_cache()

    if args.enable_fp8:
        args.quant_mode = args.quant_mode.set_fp8_qdq()

    if args.use_inflight_batching:
        if not args.use_gpt_attention_plugin:
            args.use_gpt_attention_plugin = 'float16'
            logger.info(
                f"Using GPT attention plugin for inflight batching mode. Setting to default '{args.use_gpt_attention_plugin}'"
            )
        if not args.remove_input_padding:
            args.remove_input_padding = True
            logger.info(
                "Using remove input padding for inflight batching mode.")
        if not args.paged_kv_cache:
            args.paged_kv_cache = True
            logger.info("Using paged KV cache for inflight batching mode.")

    if args.max_num_tokens is not None:
        assert args.enable_context_fmha

    if args.remove_input_padding or args.use_inflight_batching or args.paged_kv_cache:
        assert (
            not args.enable_two_optimization_profiles
        ), "Only one optimization profile is supported with inflight batching and the paged KV cache."

    return args


def build_rank_engine(builder: Builder,
                      builder_config: xtrt_llm.builder.BuilderConfig,
                      engine_name, rank, args):
    '''
    @brief: Build the engine on the given rank.
    @param rank: The rank to build the engine.
    @param args: The cmd line arguments.
    @return: The built engine.
    '''
    kv_dtype = trt.float16 if args.dtype == 'float16' else trt.float32

    # Initialize Module
    xtrt_llm_gpt = xtrt_llm.models.GPTJForCausalLM(
        num_layers=args.n_layer,
        num_heads=args.n_head,
        hidden_size=args.n_embd,
        vocab_size=args.vocab_size,
        hidden_act=args.hidden_act,
        max_position_embeddings=args.n_positions,
        rotary_dim=args.rotary_dim,
        dtype=kv_dtype,
        logits_dtype=args.logits_dtype,
        mapping=Mapping(world_size=args.world_size,
                        rank=rank,
                        tp_size=args.world_size),  # TP only
        quant_mode=args.quant_mode)
    if args.use_weight_only_quant_matmul_plugin:
        xtrt_llm_gpt = weight_only_quantize(xtrt_llm_gpt)
    if args.use_weight_only and args.weight_only_precision == 'int4':
        if args.per_group:
            xtrt_llm_gpt = weight_only_groupwise_quantize(
                model=xtrt_llm_gpt,
                quant_mode=QuantMode.from_description(
                    quantize_weights=True,
                    quantize_activations=False,
                    per_token=False,
                    per_channel=False,
                    per_group=True,
                    use_int4_weights=True),
                group_size=128,
                zero=False,
                pre_quant_scale=True,
                exclude_modules=[],
            )
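    # Assumption: group_size=128 with pre_quant_scale=True and zero=False
    # mirrors the AWQ recipe that load_from_awq_gpt_j below expects.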
    if args.model_dir is not None:
        assert hf_gpt is not None, 'Could not load weights from the hf_gpt model as it is not loaded yet.'
        if args.enable_fp8:
            gptj_scaling_factors = get_scaling_factors(
                args.quantized_fp8_model_path, args.n_layer, args.quant_mode)
        else:
            gptj_scaling_factors = None
        if args.use_weight_only and args.weight_only_precision == 'int4' and args.per_group:
            load_from_awq_gpt_j(xtrt_llm_gpt,
                                awq_gpt_j=hf_gpt,
                                config=awq_gptj_config,
                                dtype=args.dtype)
        else:
            load_from_hf_gpt_j(xtrt_llm_gpt,
                               hf_gpt,
                               args.dtype,
                               scaling_factors=gptj_scaling_factors)

    # Module -> Network
    network = builder.create_network()
    network.trt_network.name = engine_name
    if args.use_gpt_attention_plugin:
        network.plugin_config.set_gpt_attention_plugin(
            dtype=args.use_gpt_attention_plugin)
    if args.use_gemm_plugin:
        network.plugin_config.set_gemm_plugin(dtype=args.use_gemm_plugin)
    if args.use_layernorm_plugin:
        network.plugin_config.set_layernorm_plugin(
            dtype=args.use_layernorm_plugin)
    assert not (args.enable_context_fmha and args.enable_context_fmha_fp32_acc)
    if args.enable_context_fmha:
        network.plugin_config.set_context_fmha(ContextFMHAType.enabled)
    if args.enable_context_fmha_fp32_acc:
        network.plugin_config.set_context_fmha(
            ContextFMHAType.enabled_with_fp32_acc)
    if args.use_weight_only_quant_matmul_plugin:
        network.plugin_config.set_weight_only_quant_matmul_plugin(
            dtype=args.use_weight_only_quant_matmul_plugin)
    if args.use_weight_only:
        if args.per_group:
            network.plugin_config.set_weight_only_groupwise_quant_matmul_plugin(
                dtype='float16')
    if args.world_size > 1:
        network.plugin_config.set_nccl_plugin(args.dtype)
    if args.remove_input_padding:
        network.plugin_config.enable_remove_input_padding()
    if args.paged_kv_cache:
        network.plugin_config.enable_paged_kv_cache(args.tokens_per_block)

    with net_guard(network):
        # Prepare
        network.set_named_parameters(xtrt_llm_gpt.named_parameters())

        # Forward
        inputs = xtrt_llm_gpt.prepare_inputs(
            args.max_batch_size,
            args.max_input_len,
            args.max_output_len,
            True,
            args.max_beam_width,
            max_num_tokens=args.max_num_tokens,
            enable_two_optimization_profiles=args.enable_two_optimization_profiles)
        xtrt_llm_gpt(*inputs)
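
    # Running the forward pass under net_guard traces the module into the
    # TensorRT network using the named parameters registered above.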

    # xtrt_llm.graph_rewriting.optimize(network)

    engine = None

    # Network -> Engine
    engine = builder.build_engine(network, builder_config, compiler="gr")
    if rank == 0:
        config_path = os.path.join(args.output_dir, 'config.json')
        builder.save_config(builder_config, config_path)
    return engine


def build(rank, args):
    # torch.cuda.set_device(rank % args.gpus_per_node)
    xtrt_llm.logger.set_level(args.log_level)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # when doing serializing build, all ranks share one engine
    builder = Builder()

    cache = None
    for cur_rank in range(args.world_size):
        # skip other ranks if parallel_build is enabled
        if args.parallel_build and cur_rank != rank:
            continue

        builder_config = builder.create_builder_config(
            name=MODEL_NAME,
            precision=args.dtype,
            timing_cache=args.timing_cache if cache is None else cache,
            tensor_parallel=args.world_size,  # TP only
            parallel_build=args.parallel_build,
            num_layers=args.n_layer,
            num_heads=args.n_head,
            hidden_size=args.n_embd,
            inter_size=args.n_embd * 4,
            vocab_size=args.vocab_size,
            hidden_act=args.hidden_act,
            max_position_embeddings=args.n_positions,
            max_batch_size=args.max_batch_size,
            max_input_len=args.max_input_len,
            max_output_len=args.max_output_len,
            max_num_tokens=args.max_num_tokens,
            fp8=args.enable_fp8,
            quant_mode=args.quant_mode,
            strongly_typed=args.strongly_typed)

        engine_name = get_engine_name(MODEL_NAME, args.dtype, args.world_size,
                                      cur_rank)
        engine = build_rank_engine(builder, builder_config, engine_name,
                                   cur_rank, args)
        assert engine is not None, f'Failed to build engine for rank {cur_rank}'

        # if cur_rank == 0:
        #     # Use in-memory timing cache for multiple builder passes.
        #     if not args.parallel_build:
        #         cache = builder_config.xtrt_builder_config.get_timing_cache()

        serialize_engine(engine, os.path.join(args.output_dir, engine_name))

    # if rank == 0:
    #     ok = builder.save_timing_cache(
    #         builder_config, os.path.join(args.output_dir, "model.cache"))
    #     assert ok, "Failed to save timing cache."


def run_build(args=None):
    args = parse_arguments(args)
    tik = time.time()
    if args.parallel_build and args.world_size > 1 and \
            torch.cuda.device_count() >= args.world_size:
        logger.warning(
            f'Building TensorRT engines in parallel. Please make sure that all of the {args.world_size} GPUs are completely free.'
        )
        mp.spawn(build, nprocs=args.world_size, args=(args, ))
    else:
        args.parallel_build = False
        logger.info('Building TensorRT engines serially.')
        build(0, args)

    tok = time.time()
    t = time.strftime('%H:%M:%S', time.gmtime(tok - tik))
    logger.info(f'Total time of building all {args.world_size} engines: {t}')


if __name__ == '__main__':
    run_build()
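
# Example invocation (hypothetical paths; all flags are defined in
# parse_arguments above):
#   python build.py --model_dir ./gptj-6b \
#       --dtype float16 \
#       --use_gpt_attention_plugin float16 \
#       --use_gemm_plugin float16 \
#       --max_batch_size 8 \
#       --output_dir ./gptj_engines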