This repository has been archived on 2025-08-26. You can view files and clone it, but cannot push or open issues or pull requests.
Files
enginex_bi_series-sherpa-onnx/scripts/mobile-asr-models/dynamic_quantization.py

56 lines
1.1 KiB
Python
Executable File

#!/usr/bin/env python3
import argparse
import onnxruntime
from onnxruntime.quantization import QuantType, quantize_dynamic
def show(filename):
    """Print the input and output tensor specs of the given ONNX model.

    Loads the model into an inference session and dumps each input node,
    a separator line, then each output node.
    """
    opts = onnxruntime.SessionOptions()
    opts.log_severity_level = 3  # 3 == ERROR: silence info/warning chatter during load
    session = onnxruntime.InferenceSession(filename, opts)
    for node in session.get_inputs():
        print(node)
    print("-----")
    for node in session.get_outputs():
        print(node)
def get_args():
    """Parse command-line arguments.

    Both ``--input`` and ``--output`` are required ONNX model paths.
    """
    parser = argparse.ArgumentParser()
    # Register the two symmetric path arguments from a small table.
    for flag, description in (
        ("--input", "Input onnx model"),
        ("--output", "Output onnx model"),
    ):
        parser.add_argument(flag, type=str, required=True, help=description)
    return parser.parse_args()
def main():
    """Entry point: print the model's I/O specs, then write an int8
    dynamically-quantized copy (MatMul weights only) to the output path."""
    args = get_args()
    print(vars(args))

    src, dst = args.input, args.output
    print(f"----------{src}----------")
    show(src)
    print("------------------------------")

    quantize_dynamic(
        model_input=src,
        model_output=dst,
        # Restrict quantization to MatMul nodes; other ops keep float weights.
        op_types_to_quantize=["MatMul"],
        weight_type=QuantType.QInt8,
    )


if __name__ == "__main__":
    main()