Provide models for mobile-only platforms by fixing batch size to 1 (#1276)

Author: Fangjun Kuang
Date: 2024-08-22 19:36:24 +08:00
Committed by: GitHub
Parent: d8001d6edc
Commit: 0e0d04a97a

6 changed files with 287 additions and 0 deletions

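The diff below contains only the dynamic-quantization helper from this commit; the batch-size-1 export referenced in the title happens in the other changed files, which are not shown here. As a rough, hypothetical sketch of the idea, a PyTorch-to-ONNX export that pins the batch dimension to 1 (leaving only the time axis dynamic) might look like the following. The model, shapes, and names are placeholders, not code from this commit.

#!/usr/bin/env python3
# Hypothetical sketch, not from this commit: export an ONNX model whose
# batch dimension is baked in as 1 instead of being marked dynamic.
import torch
import torch.nn as nn


class TinyModel(nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * 2


model = TinyModel()
model.eval()

# The example input has batch size 1, and dim 0 is deliberately left out
# of dynamic_axes below, so the exported graph accepts only batch size 1.
x = torch.zeros(1, 80, 100, dtype=torch.float32)

torch.onnx.export(
    model,
    x,
    "tiny.onnx",
    input_names=["x"],
    output_names=["y"],
    # Only the time axis (dim 2) stays dynamic; the batch axis is fixed.
    dynamic_axes={"x": {2: "T"}, "y": {2: "T"}},
)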

@@ -0,0 +1,38 @@
#!/usr/bin/env python3

import argparse

from onnxruntime.quantization import QuantType, quantize_dynamic


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input",
        type=str,
        required=True,
        help="Input onnx model",
    )

    parser.add_argument(
        "--output",
        type=str,
        required=True,
        help="Output onnx model",
    )

    return parser.parse_args()


def main():
    args = get_args()
    print(vars(args))

    # Quantize only the MatMul weights to int8; activation quantization
    # parameters are computed dynamically at run time.
    quantize_dynamic(
        model_input=args.input,
        model_output=args.output,
        op_types_to_quantize=["MatMul"],
        weight_type=QuantType.QInt8,
    )


if __name__ == "__main__":
    main()
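quantize_dynamic stores the MatMul weights as int8 and computes the activation quantization parameters at run time, so no calibration dataset is required; restricting op_types_to_quantize to MatMul leaves all other ops in float32, which shrinks the model for mobile deployment with minimal accuracy impact. Assuming the script above is saved as dynamic_quantization.py (the actual file name in the commit is not visible in this excerpt), a typical invocation would be:

python3 ./dynamic_quantization.py \
  --input model.onnx \
  --output model.int8.onnx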