This repository has been archived on 2025-08-26. You can view files and clone it, but cannot push or open issues or pull requests.
Files
enginex_bi_series-sherpa-onnx/scripts/kokoro/v0.19/dynamic_quantization.py
Fangjun Kuang 59d118c256 Refactor kokoro export (#2302)
- generate samples for https://k2-fsa.github.io/sherpa/onnx/tts/all/
- provide an int8 model for kokoro v0.19: kokoro-int8-en-v0_19.tar.bz2
2025-06-18 20:30:10 +08:00

48 lines
1.1 KiB
Python
Executable File

#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
from pathlib import Path
import onnxruntime
from onnxruntime.quantization import QuantType, quantize_dynamic
def show(filename):
    """Print the input and output node specs of an ONNX model.

    Args:
      filename: Path to the ``.onnx`` model file to inspect.
    """
    opts = onnxruntime.SessionOptions()
    opts.log_severity_level = 3  # suppress warnings; errors only
    session = onnxruntime.InferenceSession(filename, opts)

    for node in session.get_inputs():
        print(node)
    print("-----")
    for node in session.get_outputs():
        print(node)


# Sample output of show() for the kokoro v0.19 model, kept for reference.
"""
NodeArg(name='tokens', type='tensor(int64)', shape=[1, 'tokens1'])
NodeArg(name='style', type='tensor(float)', shape=[1, 256])
NodeArg(name='speed', type='tensor(float)', shape=[1])
-----
NodeArg(name='audio', type='tensor(float)', shape=['audio0'])
"""
def main():
    """Inspect ./model.onnx and write an int8 dynamically quantized copy.

    The quantized model is written to ./model.int8.onnx; if that file
    already exists, quantization is skipped.
    """
    show("./model.onnx")

    # Guard clause: nothing to do when the quantized model is present.
    if Path("./model.int8.onnx").is_file():
        print("./model.int8.onnx exists - skip")
        return

    quantize_dynamic(
        model_input="model.onnx",
        model_output="model.int8.onnx",
        # op_types_to_quantize=["MatMul"],
        weight_type=QuantType.QUInt8,
    )
# Run the quantization workflow only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()