Refactor exporting NeMo models (#2362)

Refactors and extends model export support to include new NeMo Parakeet TDT int8 variants for English and Japanese, updating the Kotlin API, export scripts, test runners, and CI workflows.

- Added support for two new int8 model types in OfflineRecognizer.kt.
- Enhanced Python export scripts to perform dynamic quantization and metadata injection.
- Updated shell scripts and GitHub workflows to package, test, and publish int8 model artifacts.
This commit is contained in:
Fangjun Kuang
2025-07-09 16:02:12 +08:00
committed by GitHub
parent f1405779cf
commit 6122a678f5
19 changed files with 671 additions and 23 deletions

View File

@@ -0,0 +1,91 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
import os
from typing import Dict
import nemo.collections.asr as nemo_asr
import onnx
import torch
from onnxruntime.quantization import QuantType, quantize_dynamic
def add_meta_data(filename: str, meta_data: Dict[str, str]):
    """Replace the metadata of an ONNX model, modifying the file in place.

    Args:
      filename:
        Path of the ONNX model file to update.
      meta_data:
        Key-value pairs to store; values are stringified before saving.
    """
    model = onnx.load(filename)
    # Clear any pre-existing metadata entries before writing ours.
    del model.metadata_props[:]
    for k, v in meta_data.items():
        entry = model.metadata_props.add()
        entry.key = k
        entry.value = str(v)
    onnx.save(model, filename)
@torch.no_grad()
def main():
    """Export the CTC branch of nvidia/parakeet-tdt_ctc-0.6b-ja to int8 ONNX.

    Downloads the pretrained NeMo model, writes its vocabulary to
    ./tokens.txt, exports the CTC branch to model.onnx, applies dynamic
    quantization to produce model.int8.onnx, and embeds descriptive
    metadata in the quantized model.
    """
    model = nemo_asr.models.ASRModel.from_pretrained(
        model_name="nvidia/parakeet-tdt_ctc-0.6b-ja"
    )
    print(model.cfg)
    print(model)

    # Token table: one "<symbol> <id>" per line, with the blank symbol
    # appended after the vocabulary.
    vocab = model.joint.vocabulary
    with open("./tokens.txt", "w", encoding="utf-8") as f:
        for idx, sym in enumerate(vocab):
            f.write(f"{sym} {idx}\n")
        f.write(f"<blk> {len(vocab)}\n")
    print("Saved to tokens.txt")

    # Export only the CTC branch of the hybrid TDT/CTC model.
    model.change_decoding_strategy(decoder_type="ctc")
    model.eval()
    model.set_export_config({"decoder_type": "ctc"})
    model.export("model.onnx", onnx_opset_version=18)

    normalize_type = model.cfg.preprocessor.normalize
    if normalize_type == "NA":
        # "NA" signals that the preprocessor applies no normalization.
        normalize_type = ""

    meta_data = {
        "vocab_size": model.decoder.vocab_size,
        "normalize_type": normalize_type,
        "subsampling_factor": 8,
        "model_type": "EncDecHybridRNNTCTCBPEModel",
        "version": "1",
        "model_author": "NeMo",
        "url": "https://huggingface.co/nvidia/parakeet-tdt_ctc-0.6b-ja",
        "comment": "Only the CTC branch is exported",
        "doc": "See https://huggingface.co/nvidia/parakeet-tdt_ctc-0.6b-ja",
    }

    os.system("ls -lh *.onnx")
    # Dynamic quantization: weights are stored as uint8; activations are
    # quantized at run time.
    quantize_dynamic(
        model_input="./model.onnx",
        model_output="./model.int8.onnx",
        weight_type=QuantType.QUInt8,
    )
    add_meta_data("model.int8.onnx", meta_data)
    os.system("ls -lh *.onnx")

    print("preprocessor", model.cfg.preprocessor)
    print(meta_data)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,34 @@
#!/usr/bin/env bash
# Package and smoke-test the int8 ONNX export of
# nvidia/parakeet-tdt_ctc-0.6b-ja (CTC branch only).
set -ex

python3 ./export-onnx-ctc.py
ls -lh *.onnx

# Fetch Japanese test audio plus reference transcripts.
mkdir -p test_wavs
pushd test_wavs
curl -SL -O https://huggingface.co/csukuangfj/reazonspeech-k2-v2-ja-en/resolve/main/test_wavs/transcripts.txt
curl -SL -O https://hf-mirror.com/csukuangfj/reazonspeech-k2-v2-ja-en/resolve/main/test_wavs/test_ja_1.wav
curl -SL -O https://hf-mirror.com/csukuangfj/reazonspeech-k2-v2-ja-en/resolve/main/test_wavs/test_ja_2.wav
popd

# Assemble the distributable model directory.
d=sherpa-onnx-nemo-parakeet-tdt_ctc-0.6b-ja-35000-int8
mkdir -p $d
mv -v model.int8.onnx $d/
cp -v tokens.txt $d/
cp -av test_wavs $d
ls -lh $d

# Sanity-check the packaged int8 model on both test clips.
for wav in test_ja_1.wav test_ja_2.wav; do
  python3 ./test-onnx-ctc-non-streaming.py \
    --model $d/model.int8.onnx \
    --tokens $d/tokens.txt \
    --wav $d/test_wavs/$wav
done

View File

@@ -0,0 +1 @@
../fast-conformer-hybrid-transducer-ctc/test-onnx-ctc-non-streaming.py