#!/usr/bin/env python3
|
|
# Copyright 2024 Xiaomi Corp. (authors: Fangjun Kuang)
|
|
from typing import Dict
|
|
|
|
import onnx
|
|
import torch
|
|
import torchaudio
|
|
from nemo.collections.asr.models import EncDecCTCModel
|
|
from nemo.collections.asr.modules.audio_preprocessing import (
|
|
AudioToMelSpectrogramPreprocessor as NeMoAudioToMelSpectrogramPreprocessor,
|
|
)
|
|
from nemo.collections.asr.parts.preprocessing.features import (
|
|
FilterbankFeaturesTA as NeMoFilterbankFeaturesTA,
|
|
)
|
|
from onnxruntime.quantization import QuantType, quantize_dynamic
|
|
|
|
|
|
class FilterbankFeaturesTA(NeMoFilterbankFeaturesTA):
    """Filterbank extractor whose mel spectrogram comes from torchaudio.

    Strips the deprecated ``window_size`` / ``window_stride`` config keys
    before delegating to the NeMo base class, then replaces the base class's
    mel-spectrogram extractor with a ``torchaudio.transforms.MelSpectrogram``
    built from the remaining config values.
    """

    def __init__(self, mel_scale: str = "htk", wkwargs=None, **kwargs):
        # The base class no longer accepts these deprecated keys;
        # discard them if present.
        kwargs.pop("window_size", None)
        kwargs.pop("window_stride", None)

        super().__init__(**kwargs)

        # Collect the torchaudio extractor arguments from the (already
        # validated) config before constructing the transform.
        mel_args = dict(
            sample_rate=self._sample_rate,
            win_length=self.win_length,
            hop_length=self.hop_length,
            n_mels=kwargs["nfilt"],
            window_fn=self.torch_windows[kwargs["window"]],
            mel_scale=mel_scale,
            norm=kwargs["mel_norm"],
            n_fft=kwargs["n_fft"],
            f_max=kwargs.get("highfreq", None),
            f_min=kwargs.get("lowfreq", 0),
            wkwargs=wkwargs,
        )
        self._mel_spec_extractor: torchaudio.transforms.MelSpectrogram = (
            torchaudio.transforms.MelSpectrogram(**mel_args)
        )
class AudioToMelSpectrogramPreprocessor(NeMoAudioToMelSpectrogramPreprocessor):
    """Preprocessor that swaps NeMo's featurizer for the torchaudio-based one.

    After the base class finishes initializing, the featurizer is rebuilt as
    a :class:`FilterbankFeaturesTA` so the exported model uses the same
    feature pipeline as GigaAM.
    """

    def __init__(self, mel_scale: str = "htk", **kwargs):
        super().__init__(**kwargs)
        # FilterbankFeaturesTA expects ``nfilt`` where the preprocessor
        # config uses ``features``; rename the key before forwarding.
        kwargs["nfilt"] = kwargs.pop("features")
        self.featurizer = FilterbankFeaturesTA(
            # Deprecated arguments; kept for config compatibility
            mel_scale=mel_scale,
            **kwargs,
        )
def add_meta_data(filename: str, meta_data: Dict[str, str]):
    """Add meta data to an ONNX model. It is changed in-place.

    Args:
      filename:
        Filename of the ONNX model to be changed.
      meta_data:
        Key-value pairs.
    """
    model = onnx.load(filename)

    # Start from a clean slate so stale entries never survive a re-export.
    del model.metadata_props[:]

    for key, value in meta_data.items():
        entry = model.metadata_props.add()
        entry.key = key
        # Values may be ints (e.g. vocab_size); the proto field is a string.
        entry.value = str(value)

    onnx.save(model, filename)
@torch.no_grad()
def main():
    """Export the GigaAM Russian CTC model to ONNX.

    Produces:
      - tokens.txt        character table (space is 0, <blk> is last)
      - model.onnx        fp32 export with sherpa-onnx metadata attached
      - model.int8.onnx   dynamically quantized (uint8 weights) variant
    """
    model = EncDecCTCModel.from_config_file("./ctc_model_config.yaml")
    # NOTE(review): torch.load without weights_only=True unpickles arbitrary
    # objects; the checkpoint is assumed trusted (downloaded from GigaAM).
    ckpt = torch.load("./ctc_model_weights.ckpt", map_location="cpu")
    model.load_state_dict(ckpt, strict=False)
    model.eval()

    # use characters
    # space is 0
    # <blk> is the last token
    labels = model.cfg.labels
    with open("tokens.txt", "w", encoding="utf-8") as f:
        for i, t in enumerate(labels):
            f.write(f"{t} {i}\n")
        # Fix: derive the blank id from the vocabulary size instead of the
        # loop variable leaking out of the for-loop, which is undefined when
        # ``labels`` is empty (and fragile even when it is not).
        f.write(f"<blk> {len(labels)}\n")

    filename = "model.onnx"
    model.export(filename)

    meta_data = {
        "vocab_size": len(labels) + 1,  # labels + <blk>
        "normalize_type": "",
        "subsampling_factor": 4,
        "model_type": "EncDecCTCModel",
        "version": "1",
        "model_author": "https://github.com/salute-developers/GigaAM",
        "license": "https://github.com/salute-developers/GigaAM/blob/main/GigaAM%20License_NC.pdf",
        "language": "Russian",
        "is_giga_am": 1,
    }
    add_meta_data(filename, meta_data)

    # Dynamic quantization keeps activations fp32 and stores weights as
    # uint8, shrinking the model for CPU deployment.
    filename_int8 = "model.int8.onnx"
    quantize_dynamic(
        model_input=filename,
        model_output=filename_int8,
        weight_type=QuantType.QUInt8,
    )
# Script entry point: run the export when invoked directly.
if __name__ == "__main__":
    main()