Support GigaAM CTC models for Russian ASR (#1464)

See also https://github.com/salute-developers/GigaAM
Fangjun Kuang
2024-10-25 10:55:16 +08:00
committed by GitHub
parent 2b40079faf
commit b41f6d2c94
24 changed files with 641 additions and 160 deletions

scripts/nemo/GigaAM/README.md

@@ -0,0 +1,10 @@
# Introduction
This folder contains scripts for converting models from
https://github.com/salute-developers/GigaAM
to sherpa-onnx.
The models in this folder are for Russian speech recognition.
Please see the license of the models at
https://github.com/salute-developers/GigaAM/blob/main/GigaAM%20License_NC.pdf
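
As a quick sanity check of a converted model, the sherpa-onnx Python API can be used
to decode the example wave. The following is a minimal sketch, not part of the
conversion scripts: it assumes the sherpa_onnx package is installed, that
model.int8.onnx, tokens.txt, and example.wav come from the scripts in this folder,
and that the exported GigaAM CTC model is loaded via the NeMo CTC loader
(OfflineRecognizer.from_nemo_ctc):

import sherpa_onnx
import soundfile as sf

# GigaAM CTC uses 16 kHz audio and 64-dim mel features (see the export script)
recognizer = sherpa_onnx.OfflineRecognizer.from_nemo_ctc(
    model="./model.int8.onnx",
    tokens="./tokens.txt",
    num_threads=1,
    sample_rate=16000,
    feature_dim=64,
)

audio, sample_rate = sf.read("./example.wav", dtype="float32", always_2d=True)
stream = recognizer.create_stream()
stream.accept_waveform(sample_rate, audio[:, 0])  # first channel only
recognizer.decode_stream(stream)
print(stream.result.text)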

scripts/nemo/GigaAM/export-onnx-ctc.py

@@ -0,0 +1,114 @@
#!/usr/bin/env python3
# Copyright 2024 Xiaomi Corp. (authors: Fangjun Kuang)

from typing import Dict

import onnx
import torch
import torchaudio
from nemo.collections.asr.models import EncDecCTCModel
from nemo.collections.asr.modules.audio_preprocessing import (
    AudioToMelSpectrogramPreprocessor as NeMoAudioToMelSpectrogramPreprocessor,
)
from nemo.collections.asr.parts.preprocessing.features import (
    FilterbankFeaturesTA as NeMoFilterbankFeaturesTA,
)
from onnxruntime.quantization import QuantType, quantize_dynamic


class FilterbankFeaturesTA(NeMoFilterbankFeaturesTA):
    def __init__(self, mel_scale: str = "htk", wkwargs=None, **kwargs):
        if "window_size" in kwargs:
            del kwargs["window_size"]
        if "window_stride" in kwargs:
            del kwargs["window_stride"]

        super().__init__(**kwargs)

        self._mel_spec_extractor: torchaudio.transforms.MelSpectrogram = (
            torchaudio.transforms.MelSpectrogram(
                sample_rate=self._sample_rate,
                win_length=self.win_length,
                hop_length=self.hop_length,
                n_mels=kwargs["nfilt"],
                window_fn=self.torch_windows[kwargs["window"]],
                mel_scale=mel_scale,
                norm=kwargs["mel_norm"],
                n_fft=kwargs["n_fft"],
                f_max=kwargs.get("highfreq", None),
                f_min=kwargs.get("lowfreq", 0),
                wkwargs=wkwargs,
            )
        )


class AudioToMelSpectrogramPreprocessor(NeMoAudioToMelSpectrogramPreprocessor):
    def __init__(self, mel_scale: str = "htk", **kwargs):
        super().__init__(**kwargs)
        kwargs["nfilt"] = kwargs["features"]
        del kwargs["features"]
        self.featurizer = (
            FilterbankFeaturesTA(  # Deprecated arguments; kept for config compatibility
                mel_scale=mel_scale,
                **kwargs,
            )
        )


def add_meta_data(filename: str, meta_data: Dict[str, str]):
    """Add metadata to an ONNX model. The model file is changed in-place.

    Args:
      filename:
        Filename of the ONNX model to be changed.
      meta_data:
        Key-value pairs.
    """
    model = onnx.load(filename)
    while len(model.metadata_props):
        model.metadata_props.pop()

    for key, value in meta_data.items():
        meta = model.metadata_props.add()
        meta.key = key
        meta.value = str(value)

    onnx.save(model, filename)


def main():
    model = EncDecCTCModel.from_config_file("./ctc_model_config.yaml")
    ckpt = torch.load("./ctc_model_weights.ckpt", map_location="cpu")
    model.load_state_dict(ckpt, strict=False)
    model.eval()

    with open("tokens.txt", "w", encoding="utf-8") as f:
        for i, t in enumerate(model.cfg.labels):
            f.write(f"{t} {i}\n")
        f.write(f"<blk> {i+1}\n")

    filename = "model.onnx"
    model.export(filename)

    meta_data = {
        "vocab_size": len(model.cfg.labels) + 1,
        "normalize_type": "",
        "subsampling_factor": 4,
        "model_type": "EncDecCTCModel",
        "version": "1",
        "model_author": "https://github.com/salute-developers/GigaAM",
        "license": "https://github.com/salute-developers/GigaAM/blob/main/GigaAM%20License_NC.pdf",
        "language": "Russian",
        "is_giga_am": 1,
    }
    add_meta_data(filename, meta_data)

    filename_int8 = "model.int8.onnx"
    quantize_dynamic(
        model_input=filename,
        model_output=filename_int8,
        weight_type=QuantType.QUInt8,
    )


if __name__ == "__main__":
    main()
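
To double-check the metadata written by add_meta_data(), it can be read back
through onnxruntime. A minimal sketch, assuming model.onnx is in the current
directory:

import onnxruntime as ort

sess = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
meta = sess.get_modelmeta().custom_metadata_map  # all values are strings
print(meta["model_type"], meta["vocab_size"], meta["language"])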

scripts/nemo/GigaAM/run-ctc.sh (new executable file)

@@ -0,0 +1,36 @@
#!/usr/bin/env bash
# Copyright 2024 Xiaomi Corp. (authors: Fangjun Kuang)

set -ex

function install_nemo() {
  curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
  python3 get-pip.py
  pip install torch==2.4.0 torchaudio==2.4.0 -f https://download.pytorch.org/whl/torch_stable.html

  pip install -qq wget text-unidecode "matplotlib>=3.3.2" onnx onnxruntime pybind11 Cython einops kaldi-native-fbank soundfile librosa
  pip install -qq ipython

  # sudo apt-get install -q -y sox libsndfile1 ffmpeg python3-pip ipython

  BRANCH='main'
  python3 -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]

  pip install numpy==1.26.4
}

function download_files() {
  curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/ctc_model_weights.ckpt
  curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/ctc_model_config.yaml
  curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/example.wav
  curl -SL -O https://n-ws-q0bez.s3pd12.sbercloud.ru/b-ws-q0bez-jpv/GigaAM/long_example.wav
  curl -SL -O https://huggingface.co/csukuangfj/tmp-files/resolve/main/GigaAM%20License_NC.pdf
}

install_nemo
download_files

python3 ./export-onnx-ctc.py
ls -lh

python3 ./test-onnx-ctc.py

scripts/nemo/GigaAM/test-onnx-ctc.py

@@ -0,0 +1,157 @@
#!/usr/bin/env python3
# Copyright 2024 Xiaomi Corp. (authors: Fangjun Kuang)
# https://github.com/salute-developers/GigaAM

import kaldi_native_fbank as knf
import librosa
import numpy as np
import onnxruntime as ort
import soundfile as sf
import torch


def create_fbank():
    opts = knf.FbankOptions()
    opts.frame_opts.dither = 0
    opts.frame_opts.remove_dc_offset = False
    opts.frame_opts.preemph_coeff = 0
    opts.frame_opts.window_type = "hann"

    # Even though GigaAM uses an FFT size of 400, we use 512 here,
    # since kaldi-native-fbank only supports FFT sizes that are powers of 2.
    opts.frame_opts.round_to_power_of_two = True

    opts.mel_opts.low_freq = 0
    opts.mel_opts.high_freq = 8000
    opts.mel_opts.num_bins = 64

    fbank = knf.OnlineFbank(opts)
    return fbank


def compute_features(audio, fbank) -> np.ndarray:
    """
    Args:
      audio: (num_samples,), np.float32
      fbank: the fbank extractor
    Returns:
      features: (num_frames, feat_dim), np.float32
    """
    assert len(audio.shape) == 1, audio.shape
    fbank.accept_waveform(16000, audio)
    ans = []
    processed = 0
    while processed < fbank.num_frames_ready:
        ans.append(np.array(fbank.get_frame(processed)))
        processed += 1
    ans = np.stack(ans)
    return ans


def display(sess):
    print("==========Input==========")
    for i in sess.get_inputs():
        print(i)
    print("==========Output==========")
    for i in sess.get_outputs():
        print(i)


"""
==========Input==========
NodeArg(name='audio_signal', type='tensor(float)', shape=['audio_signal_dynamic_axes_1', 64, 'audio_signal_dynamic_axes_2'])
NodeArg(name='length', type='tensor(int64)', shape=['length_dynamic_axes_1'])
==========Output==========
NodeArg(name='logprobs', type='tensor(float)', shape=['logprobs_dynamic_axes_1', 'logprobs_dynamic_axes_2', 34])
"""


class OnnxModel:
    def __init__(
        self,
        filename: str,
    ):
        session_opts = ort.SessionOptions()
        session_opts.inter_op_num_threads = 1
        session_opts.intra_op_num_threads = 1

        self.model = ort.InferenceSession(
            filename,
            sess_options=session_opts,
            providers=["CPUExecutionProvider"],
        )
        display(self.model)

    def __call__(self, x: np.ndarray):
        # x: (T, C)
        x = torch.from_numpy(x)
        x = x.t().unsqueeze(0)
        # x: (1, C, T)
        x_lens = torch.tensor([x.shape[-1]], dtype=torch.int64)

        log_probs = self.model.run(
            [
                self.model.get_outputs()[0].name,
            ],
            {
                self.model.get_inputs()[0].name: x.numpy(),
                self.model.get_inputs()[1].name: x_lens.numpy(),
            },
        )[0]
        # log_probs: (batch_size, T, dim)
        return log_probs


def main():
    filename = "./model.int8.onnx"
    tokens = "./tokens.txt"
    wav = "./example.wav"

    model = OnnxModel(filename)

    id2token = dict()
    with open(tokens, encoding="utf-8") as f:
        for line in f:
            fields = line.split()
            if len(fields) == 1:
                # a single field means this line is for the space token
                id2token[int(fields[0])] = " "
            else:
                t, idx = fields
                id2token[int(idx)] = t

    fbank = create_fbank()
    audio, sample_rate = sf.read(wav, dtype="float32", always_2d=True)
    audio = audio[:, 0]  # only use the first channel
    if sample_rate != 16000:
        audio = librosa.resample(
            audio,
            orig_sr=sample_rate,
            target_sr=16000,
        )
        sample_rate = 16000

    features = compute_features(audio, fbank)
    print("features.shape", features.shape)

    blank = len(id2token) - 1
    prev = -1
    ans = []

    log_probs = model(features)
    print("log_probs", log_probs.shape)
    log_probs = torch.from_numpy(log_probs)[0]
    ids = torch.argmax(log_probs, dim=1).tolist()

    for i in ids:
        if i != blank and i != prev:
            ans.append(i)
        prev = i

    tokens = [id2token[i] for i in ans]
    text = "".join(tokens)
    print(wav)
    print(text)


if __name__ == "__main__":
    main()
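
The decoding loop in main() above is plain greedy CTC decoding: take the
argmax token per frame, collapse consecutive repeats, then drop blanks. A
tiny self-contained illustration of that rule, with made-up frame IDs:

# Toy greedy CTC decode: blank id is 3 in this made-up example.
ids = [3, 1, 1, 3, 3, 2, 2, 2, 3, 1]  # per-frame argmax IDs
blank = 3
ans = []
prev = -1
for i in ids:
    if i != blank and i != prev:
        ans.append(i)
    prev = i
assert ans == [1, 2, 1]  # repeats collapsed, blanks removed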