Add C++ runtime and Python API for NeMo Canary models (#2352)

This commit is contained in:
Fangjun Kuang
2025-07-07 17:03:49 +08:00
committed by GitHub
parent f8d957a24b
commit 0e738c356c
24 changed files with 1091 additions and 8 deletions

View File

@@ -6,6 +6,7 @@ from typing import List, Optional
from _sherpa_onnx import (
FeatureExtractorConfig,
HomophoneReplacerConfig,
OfflineCanaryModelConfig,
OfflineCtcFstDecoderConfig,
OfflineDolphinModelConfig,
OfflineFireRedAsrModelConfig,
@@ -425,7 +426,6 @@ class OfflineRecognizer(object):
num_threads=num_threads,
debug=debug,
provider=provider,
model_type="nemo_ctc",
)
feat_config = FeatureExtractorConfig(
@@ -690,6 +690,102 @@ class OfflineRecognizer(object):
self.config = recognizer_config
return self
@classmethod
def from_nemo_canary(
    cls,
    encoder: str,
    decoder: str,
    tokens: str,
    src_lang: str = "en",
    tgt_lang: str = "en",
    num_threads: int = 1,
    sample_rate: int = 16000,
    feature_dim: int = 128,  # not used
    decoding_method: str = "greedy_search",  # not used
    debug: bool = False,
    provider: str = "cpu",
    rule_fsts: str = "",
    rule_fars: str = "",
    hr_dict_dir: str = "",
    hr_rule_fsts: str = "",
    hr_lexicon: str = "",
):
    """Create an offline recognizer from a NeMo Canary model.

    Please refer to
    `<https://k2-fsa.github.io/sherpa/onnx/nemo/index.html>`_
    to download pre-trained models for different languages.

    Args:
      encoder:
        Path to ``encoder.onnx`` or ``encoder.int8.onnx``.
      decoder:
        Path to ``decoder.onnx`` or ``decoder.int8.onnx``.
      tokens:
        Path to ``tokens.txt``. Each line in ``tokens.txt`` contains two
        columns::

            symbol integer_id

      src_lang:
        The language of the input audio. Valid values are: en, es, de, fr.
        If you leave it empty, it uses en internally.
      tgt_lang:
        The language of the output text. Valid values are: en, es, de, fr.
        If you leave it empty, it uses en internally.
      num_threads:
        Number of threads for neural network computation.
      sample_rate:
        Sample rate of the training data used to train the model. Not used.
      feature_dim:
        Dimension of the feature used to train the model. Not used.
      decoding_method:
        Valid values are greedy_search. Not used.
      debug:
        True to show debug messages.
      provider:
        onnxruntime execution providers. Valid values are: cpu, cuda, coreml.
      rule_fsts:
        If not empty, it specifies fsts for inverse text normalization.
        If there are multiple fsts, they are separated by a comma.
      rule_fars:
        If not empty, it specifies fst archives for inverse text normalization.
        If there are multiple archives, they are separated by a comma.
      hr_dict_dir:
        If not empty, it specifies the dictionary directory for homophone
        replacement (forwarded to ``HomophoneReplacerConfig.dict_dir``).
      hr_rule_fsts:
        If not empty, it specifies rule fsts for homophone replacement
        (forwarded to ``HomophoneReplacerConfig.rule_fsts``).
      hr_lexicon:
        If not empty, it specifies the lexicon for homophone replacement
        (forwarded to ``HomophoneReplacerConfig.lexicon``).
    """
    # Bypass __init__ on purpose: this alternate constructor builds the
    # config itself and assigns the two attributes the class relies on.
    self = cls.__new__(cls)
    model_config = OfflineModelConfig(
        canary=OfflineCanaryModelConfig(
            encoder=encoder,
            decoder=decoder,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        ),
        tokens=tokens,
        num_threads=num_threads,
        debug=debug,
        provider=provider,
    )
    feat_config = FeatureExtractorConfig(
        sampling_rate=sample_rate,
        feature_dim=feature_dim,
    )
    recognizer_config = OfflineRecognizerConfig(
        feat_config=feat_config,
        model_config=model_config,
        decoding_method=decoding_method,
        rule_fsts=rule_fsts,
        rule_fars=rule_fars,
        hr=HomophoneReplacerConfig(
            dict_dir=hr_dict_dir,
            lexicon=hr_lexicon,
            rule_fsts=hr_rule_fsts,
        ),
    )
    self.recognizer = _Recognizer(recognizer_config)
    self.config = recognizer_config
    return self
@classmethod
def from_whisper(
cls,