Add C++ runtime and Python API for NeMo Canary models (#2352)
This commit is contained in:
@@ -9,6 +9,7 @@ set(srcs
|
||||
features.cc
|
||||
homophone-replacer.cc
|
||||
keyword-spotter.cc
|
||||
offline-canary-model-config.cc
|
||||
offline-ctc-fst-decoder-config.cc
|
||||
offline-dolphin-model-config.cc
|
||||
offline-fire-red-asr-model-config.cc
|
||||
|
||||
30
sherpa-onnx/python/csrc/offline-canary-model-config.cc
Normal file
30
sherpa-onnx/python/csrc/offline-canary-model-config.cc
Normal file
@@ -0,0 +1,30 @@
|
||||
// sherpa-onnx/python/csrc/offline-canary-model-config.cc
//
// Copyright (c)  2025  Xiaomi Corporation

#include "sherpa-onnx/csrc/offline-canary-model-config.h"

#include <string>
#include <vector>

#include "sherpa-onnx/python/csrc/offline-canary-model-config.h"

namespace sherpa_onnx {

// Exposes OfflineCanaryModelConfig to Python as the pybind11 class
// "OfflineCanaryModelConfig" on module *m.
void PybindOfflineCanaryModelConfig(py::module *m) {
  using Config = OfflineCanaryModelConfig;

  py::class_<Config> cls(*m, "OfflineCanaryModelConfig");

  // Constructor mirrors the C++ struct: every field is an optional keyword
  // argument; defaults match the C++ defaults (empty strings, use_pnc=true).
  cls.def(py::init<const std::string &, const std::string &,
                   const std::string &, const std::string &, bool>(),
          py::arg("encoder") = "", py::arg("decoder") = "",
          py::arg("src_lang") = "", py::arg("tgt_lang") = "",
          py::arg("use_pnc") = true);

  // Read/write attribute access for each field from Python.
  cls.def_readwrite("encoder", &Config::encoder);
  cls.def_readwrite("decoder", &Config::decoder);
  cls.def_readwrite("src_lang", &Config::src_lang);
  cls.def_readwrite("tgt_lang", &Config::tgt_lang);
  cls.def_readwrite("use_pnc", &Config::use_pnc);

  // str(config) delegates to the C++ ToString() for debugging output.
  cls.def("__str__", &Config::ToString);
}

}  // namespace sherpa_onnx
|
||||
16
sherpa-onnx/python/csrc/offline-canary-model-config.h
Normal file
16
sherpa-onnx/python/csrc/offline-canary-model-config.h
Normal file
@@ -0,0 +1,16 @@
|
||||
// sherpa-onnx/python/csrc/offline-canary-model-config.h
//
// Copyright (c)  2025  Xiaomi Corporation

#ifndef SHERPA_ONNX_PYTHON_CSRC_OFFLINE_CANARY_MODEL_CONFIG_H_
#define SHERPA_ONNX_PYTHON_CSRC_OFFLINE_CANARY_MODEL_CONFIG_H_

#include "sherpa-onnx/python/csrc/sherpa-onnx.h"

namespace sherpa_onnx {

// Registers the OfflineCanaryModelConfig pybind11 binding on module *m.
// Defined in offline-canary-model-config.cc.
void PybindOfflineCanaryModelConfig(py::module *m);

}  // namespace sherpa_onnx

#endif  // SHERPA_ONNX_PYTHON_CSRC_OFFLINE_CANARY_MODEL_CONFIG_H_
|
||||
@@ -8,6 +8,7 @@
|
||||
#include <vector>
|
||||
|
||||
#include "sherpa-onnx/csrc/offline-model-config.h"
|
||||
#include "sherpa-onnx/python/csrc/offline-canary-model-config.h"
|
||||
#include "sherpa-onnx/python/csrc/offline-dolphin-model-config.h"
|
||||
#include "sherpa-onnx/python/csrc/offline-fire-red-asr-model-config.h"
|
||||
#include "sherpa-onnx/python/csrc/offline-moonshine-model-config.h"
|
||||
@@ -34,6 +35,7 @@ void PybindOfflineModelConfig(py::module *m) {
|
||||
PybindOfflineSenseVoiceModelConfig(m);
|
||||
PybindOfflineMoonshineModelConfig(m);
|
||||
PybindOfflineDolphinModelConfig(m);
|
||||
PybindOfflineCanaryModelConfig(m);
|
||||
|
||||
using PyClass = OfflineModelConfig;
|
||||
py::class_<PyClass>(*m, "OfflineModelConfig")
|
||||
@@ -47,7 +49,8 @@ void PybindOfflineModelConfig(py::module *m) {
|
||||
const OfflineWenetCtcModelConfig &,
|
||||
const OfflineSenseVoiceModelConfig &,
|
||||
const OfflineMoonshineModelConfig &,
|
||||
const OfflineDolphinModelConfig &, const std::string &,
|
||||
const OfflineDolphinModelConfig &,
|
||||
const OfflineCanaryModelConfig &, const std::string &,
|
||||
const std::string &, int32_t, bool, const std::string &,
|
||||
const std::string &, const std::string &,
|
||||
const std::string &>(),
|
||||
@@ -62,8 +65,9 @@ void PybindOfflineModelConfig(py::module *m) {
|
||||
py::arg("sense_voice") = OfflineSenseVoiceModelConfig(),
|
||||
py::arg("moonshine") = OfflineMoonshineModelConfig(),
|
||||
py::arg("dolphin") = OfflineDolphinModelConfig(),
|
||||
py::arg("telespeech_ctc") = "", py::arg("tokens"),
|
||||
py::arg("num_threads"), py::arg("debug") = false,
|
||||
py::arg("canary") = OfflineCanaryModelConfig(),
|
||||
py::arg("telespeech_ctc") = "", py::arg("tokens") = "",
|
||||
py::arg("num_threads") = 1, py::arg("debug") = false,
|
||||
py::arg("provider") = "cpu", py::arg("model_type") = "",
|
||||
py::arg("modeling_unit") = "cjkchar", py::arg("bpe_vocab") = "")
|
||||
.def_readwrite("transducer", &PyClass::transducer)
|
||||
@@ -77,6 +81,7 @@ void PybindOfflineModelConfig(py::module *m) {
|
||||
.def_readwrite("sense_voice", &PyClass::sense_voice)
|
||||
.def_readwrite("moonshine", &PyClass::moonshine)
|
||||
.def_readwrite("dolphin", &PyClass::dolphin)
|
||||
.def_readwrite("canary", &PyClass::canary)
|
||||
.def_readwrite("telespeech_ctc", &PyClass::telespeech_ctc)
|
||||
.def_readwrite("tokens", &PyClass::tokens)
|
||||
.def_readwrite("num_threads", &PyClass::num_threads)
|
||||
|
||||
@@ -19,7 +19,8 @@ static void PybindOfflineRecognizerConfig(py::module *m) {
|
||||
const std::string &, int32_t, const std::string &, float,
|
||||
float, const std::string &, const std::string &,
|
||||
const HomophoneReplacerConfig &>(),
|
||||
py::arg("feat_config"), py::arg("model_config"),
|
||||
py::arg("feat_config") = FeatureExtractorConfig(),
|
||||
py::arg("model_config") = OfflineModelConfig(),
|
||||
py::arg("lm_config") = OfflineLMConfig(),
|
||||
py::arg("ctc_fst_decoder_config") = OfflineCtcFstDecoderConfig(),
|
||||
py::arg("decoding_method") = "greedy_search",
|
||||
@@ -61,6 +62,8 @@ void PybindOfflineRecognizer(py::module *m) {
|
||||
py::arg("hotwords"), py::call_guard<py::gil_scoped_release>())
|
||||
.def("decode_stream", &PyClass::DecodeStream, py::arg("s"),
|
||||
py::call_guard<py::gil_scoped_release>())
|
||||
.def("set_config", &PyClass::SetConfig, py::arg("config"),
|
||||
py::call_guard<py::gil_scoped_release>())
|
||||
.def(
|
||||
"decode_streams",
|
||||
[](const PyClass &self, std::vector<OfflineStream *> ss) {
|
||||
|
||||
@@ -8,9 +8,22 @@ from _sherpa_onnx import (
|
||||
DenoisedAudio,
|
||||
FastClustering,
|
||||
FastClusteringConfig,
|
||||
FeatureExtractorConfig,
|
||||
HomophoneReplacerConfig,
|
||||
OfflineCanaryModelConfig,
|
||||
OfflineCtcFstDecoderConfig,
|
||||
OfflineDolphinModelConfig,
|
||||
OfflineFireRedAsrModelConfig,
|
||||
OfflineLMConfig,
|
||||
OfflineModelConfig,
|
||||
OfflineMoonshineModelConfig,
|
||||
OfflineNemoEncDecCtcModelConfig,
|
||||
OfflineParaformerModelConfig,
|
||||
OfflinePunctuation,
|
||||
OfflinePunctuationConfig,
|
||||
OfflinePunctuationModelConfig,
|
||||
OfflineRecognizerConfig,
|
||||
OfflineSenseVoiceModelConfig,
|
||||
OfflineSourceSeparation,
|
||||
OfflineSourceSeparationConfig,
|
||||
OfflineSourceSeparationModelConfig,
|
||||
@@ -27,13 +40,18 @@ from _sherpa_onnx import (
|
||||
OfflineSpeechDenoiserGtcrnModelConfig,
|
||||
OfflineSpeechDenoiserModelConfig,
|
||||
OfflineStream,
|
||||
OfflineTdnnModelConfig,
|
||||
OfflineTransducerModelConfig,
|
||||
OfflineTts,
|
||||
OfflineTtsConfig,
|
||||
OfflineTtsKokoroModelConfig,
|
||||
OfflineTtsMatchaModelConfig,
|
||||
OfflineTtsModelConfig,
|
||||
OfflineTtsVitsModelConfig,
|
||||
OfflineWenetCtcModelConfig,
|
||||
OfflineWhisperModelConfig,
|
||||
OfflineZipformerAudioTaggingModelConfig,
|
||||
OfflineZipformerCtcModelConfig,
|
||||
OnlinePunctuation,
|
||||
OnlinePunctuationConfig,
|
||||
OnlinePunctuationModelConfig,
|
||||
|
||||
@@ -6,6 +6,7 @@ from typing import List, Optional
|
||||
from _sherpa_onnx import (
|
||||
FeatureExtractorConfig,
|
||||
HomophoneReplacerConfig,
|
||||
OfflineCanaryModelConfig,
|
||||
OfflineCtcFstDecoderConfig,
|
||||
OfflineDolphinModelConfig,
|
||||
OfflineFireRedAsrModelConfig,
|
||||
@@ -425,7 +426,6 @@ class OfflineRecognizer(object):
|
||||
num_threads=num_threads,
|
||||
debug=debug,
|
||||
provider=provider,
|
||||
model_type="nemo_ctc",
|
||||
)
|
||||
|
||||
feat_config = FeatureExtractorConfig(
|
||||
@@ -690,6 +690,102 @@ class OfflineRecognizer(object):
|
||||
self.config = recognizer_config
|
||||
return self
|
||||
|
||||
@classmethod
def from_nemo_canary(
    cls,
    encoder: str,
    decoder: str,
    tokens: str,
    src_lang: str = "en",
    tgt_lang: str = "en",
    num_threads: int = 1,
    sample_rate: int = 16000,
    feature_dim: int = 128,  # not used
    decoding_method: str = "greedy_search",  # not used
    debug: bool = False,
    provider: str = "cpu",
    rule_fsts: str = "",
    rule_fars: str = "",
    hr_dict_dir: str = "",
    hr_rule_fsts: str = "",
    hr_lexicon: str = "",
    use_pnc: bool = True,
):
    """Create an OfflineRecognizer from a NeMo Canary model.

    Please refer to
    `<https://k2-fsa.github.io/sherpa/onnx/nemo/index.html>`_
    to download pre-trained models for different languages.

    Args:
      encoder:
        Path to ``encoder.onnx`` or ``encoder.int8.onnx``.
      decoder:
        Path to ``decoder.onnx`` or ``decoder.int8.onnx``.
      tokens:
        Path to ``tokens.txt``. Each line in ``tokens.txt`` contains two
        columns::

            symbol integer_id

      src_lang:
        The language of the input audio. Valid values are: en, es, de, fr.
        If you leave it empty, it uses en internally.
      tgt_lang:
        The language of the output text. Valid values are: en, es, de, fr.
        If you leave it empty, it uses en internally.
      num_threads:
        Number of threads for neural network computation.
      sample_rate:
        Sample rate of the training data used to train the model. Not used
      feature_dim:
        Dimension of the feature used to train the model. Not used
      decoding_method:
        Valid values are greedy_search. Not used
      debug:
        True to show debug messages.
      provider:
        onnxruntime execution providers. Valid values are: cpu, cuda, coreml.
      rule_fsts:
        If not empty, it specifies fsts for inverse text normalization.
        If there are multiple fsts, they are separated by a comma.
      rule_fars:
        If not empty, it specifies fst archives for inverse text normalization.
        If there are multiple archives, they are separated by a comma.
      hr_dict_dir:
        If not empty, directory containing the dictionary used by the
        homophone replacer.
      hr_rule_fsts:
        If not empty, rule fsts used by the homophone replacer.
      hr_lexicon:
        If not empty, lexicon used by the homophone replacer.
      use_pnc:
        True to output punctuation and casing; False to disable them.
    """
    self = cls.__new__(cls)
    model_config = OfflineModelConfig(
        canary=OfflineCanaryModelConfig(
            encoder=encoder,
            decoder=decoder,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            # Previously not forwarded, so callers could never disable
            # punctuation/casing even though the binding exposes it.
            use_pnc=use_pnc,
        ),
        tokens=tokens,
        num_threads=num_threads,
        debug=debug,
        provider=provider,
    )

    feat_config = FeatureExtractorConfig(
        sampling_rate=sample_rate,
        feature_dim=feature_dim,
    )

    recognizer_config = OfflineRecognizerConfig(
        feat_config=feat_config,
        model_config=model_config,
        decoding_method=decoding_method,
        rule_fsts=rule_fsts,
        rule_fars=rule_fars,
        hr=HomophoneReplacerConfig(
            dict_dir=hr_dict_dir,
            lexicon=hr_lexicon,
            rule_fsts=hr_rule_fsts,
        ),
    )
    self.recognizer = _Recognizer(recognizer_config)
    self.config = recognizer_config
    return self
|
||||
|
||||
@classmethod
|
||||
def from_whisper(
|
||||
cls,
|
||||
|
||||
Reference in New Issue
Block a user