Add C++ support for streaming NeMo CTC models. (#857)
@@ -23,6 +23,7 @@ set(srcs
  online-ctc-fst-decoder-config.cc
  online-lm-config.cc
  online-model-config.cc
+ online-nemo-ctc-model-config.cc
  online-paraformer-model-config.cc
  online-recognizer.cc
  online-stream.cc
@@ -9,6 +9,7 @@
  #include "sherpa-onnx/csrc/online-model-config.h"
  #include "sherpa-onnx/csrc/online-transducer-model-config.h"
+ #include "sherpa-onnx/python/csrc/online-nemo-ctc-model-config.h"
  #include "sherpa-onnx/python/csrc/online-paraformer-model-config.h"
  #include "sherpa-onnx/python/csrc/online-transducer-model-config.h"
  #include "sherpa-onnx/python/csrc/online-wenet-ctc-model-config.h"
@@ -21,26 +22,30 @@ void PybindOnlineModelConfig(py::module *m) {
  PybindOnlineParaformerModelConfig(m);
  PybindOnlineWenetCtcModelConfig(m);
  PybindOnlineZipformer2CtcModelConfig(m);
+ PybindOnlineNeMoCtcModelConfig(m);

  using PyClass = OnlineModelConfig;
  py::class_<PyClass>(*m, "OnlineModelConfig")
      .def(py::init<const OnlineTransducerModelConfig &,
                    const OnlineParaformerModelConfig &,
                    const OnlineWenetCtcModelConfig &,
-                   const OnlineZipformer2CtcModelConfig &, const std::string &,
+                   const OnlineZipformer2CtcModelConfig &,
+                   const OnlineNeMoCtcModelConfig &, const std::string &,
                    int32_t, int32_t, bool, const std::string &,
                    const std::string &>(),
           py::arg("transducer") = OnlineTransducerModelConfig(),
           py::arg("paraformer") = OnlineParaformerModelConfig(),
           py::arg("wenet_ctc") = OnlineWenetCtcModelConfig(),
           py::arg("zipformer2_ctc") = OnlineZipformer2CtcModelConfig(),
-          py::arg("tokens"), py::arg("num_threads"), py::arg("warm_up") = 0,
+          py::arg("nemo_ctc") = OnlineNeMoCtcModelConfig(), py::arg("tokens"),
+          py::arg("num_threads"), py::arg("warm_up") = 0,
           py::arg("debug") = false, py::arg("provider") = "cpu",
           py::arg("model_type") = "")
      .def_readwrite("transducer", &PyClass::transducer)
      .def_readwrite("paraformer", &PyClass::paraformer)
      .def_readwrite("wenet_ctc", &PyClass::wenet_ctc)
      .def_readwrite("zipformer2_ctc", &PyClass::zipformer2_ctc)
+     .def_readwrite("nemo_ctc", &PyClass::nemo_ctc)
      .def_readwrite("tokens", &PyClass::tokens)
      .def_readwrite("num_threads", &PyClass::num_threads)
      .def_readwrite("debug", &PyClass::debug)
sherpa-onnx/python/csrc/online-nemo-ctc-model-config.cc (new file, 22 lines)
@@ -0,0 +1,22 @@
// sherpa-onnx/python/csrc/online-nemo-ctc-model-config.cc
//
// Copyright (c) 2024 Xiaomi Corporation

#include "sherpa-onnx/python/csrc/online-nemo-ctc-model-config.h"

#include <string>
#include <vector>

#include "sherpa-onnx/csrc/online-nemo-ctc-model-config.h"

namespace sherpa_onnx {

void PybindOnlineNeMoCtcModelConfig(py::module *m) {
  using PyClass = OnlineNeMoCtcModelConfig;
  py::class_<PyClass>(*m, "OnlineNeMoCtcModelConfig")
      .def(py::init<const std::string &>(), py::arg("model"))
      .def_readwrite("model", &PyClass::model)
      .def("__str__", &PyClass::ToString);
}

} // namespace sherpa_onnx
sherpa-onnx/python/csrc/online-nemo-ctc-model-config.h (new file, 16 lines)
@@ -0,0 +1,16 @@
// sherpa-onnx/python/csrc/online-nemo-ctc-model-config.h
//
// Copyright (c) 2024 Xiaomi Corporation

#ifndef SHERPA_ONNX_PYTHON_CSRC_ONLINE_NEMO_CTC_MODEL_CONFIG_H_
#define SHERPA_ONNX_PYTHON_CSRC_ONLINE_NEMO_CTC_MODEL_CONFIG_H_

#include "sherpa-onnx/python/csrc/sherpa-onnx.h"

namespace sherpa_onnx {

void PybindOnlineNeMoCtcModelConfig(py::module *m);

}

#endif // SHERPA_ONNX_PYTHON_CSRC_ONLINE_NEMO_CTC_MODEL_CONFIG_H_
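Usage sketch (not part of the diff): with the two bindings above, a streaming NeMo CTC model is selected from Python by filling the new nemo_ctc field of OnlineModelConfig. The model and token paths below are placeholders, and the import mirrors the one the Python wrapper itself uses:

    from _sherpa_onnx import OnlineModelConfig, OnlineNeMoCtcModelConfig

    # Placeholder paths; point them at a streaming NeMo CTC model and its tokens.txt.
    nemo_ctc = OnlineNeMoCtcModelConfig(model="./nemo-ctc/model.onnx")
    print(nemo_ctc)  # __str__ is bound to ToString(), so this prints the config

    model_config = OnlineModelConfig(
        nemo_ctc=nemo_ctc,
        tokens="./nemo-ctc/tokens.txt",
        num_threads=2,
        provider="cpu",
        debug=False,
    )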
@@ -42,6 +42,8 @@ static void PybindOnlineRecognizerResult(py::module *m) {
          "segment", [](PyClass &self) -> int32_t { return self.segment; })
      .def_property_readonly(
          "is_final", [](PyClass &self) -> bool { return self.is_final; })
+     .def("__str__", &PyClass::AsJsonString,
+          py::call_guard<py::gil_scoped_release>())
      .def("as_json_string", &PyClass::AsJsonString,
           py::call_guard<py::gil_scoped_release>());
}
@@ -50,29 +52,17 @@ static void PybindOnlineRecognizerConfig(py::module *m) {
  using PyClass = OnlineRecognizerConfig;
  py::class_<PyClass>(*m, "OnlineRecognizerConfig")
      .def(
-         py::init<const FeatureExtractorConfig &,
-                  const OnlineModelConfig &,
-                  const OnlineLMConfig &,
-                  const EndpointConfig &,
-                  const OnlineCtcFstDecoderConfig &,
-                  bool,
-                  const std::string &,
-                  int32_t,
-                  const std::string &,
-                  float,
-                  float,
-                  float>(),
-         py::arg("feat_config"),
-         py::arg("model_config"),
+         py::init<const FeatureExtractorConfig &, const OnlineModelConfig &,
+                  const OnlineLMConfig &, const EndpointConfig &,
+                  const OnlineCtcFstDecoderConfig &, bool, const std::string &,
+                  int32_t, const std::string &, float, float, float>(),
+         py::arg("feat_config"), py::arg("model_config"),
          py::arg("lm_config") = OnlineLMConfig(),
          py::arg("endpoint_config") = EndpointConfig(),
          py::arg("ctc_fst_decoder_config") = OnlineCtcFstDecoderConfig(),
-         py::arg("enable_endpoint"),
-         py::arg("decoding_method"),
-         py::arg("max_active_paths") = 4,
-         py::arg("hotwords_file") = "",
-         py::arg("hotwords_score") = 0,
-         py::arg("blank_penalty") = 0.0,
+         py::arg("enable_endpoint"), py::arg("decoding_method"),
+         py::arg("max_active_paths") = 4, py::arg("hotwords_file") = "",
+         py::arg("hotwords_score") = 0, py::arg("blank_penalty") = 0.0,
          py::arg("temperature_scale") = 2.0)
      .def_readwrite("feat_config", &PyClass::feat_config)
      .def_readwrite("model_config", &PyClass::model_config)
@@ -12,9 +12,11 @@ from _sherpa_onnx import (
  from _sherpa_onnx import OnlineRecognizer as _Recognizer
  from _sherpa_onnx import (
      OnlineRecognizerConfig,
      OnlineRecognizerResult,
      OnlineStream,
      OnlineTransducerModelConfig,
      OnlineWenetCtcModelConfig,
+     OnlineNeMoCtcModelConfig,
      OnlineZipformer2CtcModelConfig,
      OnlineCtcFstDecoderConfig,
  )
@@ -59,6 +61,7 @@ class OnlineRecognizer(object):
  lm: str = "",
  lm_scale: float = 0.1,
  temperature_scale: float = 2.0,
+ debug: bool = False,
  ):
      """
      Please refer to
@@ -154,6 +157,7 @@ class OnlineRecognizer(object):
  num_threads=num_threads,
  provider=provider,
  model_type=model_type,
+ debug=debug,
  )

  feat_config = FeatureExtractorConfig(
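Usage sketch (not part of the diff): the new debug flag is only forwarded into OnlineModelConfig; when it is True, the model's metadata is shown while the model is loaded. The paths below are placeholders, and the remaining keyword arguments are assumed to follow the existing from_transducer signature:

    import sherpa_onnx

    recognizer = sherpa_onnx.OnlineRecognizer.from_transducer(
        tokens="./tokens.txt",
        encoder="./encoder.onnx",
        decoder="./decoder.onnx",
        joiner="./joiner.onnx",
        num_threads=2,
        debug=True,  # new in this commit: show model metadata at load time
    )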
@@ -220,6 +224,7 @@ class OnlineRecognizer(object):
  rule3_min_utterance_length: float = 20.0,
  decoding_method: str = "greedy_search",
  provider: str = "cpu",
+ debug: bool = False,
  ):
      """
      Please refer to
@@ -283,6 +288,7 @@ class OnlineRecognizer(object):
  num_threads=num_threads,
  provider=provider,
  model_type="paraformer",
+ debug=debug,
  )

  feat_config = FeatureExtractorConfig(
@@ -324,6 +330,7 @@ class OnlineRecognizer(object):
  ctc_graph: str = "",
  ctc_max_active: int = 3000,
  provider: str = "cpu",
+ debug: bool = False,
  ):
      """
      Please refer to
@@ -386,6 +393,7 @@ class OnlineRecognizer(object):
  tokens=tokens,
  num_threads=num_threads,
  provider=provider,
+ debug=debug,
  )

  feat_config = FeatureExtractorConfig(
@@ -417,6 +425,106 @@ class OnlineRecognizer(object):
      self.config = recognizer_config
      return self

+ @classmethod
+ def from_nemo_ctc(
+     cls,
+     tokens: str,
+     model: str,
+     num_threads: int = 2,
+     sample_rate: float = 16000,
+     feature_dim: int = 80,
+     enable_endpoint_detection: bool = False,
+     rule1_min_trailing_silence: float = 2.4,
+     rule2_min_trailing_silence: float = 1.2,
+     rule3_min_utterance_length: float = 20.0,
+     decoding_method: str = "greedy_search",
+     provider: str = "cpu",
+     debug: bool = False,
+ ):
+     """
+     Please refer to
+     `<https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models>`_
+     to download pre-trained models.
+
+     Args:
+       tokens:
+         Path to ``tokens.txt``. Each line in ``tokens.txt`` contains two
+         columns::
+
+             symbol integer_id
+
+       model:
+         Path to ``model.onnx``.
+       num_threads:
+         Number of threads for neural network computation.
+       sample_rate:
+         Sample rate of the training data used to train the model.
+       feature_dim:
+         Dimension of the features used to train the model.
+       enable_endpoint_detection:
+         True to enable endpoint detection. False to disable endpoint
+         detection.
+       rule1_min_trailing_silence:
+         Used only when enable_endpoint_detection is True. If the duration
+         of trailing silence in seconds is larger than this value, we assume
+         an endpoint is detected.
+       rule2_min_trailing_silence:
+         Used only when enable_endpoint_detection is True. If we have decoded
+         something that is non-silence and if the duration of trailing silence
+         in seconds is larger than this value, we assume an endpoint is
+         detected.
+       rule3_min_utterance_length:
+         Used only when enable_endpoint_detection is True. If the utterance
+         length in seconds is larger than this value, we assume an endpoint
+         is detected.
+       decoding_method:
+         The only valid value is greedy_search.
+       provider:
+         onnxruntime execution providers. Valid values are: cpu, cuda, coreml.
+       debug:
+         True to show metadata of the model.
+     """
+     self = cls.__new__(cls)
+     _assert_file_exists(tokens)
+     _assert_file_exists(model)
+
+     assert num_threads > 0, num_threads
+
+     nemo_ctc_config = OnlineNeMoCtcModelConfig(
+         model=model,
+     )
+
+     model_config = OnlineModelConfig(
+         nemo_ctc=nemo_ctc_config,
+         tokens=tokens,
+         num_threads=num_threads,
+         provider=provider,
+         debug=debug,
+     )
+
+     feat_config = FeatureExtractorConfig(
+         sampling_rate=sample_rate,
+         feature_dim=feature_dim,
+     )
+
+     endpoint_config = EndpointConfig(
+         rule1_min_trailing_silence=rule1_min_trailing_silence,
+         rule2_min_trailing_silence=rule2_min_trailing_silence,
+         rule3_min_utterance_length=rule3_min_utterance_length,
+     )
+
+     recognizer_config = OnlineRecognizerConfig(
+         feat_config=feat_config,
+         model_config=model_config,
+         endpoint_config=endpoint_config,
+         enable_endpoint=enable_endpoint_detection,
+         decoding_method=decoding_method,
+     )
+
+     self.recognizer = _Recognizer(recognizer_config)
+     self.config = recognizer_config
+     return self
+
  @classmethod
  def from_wenet_ctc(
      cls,
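Usage sketch (not part of the diff): an end-to-end example of the new factory method, following the pattern of the existing streaming recipes. The file paths are placeholders and the audio below is only a stand-in for real 16 kHz samples:

    import numpy as np
    import sherpa_onnx

    recognizer = sherpa_onnx.OnlineRecognizer.from_nemo_ctc(
        tokens="./tokens.txt",
        model="./model.onnx",
        num_threads=2,
    )

    s = recognizer.create_stream()

    # Stand-in for real audio: 1 second of silence at 16 kHz.
    samples = np.zeros(16000, dtype=np.float32)
    s.accept_waveform(16000, samples)
    s.input_finished()

    while recognizer.is_ready(s):
        recognizer.decode_stream(s)

    print(recognizer.get_result(s))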
@@ -433,6 +541,7 @@ class OnlineRecognizer(object):
  rule3_min_utterance_length: float = 20.0,
  decoding_method: str = "greedy_search",
  provider: str = "cpu",
+ debug: bool = False,
  ):
      """
      Please refer to
@@ -497,6 +606,7 @@ class OnlineRecognizer(object):
  tokens=tokens,
  num_threads=num_threads,
  provider=provider,
+ debug=debug,
  )

  feat_config = FeatureExtractorConfig(
@@ -537,6 +647,9 @@ class OnlineRecognizer(object):
  def is_ready(self, s: OnlineStream) -> bool:
      return self.recognizer.is_ready(s)

+ def get_result_all(self, s: OnlineStream) -> OnlineRecognizerResult:
+     return self.recognizer.get_result(s)
+
  def get_result(self, s: OnlineStream) -> str:
      return self.recognizer.get_result(s).text.strip()
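Usage sketch (not part of the diff), continuing the example above: get_result_all returns the full OnlineRecognizerResult instead of just the text, and the __str__ binding added earlier lets it be printed directly as JSON:

    result = recognizer.get_result_all(s)
    print(result)                        # same JSON as result.as_json_string()
    print(result.segment, result.is_final)
    print(recognizer.get_result(s))      # plain text only, stripped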