Add C++ and Python API for Dolphin CTC models (#2085)

This commit is contained in:
Fangjun Kuang
2025-04-02 19:09:00 +08:00
committed by GitHub
parent 1316719e23
commit 0de7e1b9f0
27 changed files with 671 additions and 26 deletions

View File

@@ -9,6 +9,7 @@ set(srcs
features.cc
keyword-spotter.cc
offline-ctc-fst-decoder-config.cc
offline-dolphin-model-config.cc
offline-fire-red-asr-model-config.cc
offline-lm-config.cc
offline-model-config.cc

View File

@@ -0,0 +1,23 @@
// sherpa-onnx/python/csrc/offline-dolphin-model-config.cc
//
// Copyright (c) 2025 Xiaomi Corporation
#include "sherpa-onnx/csrc/offline-dolphin-model-config.h"
#include <string>
#include <vector>
#include "sherpa-onnx/python/csrc/offline-dolphin-model-config.h"
namespace sherpa_onnx {
// Registers OfflineDolphinModelConfig with the given pybind11 module so it
// is visible from Python as sherpa_onnx.OfflineDolphinModelConfig.
void PybindOfflineDolphinModelConfig(py::module *m) {
  using PyClass = OfflineDolphinModelConfig;

  auto config_class = py::class_<PyClass>(*m, "OfflineDolphinModelConfig");

  // Default constructor plus a convenience constructor taking the model path.
  config_class.def(py::init<>());
  config_class.def(py::init<const std::string &>(), py::arg("model"));

  // Path to model.onnx or model.int8.onnx.
  config_class.def_readwrite("model", &PyClass::model);

  // str(config) in Python delegates to the C++ ToString().
  config_class.def("__str__", &PyClass::ToString);
}
} // namespace sherpa_onnx

View File

@@ -0,0 +1,16 @@
// sherpa-onnx/python/csrc/offline-dolphin-model-config.h
//
// Copyright (c) 2025 Xiaomi Corporation
#ifndef SHERPA_ONNX_PYTHON_CSRC_OFFLINE_DOLPHIN_MODEL_CONFIG_H_
#define SHERPA_ONNX_PYTHON_CSRC_OFFLINE_DOLPHIN_MODEL_CONFIG_H_
#include "sherpa-onnx/python/csrc/sherpa-onnx.h"
namespace sherpa_onnx {
// Binds OfflineDolphinModelConfig into the given pybind11 module.
// Implemented in sherpa-onnx/python/csrc/offline-dolphin-model-config.cc.
void PybindOfflineDolphinModelConfig(py::module *m);
}  // namespace sherpa_onnx
#endif  // SHERPA_ONNX_PYTHON_CSRC_OFFLINE_DOLPHIN_MODEL_CONFIG_H_

View File

@@ -8,6 +8,7 @@
#include <vector>
#include "sherpa-onnx/csrc/offline-model-config.h"
#include "sherpa-onnx/python/csrc/offline-dolphin-model-config.h"
#include "sherpa-onnx/python/csrc/offline-fire-red-asr-model-config.h"
#include "sherpa-onnx/python/csrc/offline-moonshine-model-config.h"
#include "sherpa-onnx/python/csrc/offline-nemo-enc-dec-ctc-model-config.h"
@@ -32,6 +33,7 @@ void PybindOfflineModelConfig(py::module *m) {
PybindOfflineWenetCtcModelConfig(m);
PybindOfflineSenseVoiceModelConfig(m);
PybindOfflineMoonshineModelConfig(m);
PybindOfflineDolphinModelConfig(m);
using PyClass = OfflineModelConfig;
py::class_<PyClass>(*m, "OfflineModelConfig")
@@ -44,7 +46,8 @@ void PybindOfflineModelConfig(py::module *m) {
const OfflineZipformerCtcModelConfig &,
const OfflineWenetCtcModelConfig &,
const OfflineSenseVoiceModelConfig &,
const OfflineMoonshineModelConfig &, const std::string &,
const OfflineMoonshineModelConfig &,
const OfflineDolphinModelConfig &, const std::string &,
const std::string &, int32_t, bool, const std::string &,
const std::string &, const std::string &,
const std::string &>(),
@@ -58,6 +61,7 @@ void PybindOfflineModelConfig(py::module *m) {
py::arg("wenet_ctc") = OfflineWenetCtcModelConfig(),
py::arg("sense_voice") = OfflineSenseVoiceModelConfig(),
py::arg("moonshine") = OfflineMoonshineModelConfig(),
py::arg("dolphin") = OfflineDolphinModelConfig(),
py::arg("telespeech_ctc") = "", py::arg("tokens"),
py::arg("num_threads"), py::arg("debug") = false,
py::arg("provider") = "cpu", py::arg("model_type") = "",
@@ -72,6 +76,7 @@ void PybindOfflineModelConfig(py::module *m) {
.def_readwrite("wenet_ctc", &PyClass::wenet_ctc)
.def_readwrite("sense_voice", &PyClass::sense_voice)
.def_readwrite("moonshine", &PyClass::moonshine)
.def_readwrite("dolphin", &PyClass::dolphin)
.def_readwrite("telespeech_ctc", &PyClass::telespeech_ctc)
.def_readwrite("tokens", &PyClass::tokens)
.def_readwrite("num_threads", &PyClass::num_threads)

View File

@@ -6,6 +6,7 @@ from typing import List, Optional
from _sherpa_onnx import (
FeatureExtractorConfig,
OfflineCtcFstDecoderConfig,
OfflineDolphinModelConfig,
OfflineFireRedAsrModelConfig,
OfflineLMConfig,
OfflineModelConfig,
@@ -408,6 +409,78 @@ class OfflineRecognizer(object):
self.config = recognizer_config
return self
@classmethod
def from_dolphin_ctc(
    cls,
    model: str,
    tokens: str,
    num_threads: int = 1,
    sample_rate: int = 16000,
    feature_dim: int = 80,
    decoding_method: str = "greedy_search",
    debug: bool = False,
    provider: str = "cpu",
    rule_fsts: str = "",
    rule_fars: str = "",
):
    """Create an offline recognizer from a Dolphin CTC model.

    Please refer to
    `<https://k2-fsa.github.io/sherpa/onnx/dolphin/index.html>`_
    to download pre-trained models.

    Args:
      model:
        Path to ``model.onnx`` or ``model.int8.onnx``.
      tokens:
        Path to ``tokens.txt``. Each line in ``tokens.txt`` contains two
        columns::

            symbol integer_id

      num_threads:
        Number of threads for neural network computation.
      sample_rate:
        Sample rate of the training data used to train the model.
      feature_dim:
        Dimension of the feature used to train the model.
      decoding_method:
        Valid values are greedy_search.
      debug:
        True to show debug messages.
      provider:
        onnxruntime execution providers. Valid values are: cpu, cuda, coreml.
      rule_fsts:
        If not empty, it specifies fsts for inverse text normalization.
        If there are multiple fsts, they are separated by a comma.
      rule_fars:
        If not empty, it specifies fst archives for inverse text normalization.
        If there are multiple archives, they are separated by a comma.
    """
    # Bypass __init__; the instance is populated directly below.
    recognizer = cls.__new__(cls)

    feat_config = FeatureExtractorConfig(
        sampling_rate=sample_rate,
        feature_dim=feature_dim,
    )
    model_config = OfflineModelConfig(
        dolphin=OfflineDolphinModelConfig(model=model),
        tokens=tokens,
        num_threads=num_threads,
        debug=debug,
        provider=provider,
    )
    recognizer_config = OfflineRecognizerConfig(
        feat_config=feat_config,
        model_config=model_config,
        decoding_method=decoding_method,
        rule_fsts=rule_fsts,
        rule_fars=rule_fars,
    )

    recognizer.recognizer = _Recognizer(recognizer_config)
    recognizer.config = recognizer_config
    return recognizer
@classmethod
def from_nemo_ctc(
cls,