Add config for TensorRT and CUDA execution provider (#992)

Signed-off-by: manickavela1998@gmail.com <manickavela1998@gmail.com>
Signed-off-by: manickavela1998@gmail.com <manickavela.arumugam@uniphore.com>
This commit is contained in:
Manix
2024-07-05 12:48:37 +05:30
committed by GitHub
parent f5e9a162d1
commit 55decb7bee
21 changed files with 622 additions and 49 deletions

View File

@@ -3,6 +3,7 @@ include_directories(${CMAKE_SOURCE_DIR})
set(srcs
audio-tagging.cc
circular-buffer.cc
cuda-config.cc
display.cc
endpoint.cc
features.cc
@@ -30,11 +31,13 @@ set(srcs
online-transducer-model-config.cc
online-wenet-ctc-model-config.cc
online-zipformer2-ctc-model-config.cc
provider-config.cc
sherpa-onnx.cc
silero-vad-model-config.cc
speaker-embedding-extractor.cc
speaker-embedding-manager.cc
spoken-language-identification.cc
tensorrt-config.cc
vad-model-config.cc
vad-model.cc
voice-activity-detector.cc

View File

@@ -0,0 +1,24 @@
// sherpa-onnx/python/csrc/cuda-config.cc
//
// Copyright (c) 2024 Uniphore (Author: Manickavela A)
#include "sherpa-onnx/python/csrc/cuda-config.h"
#include <memory>
#include <string>
#include "sherpa-onnx/csrc/provider-config.h"
namespace sherpa_onnx {

// Exposes sherpa_onnx::CudaConfig to Python as "CudaConfig".
//
// Python signature:
//   CudaConfig(cudnn_conv_algo_search: int = 1)
void PybindCudaConfig(py::module *m) {
  py::class_<CudaConfig>(*m, "CudaConfig")
      .def(py::init<>())
      .def(py::init<int32_t>(), py::arg("cudnn_conv_algo_search") = 1)
      .def_readwrite("cudnn_conv_algo_search",
                     &CudaConfig::cudnn_conv_algo_search)
      .def("__str__", &CudaConfig::ToString);
}

}  // namespace sherpa_onnx

View File

@@ -0,0 +1,16 @@
// sherpa-onnx/python/csrc/cuda-config.h
//
// Copyright (c) 2024 Uniphore (Author: Manickavela A)
#ifndef SHERPA_ONNX_PYTHON_CSRC_CUDA_CONFIG_H_
#define SHERPA_ONNX_PYTHON_CSRC_CUDA_CONFIG_H_
#include "sherpa-onnx/python/csrc/sherpa-onnx.h"
namespace sherpa_onnx {
// Registers the CudaConfig Python binding on module *m.
void PybindCudaConfig(py::module *m);
}  // namespace sherpa_onnx
#endif  // SHERPA_ONNX_PYTHON_CSRC_CUDA_CONFIG_H_

View File

@@ -9,11 +9,13 @@
#include "sherpa-onnx/csrc/online-model-config.h"
#include "sherpa-onnx/csrc/online-transducer-model-config.h"
#include "sherpa-onnx/csrc/provider-config.h"
#include "sherpa-onnx/python/csrc/online-nemo-ctc-model-config.h"
#include "sherpa-onnx/python/csrc/online-paraformer-model-config.h"
#include "sherpa-onnx/python/csrc/online-transducer-model-config.h"
#include "sherpa-onnx/python/csrc/online-wenet-ctc-model-config.h"
#include "sherpa-onnx/python/csrc/online-zipformer2-ctc-model-config.h"
#include "sherpa-onnx/python/csrc/provider-config.h"
namespace sherpa_onnx {
@@ -23,6 +25,7 @@ void PybindOnlineModelConfig(py::module *m) {
PybindOnlineWenetCtcModelConfig(m);
PybindOnlineZipformer2CtcModelConfig(m);
PybindOnlineNeMoCtcModelConfig(m);
PybindProviderConfig(m);
using PyClass = OnlineModelConfig;
py::class_<PyClass>(*m, "OnlineModelConfig")
@@ -30,33 +33,34 @@ void PybindOnlineModelConfig(py::module *m) {
const OnlineParaformerModelConfig &,
const OnlineWenetCtcModelConfig &,
const OnlineZipformer2CtcModelConfig &,
const OnlineNeMoCtcModelConfig &, const std::string &,
int32_t, int32_t, bool, const std::string &,
const std::string &, const std::string &,
const OnlineNeMoCtcModelConfig &,
const ProviderConfig &,
const std::string &, int32_t, int32_t,
bool, const std::string &, const std::string &,
const std::string &>(),
py::arg("transducer") = OnlineTransducerModelConfig(),
py::arg("paraformer") = OnlineParaformerModelConfig(),
py::arg("wenet_ctc") = OnlineWenetCtcModelConfig(),
py::arg("zipformer2_ctc") = OnlineZipformer2CtcModelConfig(),
py::arg("nemo_ctc") = OnlineNeMoCtcModelConfig(), py::arg("tokens"),
py::arg("num_threads"), py::arg("warm_up") = 0,
py::arg("debug") = false, py::arg("provider") = "cpu",
py::arg("model_type") = "", py::arg("modeling_unit") = "",
py::arg("bpe_vocab") = "")
py::arg("nemo_ctc") = OnlineNeMoCtcModelConfig(),
py::arg("provider_config") = ProviderConfig(),
py::arg("tokens"), py::arg("num_threads"), py::arg("warm_up") = 0,
py::arg("debug") = false, py::arg("model_type") = "",
py::arg("modeling_unit") = "", py::arg("bpe_vocab") = "")
.def_readwrite("transducer", &PyClass::transducer)
.def_readwrite("paraformer", &PyClass::paraformer)
.def_readwrite("wenet_ctc", &PyClass::wenet_ctc)
.def_readwrite("zipformer2_ctc", &PyClass::zipformer2_ctc)
.def_readwrite("nemo_ctc", &PyClass::nemo_ctc)
.def_readwrite("provider_config", &PyClass::provider_config)
.def_readwrite("tokens", &PyClass::tokens)
.def_readwrite("num_threads", &PyClass::num_threads)
.def_readwrite("warm_up", &PyClass::warm_up)
.def_readwrite("debug", &PyClass::debug)
.def_readwrite("provider", &PyClass::provider)
.def_readwrite("model_type", &PyClass::model_type)
.def_readwrite("modeling_unit", &PyClass::modeling_unit)
.def_readwrite("bpe_vocab", &PyClass::bpe_vocab)
.def("validate", &PyClass::Validate)
.def("__str__", &PyClass::ToString);
}
} // namespace sherpa_onnx

View File

@@ -0,0 +1,39 @@
// sherpa-onnx/python/csrc/provider-config.cc
//
// Copyright (c) 2024 Uniphore (Author: Manickavela A)
#include "sherpa-onnx/python/csrc/provider-config.h"
#include <string>
#include "sherpa-onnx/csrc/provider-config.h"
#include "sherpa-onnx/python/csrc/cuda-config.h"
#include "sherpa-onnx/python/csrc/tensorrt-config.h"
namespace sherpa_onnx {

// Exposes sherpa_onnx::ProviderConfig to Python as "ProviderConfig".
void PybindProviderConfig(py::module *m) {
  // Register the nested config types first so their Python classes exist
  // before ProviderConfig's constructor defaults reference them.
  PybindCudaConfig(m);
  PybindTensorrtConfig(m);

  py::class_<ProviderConfig>(*m, "ProviderConfig")
      .def(py::init<>())
      // CPU/simple-provider form: ProviderConfig(provider="cpu", device=0)
      .def(py::init<const std::string &, int32_t>(),
           py::arg("provider") = "cpu", py::arg("device") = 0)
      // Full form with TensorRT and CUDA sub-configs.
      .def(py::init<const TensorrtConfig &, const CudaConfig &,
                    const std::string &, int32_t>(),
           py::arg("trt_config") = TensorrtConfig(),
           py::arg("cuda_config") = CudaConfig(),
           py::arg("provider") = "cpu", py::arg("device") = 0)
      .def_readwrite("trt_config", &ProviderConfig::trt_config)
      .def_readwrite("cuda_config", &ProviderConfig::cuda_config)
      .def_readwrite("provider", &ProviderConfig::provider)
      .def_readwrite("device", &ProviderConfig::device)
      .def("__str__", &ProviderConfig::ToString)
      .def("validate", &ProviderConfig::Validate);
}

}  // namespace sherpa_onnx

View File

@@ -0,0 +1,16 @@
// sherpa-onnx/python/csrc/provider-config.h
//
// Copyright (c) 2024 Uniphore (Author: Manickavela A)
#ifndef SHERPA_ONNX_PYTHON_CSRC_PROVIDER_CONFIG_H_
#define SHERPA_ONNX_PYTHON_CSRC_PROVIDER_CONFIG_H_
#include "sherpa-onnx/python/csrc/sherpa-onnx.h"
namespace sherpa_onnx {
// Registers the ProviderConfig Python binding (and its nested CUDA /
// TensorRT config bindings) on module *m.
void PybindProviderConfig(py::module *m);
}  // namespace sherpa_onnx
#endif  // SHERPA_ONNX_PYTHON_CSRC_PROVIDER_CONFIG_H_

View File

@@ -51,7 +51,6 @@ PYBIND11_MODULE(_sherpa_onnx, m) {
PybindEndpoint(&m);
PybindOnlineRecognizer(&m);
PybindKeywordSpotter(&m);
PybindDisplay(&m);
PybindOfflineStream(&m);

View File

@@ -0,0 +1,72 @@
// sherpa-onnx/python/csrc/tensorrt-config.cc
//
// Copyright (c) 2024 Uniphore (Author: Manickavela A)
#include "sherpa-onnx/python/csrc/tensorrt-config.h"
#include <string>
#include <memory>
#include "sherpa-onnx/csrc/provider-config.h"
namespace sherpa_onnx {

// Exposes sherpa_onnx::TensorrtConfig to Python as "TensorrtConfig".
//
// A factory lambda is used instead of a plain py::init<...> so that each
// keyword argument can be assigned to the matching struct field by name.
void PybindTensorrtConfig(py::module *m) {
  py::class_<TensorrtConfig>(*m, "TensorrtConfig")
      .def(py::init<>())
      .def(py::init([](int32_t max_workspace_size,
                       int32_t max_partition_iterations,
                       int32_t min_subgraph_size, bool fp16_enable,
                       bool detailed_build_log, bool engine_cache_enable,
                       bool timing_cache_enable,
                       const std::string &engine_cache_path,
                       const std::string &timing_cache_path,
                       bool dump_subgraphs) -> std::unique_ptr<TensorrtConfig> {
             auto config = std::make_unique<TensorrtConfig>();
             config->trt_max_workspace_size = max_workspace_size;
             config->trt_max_partition_iterations = max_partition_iterations;
             config->trt_min_subgraph_size = min_subgraph_size;
             config->trt_fp16_enable = fp16_enable;
             config->trt_detailed_build_log = detailed_build_log;
             config->trt_engine_cache_enable = engine_cache_enable;
             config->trt_timing_cache_enable = timing_cache_enable;
             config->trt_engine_cache_path = engine_cache_path;
             config->trt_timing_cache_path = timing_cache_path;
             config->trt_dump_subgraphs = dump_subgraphs;
             return config;
           }),
           // 2147483647 == INT32_MAX, i.e. the largest workspace TensorRT
           // accepts through this int32_t field.
           py::arg("trt_max_workspace_size") = 2147483647,
           py::arg("trt_max_partition_iterations") = 10,
           py::arg("trt_min_subgraph_size") = 5,
           py::arg("trt_fp16_enable") = true,
           py::arg("trt_detailed_build_log") = false,
           py::arg("trt_engine_cache_enable") = true,
           py::arg("trt_timing_cache_enable") = true,
           py::arg("trt_engine_cache_path") = ".",
           py::arg("trt_timing_cache_path") = ".",
           py::arg("trt_dump_subgraphs") = false)
      .def_readwrite("trt_max_workspace_size",
                     &TensorrtConfig::trt_max_workspace_size)
      .def_readwrite("trt_max_partition_iterations",
                     &TensorrtConfig::trt_max_partition_iterations)
      .def_readwrite("trt_min_subgraph_size",
                     &TensorrtConfig::trt_min_subgraph_size)
      .def_readwrite("trt_fp16_enable", &TensorrtConfig::trt_fp16_enable)
      .def_readwrite("trt_detailed_build_log",
                     &TensorrtConfig::trt_detailed_build_log)
      .def_readwrite("trt_engine_cache_enable",
                     &TensorrtConfig::trt_engine_cache_enable)
      .def_readwrite("trt_timing_cache_enable",
                     &TensorrtConfig::trt_timing_cache_enable)
      .def_readwrite("trt_engine_cache_path",
                     &TensorrtConfig::trt_engine_cache_path)
      .def_readwrite("trt_timing_cache_path",
                     &TensorrtConfig::trt_timing_cache_path)
      .def_readwrite("trt_dump_subgraphs", &TensorrtConfig::trt_dump_subgraphs)
      .def("__str__", &TensorrtConfig::ToString)
      .def("validate", &TensorrtConfig::Validate);
}

}  // namespace sherpa_onnx

View File

@@ -0,0 +1,16 @@
// sherpa-onnx/python/csrc/tensorrt-config.h
//
// Copyright (c) 2024 Uniphore (Author: Manickavela A)
#ifndef SHERPA_ONNX_PYTHON_CSRC_TENSORRT_CONFIG_H_
#define SHERPA_ONNX_PYTHON_CSRC_TENSORRT_CONFIG_H_
#include "sherpa-onnx/python/csrc/sherpa-onnx.h"
namespace sherpa_onnx {
// Registers the TensorrtConfig Python binding on module *m.
void PybindTensorrtConfig(py::module *m);
}  // namespace sherpa_onnx
#endif  // SHERPA_ONNX_PYTHON_CSRC_TENSORRT_CONFIG_H_