RNNLM model supports lm_num_threads and lm_provider settings (#173)

* RNNLM model inference supports a num_threads setting

* RNNLM params: decouple num_threads and provider from the Transducer settings (see the sketch after this list)

* Fix a bug in the Python csrc bindings: argument handling in offline-lm-config.cc and online-lm-config.cc

* Set default values for lm_num_threads and lm_provider
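
As a rough illustration of the decoupling described above, here is a minimal sketch of what the standalone LM config might look like. The field names are taken from the commit message; the defaults and comments are illustrative assumptions, not code from this diff.

// Hypothetical shape of the decoupled LM config; defaults are guesses.
#include <cstdint>
#include <string>

struct OnlineLMConfig {
  std::string model;                // path to the RNNLM ONNX model
  float scale = 0.5f;               // weight applied to the LM score
  int32_t lm_num_threads = 1;       // per-LM thread count, no longer
                                    // shared with the Transducer
  std::string lm_provider = "cpu";  // per-LM execution provider
};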

---------

Co-authored-by: cuidongcai1035 <cuidongcai1035@wezhuiyi.com>
Author: keanu
Date: 2023-06-12 15:51:27 +08:00
Committed by: GitHub
Parent: 13b33fcc08
Commit: 1a1b9fd236

18 changed files with 67 additions and 31 deletions

sherpa-onnx/csrc/online-rnn-lm.cc

@@ -19,12 +19,12 @@ namespace sherpa_onnx {

 class OnlineRnnLM::Impl {
  public:
-  explicit Impl(const OnlineRecognizerConfig &config)
-      : config_(config.lm_config),
+  explicit Impl(const OnlineLMConfig &config)
+      : config_(config),
         env_(ORT_LOGGING_LEVEL_ERROR),
-        sess_opts_{GetSessionOptions(config.model_config)},
+        sess_opts_{GetSessionOptions(config)},
         allocator_{} {
-    Init(config.lm_config);
+    Init(config);
   }

  void ComputeLMScore(float scale, Hypothesis *hyp) {
@@ -143,7 +143,7 @@ class OnlineRnnLM::Impl {
   int32_t sos_id_ = 1;
 };

-OnlineRnnLM::OnlineRnnLM(const OnlineRecognizerConfig &config)
+OnlineRnnLM::OnlineRnnLM(const OnlineLMConfig &config)
     : impl_(std::make_unique<Impl>(config)) {}

 OnlineRnnLM::~OnlineRnnLM() = default;
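
For context, a sketch of how the changed constructor might be driven after this commit. The include paths and field assignments are assumptions for illustration; only the OnlineRnnLM(const OnlineLMConfig &) signature comes from the diff above.

#include <memory>

#include "sherpa-onnx/csrc/online-lm-config.h"  // header locations assumed
#include "sherpa-onnx/csrc/online-rnn-lm.h"

int main() {
  sherpa_onnx::OnlineLMConfig lm_config;
  lm_config.model = "rnnlm.onnx";  // illustrative model path
  lm_config.lm_num_threads = 2;    // new per-LM thread count
  lm_config.lm_provider = "cpu";   // new per-LM execution provider

  // The LM is now constructed from OnlineLMConfig directly, rather than
  // from the whole OnlineRecognizerConfig.
  auto lm = std::make_unique<sherpa_onnx::OnlineRnnLM>(lm_config);
  return 0;
}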