* RNN-LM model inference supports a num_threads setting. * RNN-LM params decouple num_threads and provider from the Transducer's settings. * Fix a Python csrc bug with argument handling in offline-lm-config.cc and online-lm-config.cc. * Set default values for lm_num_threads and lm_provider. --------- Co-authored-by: cuidongcai1035 <cuidongcai1035@wezhuiyi.com>
21 lines
441 B
C++
21 lines
441 B
C++
// sherpa-onnx/csrc/online-lm.cc
//
// Copyright (c) 2023 Pingfeng Luo
// Copyright (c) 2023 Xiaomi Corporation

#include "sherpa-onnx/csrc/online-lm.h"

#include <algorithm>
#include <utility>
#include <vector>

#include "sherpa-onnx/csrc/online-rnn-lm.h"

namespace sherpa_onnx {
|
|
|
|
std::unique_ptr<OnlineLM> OnlineLM::Create(const OnlineLMConfig &config) {
|
|
return std::make_unique<OnlineRnnLM>(config);
|
|
}
|
|
|
|
} // namespace sherpa_onnx
|