RNNLM model supports lm_num_thread and lm_provider settings (#173)

* rnnlm model inference supports the num_threads setting
* rnnlm params decouple num_thread and provider from the Transducer settings
* fix a bug in the Python csrc wrappers caused by the argument handling in offline-lm-config.cc and online-lm-config.cc
* lm_num_threads and lm_provider are given default values

---------

Co-authored-by: cuidongcai1035 <cuidongcai1035@wezhuiyi.com>
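The decoupling means the LM gets its own thread count and execution provider instead of reusing the transducer's. Below is a minimal sketch of what the decoupled OnlineLMConfig could look like; apart from lm_num_threads and lm_provider, which the commit message names, the field names and default values are assumptions rather than the exact sherpa-onnx definition.

// Sketch of the decoupled LM config (assumed shape, not the exact
// sherpa-onnx definition).
#include <cstdint>
#include <string>

namespace sherpa_onnx {

struct OnlineLMConfig {
  std::string model;                // path to the RNNLM onnx model (assumed field)
  float scale = 0.5f;               // LM fusion scale (assumed field and default)
  int32_t lm_num_threads = 1;       // default value assumed; added by this commit
  std::string lm_provider = "cpu";  // default value assumed; added by this commit
};

}  // namespace sherpa_onnx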
@@ -11,7 +11,7 @@

 #include "onnxruntime_cxx_api.h"  // NOLINT
 #include "sherpa-onnx/csrc/hypothesis.h"
-#include "sherpa-onnx/csrc/online-recognizer.h"
+#include "sherpa-onnx/csrc/online-lm-config.h"

 namespace sherpa_onnx {

@@ -19,7 +19,7 @@ class OnlineLM {
  public:
  virtual ~OnlineLM() = default;

-  static std::unique_ptr<OnlineLM> Create(const OnlineRecognizerConfig &config);
+  static std::unique_ptr<OnlineLM> Create(const OnlineLMConfig &config);

  virtual std::pair<Ort::Value, std::vector<Ort::Value>> GetInitStates() = 0;

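Since Create now takes an OnlineLMConfig directly, callers can construct the LM without assembling a full OnlineRecognizerConfig. A hedged usage sketch, assuming the config shape sketched above and that the diffed header lives at sherpa-onnx/csrc/online-lm.h:

#include <memory>

#include "sherpa-onnx/csrc/online-lm.h"  // header path assumed

int main() {
  sherpa_onnx::OnlineLMConfig lm_config;
  lm_config.model = "rnnlm.onnx";    // hypothetical model path
  lm_config.lm_num_threads = 2;      // LM-specific thread count, independent of the transducer
  lm_config.lm_provider = "cpu";     // LM-specific execution provider

  std::unique_ptr<sherpa_onnx::OnlineLM> lm =
      sherpa_onnx::OnlineLM::Create(lm_config);

  // Per the diff, GetInitStates() returns
  // std::pair<Ort::Value, std::vector<Ort::Value>>.
  auto states = lm->GetInitStates();
  return 0;
}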