enginex-mr_series-sherpa-onnx/sherpa-onnx/csrc/offline-lm-config.cc
keanu 1a1b9fd236 RNNLM model supports lm_num_threads and lm_provider settings (#173)
* RNNLM model inference supports the num_threads setting

* Decouple the RNNLM num_threads and provider parameters from the transducer's.

* Fix a Python bindings bug with the arguments in offline-lm-config.cc and online-lm-config.cc

* Set default values for lm_num_threads and lm_provider

---------

Co-authored-by: cuidongcai1035 <cuidongcai1035@wezhuiyi.com>
2023-06-12 15:51:27 +08:00
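
For context, here is a minimal sketch of how this config plugs into a command-line tool, assuming the Kaldi-style ParseOptions interface (Register/Read) that sherpa-onnx ships in sherpa-onnx/csrc/parse-options.h. The driver, the binary, and the flag values are illustrative, not part of this commit:

// Hypothetical driver: wire OfflineLMConfig into ParseOptions, parse the
// command line, and validate the result.
#include <cstdio>

#include "sherpa-onnx/csrc/offline-lm-config.h"
#include "sherpa-onnx/csrc/parse-options.h"

int main(int argc, char *argv[]) {
  sherpa_onnx::ParseOptions po("Demo of the decoupled RNN LM options.");

  sherpa_onnx::OfflineLMConfig lm_config;
  lm_config.Register(&po);  // adds --lm, --lm-scale, --lm-num-threads,
                            // --lm-provider

  po.Read(argc, argv);

  if (!lm_config.Validate()) {  // logs an error if the --lm file is missing
    return 1;
  }

  fprintf(stderr, "%s\n", lm_config.ToString().c_str());
  return 0;
}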

// sherpa-onnx/csrc/offline-lm-config.cc
//
// Copyright (c) 2023 Xiaomi Corporation

#include "sherpa-onnx/csrc/offline-lm-config.h"

#include <sstream>
#include <string>

#include "sherpa-onnx/csrc/file-utils.h"
#include "sherpa-onnx/csrc/macros.h"

namespace sherpa_onnx {

void OfflineLMConfig::Register(ParseOptions *po) {
  po->Register("lm", &model, "Path to the LM model.");
  po->Register("lm-scale", &scale, "LM scale.");
  po->Register("lm-num-threads", &lm_num_threads,
               "Number of threads to run the neural network of the LM model.");
  po->Register("lm-provider", &lm_provider,
               "Specify the provider for the LM model to use: cpu, cuda, coreml.");
}
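
// A hypothetical invocation exercising the flags registered above (the
// binary name is illustrative; only the flag names come from this file):
//
//   ./offline-decoder --lm=/path/to/rnnlm.onnx --lm-scale=0.5 \
//       --lm-num-threads=2 --lm-provider=cpu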

bool OfflineLMConfig::Validate() const {
  if (!FileExists(model)) {
    SHERPA_ONNX_LOGE("%s does not exist", model.c_str());
    return false;
  }

  return true;
}

std::string OfflineLMConfig::ToString() const {
  std::ostringstream os;

  os << "OfflineLMConfig(";
  os << "model=\"" << model << "\", ";
  os << "scale=" << scale << ")";

  return os.str();
}

}  // namespace sherpa_onnx
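
Keeping lm_num_threads and lm_provider separate from the transducer's num_threads and provider lets the LM run on a different backend, for example the LM on CPU while the transducer uses CUDA. As a complementary sketch, the config can also be filled in directly; the field names below are exactly those whose addresses Register() takes above, while the values and the assumption of public, default-initialized members are placeholders:

#include <cstdio>

#include "sherpa-onnx/csrc/offline-lm-config.h"

void ConfigureLmByHand() {
  sherpa_onnx::OfflineLMConfig lm_config;
  lm_config.model = "/models/rnnlm.onnx";  // placeholder path
  lm_config.scale = 0.5f;
  lm_config.lm_num_threads = 2;   // decoupled from the transducer's threads
  lm_config.lm_provider = "cpu";  // cpu, cuda, or coreml

  if (lm_config.Validate()) {
    // Prints: OfflineLMConfig(model="/models/rnnlm.onnx", scale=0.5)
    fprintf(stderr, "%s\n", lm_config.ToString().c_str());
  }
}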