enginex_bi_series-sherpa-onnx/sherpa-onnx/csrc/online-lm-config.h
keanu 1a1b9fd236 RNNLM model supports lm_num_threads and lm_provider settings (#173)

* RNNLM model inference supports a num_threads setting

* RNNLM params decouple lm_num_threads and lm_provider from the transducer's settings (see the sketch below)

* fix a Python/csrc bug with the argument handling in offline-lm-config.cc and online-lm-config.cc

* set default values for lm_num_threads and lm_provider

---------

Co-authored-by: cuidongcai1035 <cuidongcai1035@wezhuiyi.com>
2023-06-12 15:51:27 +08:00
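The Register, Validate, and ToString members are declared in the header below but defined in online-lm-config.cc, which this page does not show. As a minimal sketch of how the decoupled options likely surface on the command line, assuming a Kaldi-style ParseOptions::Register(name, ptr, doc) API; the option names and help strings here are assumptions, not the authoritative definitions:

void OnlineLMConfig::Register(ParseOptions *po) {
  // Option names are assumptions inferred from the field names in the
  // header; the real list lives in online-lm-config.cc.
  po->Register("lm", &model, "Path to the RNN LM onnx model");
  po->Register("lm-scale", &scale,
               "Scale applied to LM scores during decoding");
  po->Register("lm-num-threads", &lm_num_threads,
               "Number of threads to run the LM model, decoupled from the "
               "transducer's num-threads");
  po->Register("lm-provider", &lm_provider,
               "onnxruntime execution provider for the LM model, e.g., cpu");
}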

40 lines · 896 B · C++

// sherpa-onnx/csrc/online-lm-config.h
//
// Copyright (c)  2023  Xiaomi Corporation

#ifndef SHERPA_ONNX_CSRC_ONLINE_LM_CONFIG_H_
#define SHERPA_ONNX_CSRC_ONLINE_LM_CONFIG_H_

#include <string>

#include "sherpa-onnx/csrc/parse-options.h"

namespace sherpa_onnx {

struct OnlineLMConfig {
  // path to the onnx model
  std::string model;

  // LM scale
  float scale = 0.5;

  // number of threads used to run the LM model, decoupled from the
  // transducer's thread setting
  int32_t lm_num_threads = 1;

  // onnxruntime execution provider for the LM model, e.g., "cpu"
  std::string lm_provider = "cpu";

  OnlineLMConfig() = default;

  OnlineLMConfig(const std::string &model, float scale, int32_t lm_num_threads,
                 const std::string &lm_provider)
      : model(model),
        scale(scale),
        lm_num_threads(lm_num_threads),
        lm_provider(lm_provider) {}

  void Register(ParseOptions *po);
  bool Validate() const;
  std::string ToString() const;
};

}  // namespace sherpa_onnx

#endif  // SHERPA_ONNX_CSRC_ONLINE_LM_CONFIG_H_
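For completeness, a minimal usage sketch that exercises only what the header declares (the four-argument constructor, Validate(), and ToString()); the model path ./rnnlm.onnx is a hypothetical placeholder:

#include <iostream>

#include "sherpa-onnx/csrc/online-lm-config.h"

int main() {
  // "./rnnlm.onnx" is a placeholder path; Validate() is expected to fail
  // if the model file does not exist.
  sherpa_onnx::OnlineLMConfig config("./rnnlm.onnx", /*scale=*/0.5f,
                                     /*lm_num_threads=*/2,
                                     /*lm_provider=*/"cpu");
  if (!config.Validate()) {
    std::cerr << "Invalid LM config: " << config.ToString() << "\n";
    return 1;
  }
  std::cout << config.ToString() << "\n";
  return 0;
}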