This repository has been archived on 2025-08-26. You can view files and clone it, but cannot push or open issues or pull requests.
Files
enginex_bi_series-sherpa-onnx/sherpa-onnx/csrc/online-lm-config.h
Askars Salimbajevs f0960342ad Add LODR support to online and offline recognizers (#2026)
This PR integrates LODR (Low-Order Density Ratio) support from Icefall into both online and offline recognizers, enabling LODR for LM shallow fusion and LM rescore.

- Extended OnlineLMConfig and OfflineLMConfig to include lodr_fst, lodr_scale, and lodr_backoff_id.
- Implemented LodrFst and LodrStateCost classes and wired them into RNN LM scoring in both online and offline code paths.
- Updated Python bindings, CLI entry points, examples, and CI test scripts to accept and exercise the new LODR options.
2025-07-09 16:23:46 +08:00

51 lines
1.3 KiB
C++

// sherpa-onnx/csrc/online-lm-config.h
//
// Copyright (c) 2023 Xiaomi Corporation
#ifndef SHERPA_ONNX_CSRC_ONLINE_LM_CONFIG_H_
#define SHERPA_ONNX_CSRC_ONLINE_LM_CONFIG_H_
#include <string>
#include "sherpa-onnx/csrc/parse-options.h"
namespace sherpa_onnx {
// Configuration for the online (streaming) neural language model used in
// shallow fusion, plus optional LODR (Low-Order Density Ratio) rescoring.
struct OnlineLMConfig {
  // path to the onnx model
  std::string model;
  // LM scale applied to LM scores during decoding
  float scale = 0.5;
  // number of threads used by the LM onnxruntime session
  int32_t lm_num_threads = 1;
  // onnxruntime execution provider for the LM, e.g. "cpu"
  std::string lm_provider = "cpu";
  // path to the LODR FST; empty means LODR is disabled
  std::string lodr_fst;
  // scale applied to LODR FST scores
  float lodr_scale = 0.01;
  int32_t lodr_backoff_id = -1;  // -1 means not set
  // enable shallow fusion
  bool shallow_fusion = true;

  OnlineLMConfig() = default;

  // NOTE: members are always initialized in declaration order; the
  // mem-initializer list below is kept in that order to match and to
  // avoid -Wreorder warnings.
  OnlineLMConfig(const std::string &model, float scale, int32_t lm_num_threads,
                 const std::string &lm_provider, bool shallow_fusion,
                 const std::string &lodr_fst, float lodr_scale,
                 int32_t lodr_backoff_id)
      : model(model),
        scale(scale),
        lm_num_threads(lm_num_threads),
        lm_provider(lm_provider),
        lodr_fst(lodr_fst),
        lodr_scale(lodr_scale),
        lodr_backoff_id(lodr_backoff_id),
        shallow_fusion(shallow_fusion) {}

  // Registers command-line options for all fields with the option parser.
  void Register(ParseOptions *po);
  // Returns true if the configuration is usable (e.g. referenced files exist).
  bool Validate() const;
  // Returns a human-readable dump of all fields.
  std::string ToString() const;
};
} // namespace sherpa_onnx
#endif // SHERPA_ONNX_CSRC_ONLINE_LM_CONFIG_H_