// sherpa-onnx/csrc/decode.cc
//
// Copyright (c) 2023 Xiaomi Corporation
#include "sherpa-onnx/csrc/decode.h"

#include <algorithm>
#include <array>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <utility>
#include <vector>

namespace sherpa_onnx {
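
// Creates a shallow copy of *v: the returned tensor wraps the same
// underlying float buffer rather than copying it, so the clone must not
// outlive the tensor it was cloned from.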
static Ort::Value Clone(Ort::Value *v) {
  auto type_and_shape = v->GetTensorTypeAndShapeInfo();
  std::vector<int64_t> shape = type_and_shape.GetShape();
  auto memory_info =
      Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeDefault);

  return Ort::Value::CreateTensor(
      memory_info, v->GetTensorMutableData<float>(),
      type_and_shape.GetElementCount(), shape.data(), shape.size());
}
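
// Returns a (1, encoder_out_dim) tensor that views frame t of encoder_out,
// which has shape (1, num_frames, encoder_out_dim). No data is copied.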
static Ort::Value GetFrame(Ort::Value *encoder_out, int32_t t) {
  std::vector<int64_t> encoder_out_shape =
      encoder_out->GetTensorTypeAndShapeInfo().GetShape();

  assert(encoder_out_shape[0] == 1);
  int32_t encoder_out_dim = encoder_out_shape[2];

  auto memory_info =
      Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeDefault);

  std::array<int64_t, 2> shape{1, encoder_out_dim};

  return Ort::Value::CreateTensor(
      memory_info,
      encoder_out->GetTensorMutableData<float>() + t * encoder_out_dim,
      encoder_out_dim, shape.data(), shape.size());
}
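
// Decodes encoder_out with greedy search, appending non-blank tokens to
// *hyp in place. Only batch_size == 1 is supported.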
void GreedySearch(OnlineTransducerModel *model, Ort::Value encoder_out,
                  std::vector<int64_t> *hyp) {
  std::vector<int64_t> encoder_out_shape =
      encoder_out.GetTensorTypeAndShapeInfo().GetShape();

  if (encoder_out_shape[0] > 1) {
    fprintf(stderr, "Only batch_size=1 is implemented. Given: %d\n",
            static_cast<int32_t>(encoder_out_shape[0]));
    exit(-1);
  }

  int32_t num_frames = encoder_out_shape[1];
  int32_t vocab_size = model->VocabSize();

  Ort::Value decoder_input = model->BuildDecoderInput(*hyp);
  Ort::Value decoder_out = model->RunDecoder(std::move(decoder_input));

  // Process the encoder output one frame at a time.
  for (int32_t t = 0; t != num_frames; ++t) {
    Ort::Value cur_encoder_out = GetFrame(&encoder_out, t);
    Ort::Value logit =
        model->RunJoiner(std::move(cur_encoder_out), Clone(&decoder_out));

    // Pick the token with the highest logit for this frame.
    const float *p_logit = logit.GetTensorData<float>();
    auto y = static_cast<int32_t>(std::distance(
        p_logit, std::max_element(p_logit, p_logit + vocab_size)));

    // Token 0 is the blank symbol; emit non-blank tokens and refresh the
    // decoder state with the extended hypothesis.
    if (y != 0) {
      hyp->push_back(y);
      decoder_input = model->BuildDecoderInput(*hyp);
      decoder_out = model->RunDecoder(std::move(decoder_input));
    }
  }
}

}  // namespace sherpa_onnx
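
// A minimal usage sketch (an assumption for illustration, not part of the
// original file): `model` and `encoder_out` are hypothetical, and seeding
// *hyp with ContextSize() blanks assumes the decoder expects that context.
//
//   std::vector<int64_t> hyp(model->ContextSize(), 0);
//   sherpa_onnx::GreedySearch(model.get(), std::move(encoder_out), &hyp);
//   // hyp now holds the initial blanks followed by the decoded token ids.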