Add C++ runtime for vocos (#2014)

This commit is contained in:
Fangjun Kuang
2025-03-17 17:05:15 +08:00
committed by GitHub
parent 623cdc9eec
commit 0aacf02dd8
62 changed files with 558 additions and 162 deletions

View File

@@ -183,6 +183,8 @@ if(SHERPA_ONNX_ENABLE_TTS)
offline-tts-vits-model.cc
offline-tts.cc
piper-phonemize-lexicon.cc
vocoder.cc
vocos-vocoder.cc
)
endif()

View File

@@ -45,11 +45,21 @@ class HifiganVocoder::Impl {
Init(buf.data(), buf.size());
}
Ort::Value Run(Ort::Value mel) const {
std::vector<float> Run(Ort::Value mel) const {
auto out = sess_->Run({}, input_names_ptr_.data(), &mel, 1,
output_names_ptr_.data(), output_names_ptr_.size());
return std::move(out[0]);
std::vector<int64_t> audio_shape =
out[0].GetTensorTypeAndShapeInfo().GetShape();
int64_t total = 1;
// The output shape may be (1, 1, total) or (1, total) or (total,)
for (auto i : audio_shape) {
total *= i;
}
const float *p = out[0].GetTensorData<float>();
return {p, p + total};
}
private:
@@ -88,7 +98,7 @@ HifiganVocoder::HifiganVocoder(Manager *mgr, int32_t num_threads,
HifiganVocoder::~HifiganVocoder() = default;
Ort::Value HifiganVocoder::Run(Ort::Value mel) const {
std::vector<float> HifiganVocoder::Run(Ort::Value mel) const {
return impl_->Run(std::move(mel));
}

View File

@@ -7,14 +7,16 @@
#include <memory>
#include <string>
#include <vector>
#include "onnxruntime_cxx_api.h" // NOLINT
#include "sherpa-onnx/csrc/vocoder.h"
namespace sherpa_onnx {
class HifiganVocoder {
class HifiganVocoder : public Vocoder {
public:
~HifiganVocoder();
~HifiganVocoder() override;
HifiganVocoder(int32_t num_threads, const std::string &provider,
const std::string &model);
@@ -26,7 +28,7 @@ class HifiganVocoder {
/** @param mel A float32 tensor of shape (batch_size, feat_dim, num_frames).
* @return Return a float32 tensor of shape (batch_size, num_samples).
*/
Ort::Value Run(Ort::Value mel) const;
std::vector<float> Run(Ort::Value mel) const override;
private:
class Impl;

View File

@@ -13,7 +13,6 @@
#include "fst/extensions/far/far.h"
#include "kaldifst/csrc/kaldi-fst-io.h"
#include "kaldifst/csrc/text-normalizer.h"
#include "sherpa-onnx/csrc/hifigan-vocoder.h"
#include "sherpa-onnx/csrc/jieba-lexicon.h"
#include "sherpa-onnx/csrc/lexicon.h"
#include "sherpa-onnx/csrc/macros.h"
@@ -25,6 +24,7 @@
#include "sherpa-onnx/csrc/onnx-utils.h"
#include "sherpa-onnx/csrc/piper-phonemize-lexicon.h"
#include "sherpa-onnx/csrc/text-utils.h"
#include "sherpa-onnx/csrc/vocoder.h"
namespace sherpa_onnx {
@@ -33,9 +33,7 @@ class OfflineTtsMatchaImpl : public OfflineTtsImpl {
explicit OfflineTtsMatchaImpl(const OfflineTtsConfig &config)
: config_(config),
model_(std::make_unique<OfflineTtsMatchaModel>(config.model)),
vocoder_(std::make_unique<HifiganVocoder>(
config.model.num_threads, config.model.provider,
config.model.matcha.vocoder)) {
vocoder_(Vocoder::Create(config.model)) {
InitFrontend();
if (!config.rule_fsts.empty()) {
@@ -92,9 +90,7 @@ class OfflineTtsMatchaImpl : public OfflineTtsImpl {
OfflineTtsMatchaImpl(Manager *mgr, const OfflineTtsConfig &config)
: config_(config),
model_(std::make_unique<OfflineTtsMatchaModel>(mgr, config.model)),
vocoder_(std::make_unique<HifiganVocoder>(
mgr, config.model.num_threads, config.model.provider,
config.model.matcha.vocoder)) {
vocoder_(Vocoder::Create(mgr, config.model)) {
InitFrontend(mgr);
if (!config.rule_fsts.empty()) {
@@ -382,22 +378,11 @@ class OfflineTtsMatchaImpl : public OfflineTtsImpl {
memory_info, x.data(), x.size(), x_shape.data(), x_shape.size());
Ort::Value mel = model_->Run(std::move(x_tensor), sid, speed);
Ort::Value audio = vocoder_->Run(std::move(mel));
std::vector<int64_t> audio_shape =
audio.GetTensorTypeAndShapeInfo().GetShape();
int64_t total = 1;
// The output shape may be (1, 1, total) or (1, total) or (total,)
for (auto i : audio_shape) {
total *= i;
}
const float *p = audio.GetTensorData<float>();
GeneratedAudio ans;
ans.samples = vocoder_->Run(std::move(mel));
ans.sample_rate = model_->GetMetaData().sample_rate;
ans.samples = std::vector<float>(p, p + total);
float silence_scale = config_.silence_scale;
if (silence_scale != 1) {
@@ -410,7 +395,7 @@ class OfflineTtsMatchaImpl : public OfflineTtsImpl {
private:
OfflineTtsConfig config_;
std::unique_ptr<OfflineTtsMatchaModel> model_;
std::unique_ptr<HifiganVocoder> vocoder_;
std::unique_ptr<Vocoder> vocoder_;
std::vector<std::unique_ptr<kaldifst::TextNormalizer>> tn_list_;
std::unique_ptr<OfflineTtsFrontend> frontend_;
};

120
sherpa-onnx/csrc/vocoder.cc Normal file
View File

@@ -0,0 +1,120 @@
// sherpa-onnx/csrc/vocoder.cc
//
// Copyright (c) 2025 Xiaomi Corporation
#include "sherpa-onnx/csrc/vocoder.h"
#if __ANDROID_API__ >= 9
#include "android/asset_manager.h"
#include "android/asset_manager_jni.h"
#endif
#if __OHOS__
#include "rawfile/raw_file_manager.h"
#endif
#include "sherpa-onnx/csrc/file-utils.h"
#include "sherpa-onnx/csrc/hifigan-vocoder.h"
#include "sherpa-onnx/csrc/macros.h"
#include "sherpa-onnx/csrc/onnx-utils.h"
#include "sherpa-onnx/csrc/vocos-vocoder.h"
namespace sherpa_onnx {
namespace {

// Vocoder families supported by Vocoder::Create(). The value is derived
// from the "model_type" entry in the ONNX model metadata (see
// GetModelType() below).
enum class ModelType : std::uint8_t {
  kHifigan,  // metadata model_type == "hifigan"
  kVocoos,   // metadata model_type == "vocos"
  kUnknown,  // metadata entry missing or unrecognized
};

}  // namespace
// Decide which vocoder family an ONNX model belongs to by loading it with a
// throw-away single-threaded session and reading the "model_type" custom
// metadata entry.
//
// @param model_data         Pointer to the in-memory ONNX model.
// @param model_data_length  Size of the model in bytes.
// @param debug              If true, dump the model metadata to the log.
// @return ModelType::kHifigan / kVocoos, or kUnknown on a missing or
//         unsupported model_type entry (an error is logged in that case).
static ModelType GetModelType(char *model_data, size_t model_data_length,
                              bool debug) {
  Ort::Env env(ORT_LOGGING_LEVEL_ERROR);

  // A minimal session: we only need the metadata, not inference throughput.
  Ort::SessionOptions opts;
  opts.SetIntraOpNumThreads(1);
  opts.SetInterOpNumThreads(1);

  auto session =
      std::make_unique<Ort::Session>(env, model_data, model_data_length, opts);

  Ort::ModelMetadata meta = session->GetModelMetadata();

  if (debug) {
    std::ostringstream os;
    PrintModelMetadata(os, meta);
#if __OHOS__
    SHERPA_ONNX_LOGE("%{public}s", os.str().c_str());
#else
    SHERPA_ONNX_LOGE("%s", os.str().c_str());
#endif
  }

  Ort::AllocatorWithDefaultOptions allocator;
  auto model_type = LookupCustomModelMetaData(meta, "model_type", allocator);

  if (model_type.empty()) {
    SHERPA_ONNX_LOGE(
        "No model_type in the metadata!\n"
        "Please make sure you are using the vocoder from "
        "https://github.com/k2-fsa/sherpa-onnx/releases/tag/vocoder-models");
    return ModelType::kUnknown;
  }

  if (model_type == "hifigan") {
    return ModelType::kHifigan;
  }

  if (model_type == "vocos") {
    return ModelType::kVocoos;
  }

  SHERPA_ONNX_LOGE("Unsupported model_type: %s", model_type.c_str());
  return ModelType::kUnknown;
}
// Factory: read the vocoder model from config.matcha.vocoder, sniff its
// model_type metadata, and construct the matching implementation.
//
// @param config  TTS model config; config.matcha.vocoder is the model path.
// @return A concrete Vocoder, or nullptr if the model type is unknown.
std::unique_ptr<Vocoder> Vocoder::Create(const OfflineTtsModelConfig &config) {
  auto buffer = ReadFile(config.matcha.vocoder);

  auto model_type = GetModelType(buffer.data(), buffer.size(), config.debug);

  switch (model_type) {
    case ModelType::kHifigan:
      return std::make_unique<HifiganVocoder>(
          config.num_threads, config.provider, config.matcha.vocoder);
    case ModelType::kVocoos:
      return std::make_unique<VocosVocoder>(config);
    case ModelType::kUnknown:
      SHERPA_ONNX_LOGE("Unknown model type in vocoder!");
      return nullptr;
  }

  // All enumerators are handled above; this guards against an out-of-range
  // value and silences -Wreturn-type (falling off the end of a non-void
  // function is undefined behavior).
  return nullptr;
}
// Factory overload that loads the model through an asset/resource manager
// (Android AAssetManager or OHOS NativeResourceManager) instead of the
// filesystem.
//
// @param mgr     Asset/resource manager used to read the model file.
// @param config  TTS model config; config.matcha.vocoder is the asset path.
// @return A concrete Vocoder, or nullptr if the model type is unknown.
template <typename Manager>
std::unique_ptr<Vocoder> Vocoder::Create(Manager *mgr,
                                         const OfflineTtsModelConfig &config) {
  auto buffer = ReadFile(mgr, config.matcha.vocoder);

  auto model_type = GetModelType(buffer.data(), buffer.size(), config.debug);

  switch (model_type) {
    case ModelType::kHifigan:
      // Bug fix: forward mgr so the model is loaded via the asset manager.
      // The original constructed the filesystem-based HifiganVocoder, which
      // cannot open an asset path on Android/OHOS.
      return std::make_unique<HifiganVocoder>(
          mgr, config.num_threads, config.provider, config.matcha.vocoder);
    case ModelType::kVocoos:
      // Bug fix: likewise use the Manager-aware VocosVocoder constructor.
      return std::make_unique<VocosVocoder>(mgr, config);
    case ModelType::kUnknown:
      SHERPA_ONNX_LOGE("Unknown model type in vocoder!");
      return nullptr;
  }

  // Unreachable for valid enumerators; avoids falling off the end of a
  // non-void function (UB) if the enum value is ever corrupted.
  return nullptr;
}
#if __ANDROID_API__ >= 9
template std::unique_ptr<Vocoder> Vocoder::Create(
AAssetManager *mgr, const OfflineTtsModelConfig &config);
#endif
#if __OHOS__
template std::unique_ptr<Vocoder> Vocoder::Create(
NativeResourceManager *mgr, const OfflineTtsModelConfig &config);
#endif
} // namespace sherpa_onnx

View File

@@ -0,0 +1,35 @@
// sherpa-onnx/csrc/vocoder.h
//
// Copyright (c) 2025 Xiaomi Corporation
#ifndef SHERPA_ONNX_CSRC_VOCODER_H_
#define SHERPA_ONNX_CSRC_VOCODER_H_
#include <memory>
#include <string>
#include <vector>
#include "onnxruntime_cxx_api.h" // NOLINT
#include "sherpa-onnx/csrc/offline-tts-model-config.h"
namespace sherpa_onnx {
// Abstract interface for neural vocoders that turn a mel spectrogram into
// audio samples. Concrete implementations live in hifigan-vocoder.h and
// vocos-vocoder.h; instances are obtained via Create().
class Vocoder {
 public:
  virtual ~Vocoder() = default;

  // Construct a vocoder from config.matcha.vocoder, choosing the
  // implementation from the model's metadata. May return nullptr on an
  // unrecognized model — callers should check.
  static std::unique_ptr<Vocoder> Create(const OfflineTtsModelConfig &config);

  // Same as above, but reads the model file through mgr (e.g. an Android
  // AAssetManager or an OHOS NativeResourceManager).
  template <typename Manager>
  static std::unique_ptr<Vocoder> Create(Manager *mgr,
                                         const OfflineTtsModelConfig &config);

  /** @param mel A float32 tensor of shape (batch_size, feat_dim, num_frames).
   * @return Return a float32 vector containing audio samples.
   */
  virtual std::vector<float> Run(Ort::Value mel) const = 0;
};
} // namespace sherpa_onnx
#endif // SHERPA_ONNX_CSRC_VOCODER_H_

View File

@@ -0,0 +1,194 @@
// sherpa-onnx/csrc/vocos-vocoder.cc
//
// Copyright (c) 2025 Xiaomi Corporation
#include "sherpa-onnx/csrc/vocos-vocoder.h"
#include <string>
#include <utility>
#include <vector>
#if __ANDROID_API__ >= 9
#include "android/asset_manager.h"
#include "android/asset_manager_jni.h"
#endif
#if __OHOS__
#include "rawfile/raw_file_manager.h"
#endif
#include "kaldi-native-fbank/csrc/istft.h"
#include "sherpa-onnx/csrc/file-utils.h"
#include "sherpa-onnx/csrc/macros.h"
#include "sherpa-onnx/csrc/onnx-utils.h"
#include "sherpa-onnx/csrc/session.h"
namespace sherpa_onnx {
// Metadata read from the vocos ONNX model (see Impl::Init). The fields
// mirror knf::StftConfig and describe the inverse STFT used to turn the
// model's predicted spectrum back into waveform samples.
//
// All integral fields are zero-initialized so a default-constructed
// instance has a well-defined state (the originals were uninitialized).
struct VocosModelMetaData {
  int32_t n_fft = 0;       // FFT size
  int32_t hop_length = 0;  // frame shift in samples
  int32_t win_length = 0;  // analysis window length in samples
  int32_t center = 0;      // treated as bool by knf::StftConfig::center
  int32_t normalized = 0;  // treated as bool by knf::StftConfig::normalized
  std::string window_type;  // e.g. window name; forwarded to knf::StftConfig
  std::string pad_mode;     // padding mode; forwarded to knf::StftConfig
};
// Pimpl for VocosVocoder. Owns the ONNX session and converts the model's
// predicted spectrum (magnitude + cos/sin phase) into samples via an
// inverse STFT from kaldi-native-fbank.
class VocosVocoder::Impl {
 public:
  // Load the vocos model from the filesystem path config.matcha.vocoder.
  explicit Impl(const OfflineTtsModelConfig &config)
      : config_(config),
        env_(ORT_LOGGING_LEVEL_ERROR),
        sess_opts_(GetSessionOptions(config.num_threads, config.provider)),
        allocator_{} {
    auto buf = ReadFile(config.matcha.vocoder);
    Init(buf.data(), buf.size());
  }

  // Load the vocos model through an asset/resource manager (Android/OHOS).
  template <typename Manager>
  explicit Impl(Manager *mgr, const OfflineTtsModelConfig &config)
      : config_(config),
        env_(ORT_LOGGING_LEVEL_ERROR),
        sess_opts_(GetSessionOptions(config.num_threads, config.provider)),
        allocator_{} {
    auto buf = ReadFile(mgr, config.matcha.vocoder);
    Init(buf.data(), buf.size());
  }

  // Run the model on a mel spectrogram and reconstruct the waveform.
  //
  // The model produces three outputs, each of shape (1, n_fft/2+1,
  // num_frames): out[0] magnitude, out[1] and out[2] two per-bin values
  // (presumably cos/sin of the phase — TODO confirm against the exporting
  // script) that are multiplied with the magnitude to form the real and
  // imaginary STFT parts. Only batch size 1 is supported; anything else
  // aborts the process.
  std::vector<float> Run(Ort::Value mel) const {
    auto out = sess_->Run({}, input_names_ptr_.data(), &mel, 1,
                          output_names_ptr_.data(), output_names_ptr_.size());

    std::vector<int64_t> shape = out[0].GetTensorTypeAndShapeInfo().GetShape();

    if (shape[0] != 1) {
      SHERPA_ONNX_LOGE("Support only batch size 1, given: %d",
                       static_cast<int32_t>(shape[0]));
      SHERPA_ONNX_EXIT(-1);
    }

    knf::StftResult stft_result;
    stft_result.num_frames = shape[2];
    stft_result.real.resize(shape[1] * shape[2]);
    stft_result.imag.resize(shape[1] * shape[2]);

    // stft_result.real: (num_frames, n_fft/2+1), flattened in row major
    // mag.shape: (batch_size, n_fft/2+1, num_frames)
    const float *p_mag = out[0].GetTensorData<float>();
    const float *p_x = out[1].GetTensorData<float>();
    const float *p_y = out[2].GetTensorData<float>();

    // The model output is laid out (bin, frame); the IStft input wants
    // (frame, bin), so the loop below transposes while combining
    // magnitude and phase: real = mag * x, imag = mag * y.
    for (int32_t frame_index = 0; frame_index < static_cast<int32_t>(shape[2]);
         ++frame_index) {
      for (int32_t bin = 0; bin < static_cast<int32_t>(shape[1]); ++bin) {
        stft_result.real[frame_index * shape[1] + bin] =
            p_mag[bin * shape[2] + frame_index] *
            p_x[bin * shape[2] + frame_index];

        stft_result.imag[frame_index * shape[1] + bin] =
            p_mag[bin * shape[2] + frame_index] *
            p_y[bin * shape[2] + frame_index];
      }
    }

    // Configure the inverse STFT from the model's metadata (read in Init).
    knf::StftConfig stft_config;
    stft_config.n_fft = meta_.n_fft;
    stft_config.hop_length = meta_.hop_length;
    stft_config.win_length = meta_.win_length;
    stft_config.normalized = meta_.normalized;
    stft_config.center = meta_.center;
    stft_config.window_type = meta_.window_type;
    stft_config.pad_mode = meta_.pad_mode;

    knf::IStft istft(stft_config);

    return istft.Compute(stft_result);
  }

 private:
  // Create the ONNX session from an in-memory model and read the STFT
  // parameters from the model's custom metadata into meta_. The
  // SHERPA_ONNX_READ_META_DATA* macros use the local `allocator`.
  void Init(void *model_data, size_t model_data_length) {
    sess_ = std::make_unique<Ort::Session>(env_, model_data, model_data_length,
                                           sess_opts_);

    GetInputNames(sess_.get(), &input_names_, &input_names_ptr_);
    GetOutputNames(sess_.get(), &output_names_, &output_names_ptr_);

    // get meta data
    Ort::ModelMetadata meta_data = sess_->GetModelMetadata();
    if (config_.debug) {
      std::ostringstream os;
      os << "---Vocos model---\n";
      PrintModelMetadata(os, meta_data);

      os << "----------input names----------\n";
      int32_t i = 0;
      for (const auto &s : input_names_) {
        os << i << " " << s << "\n";
        ++i;
      }
      os << "----------output names----------\n";
      i = 0;
      for (const auto &s : output_names_) {
        os << i << " " << s << "\n";
        ++i;
      }

#if __OHOS__
      SHERPA_ONNX_LOGE("%{public}s\n", os.str().c_str());
#else
      SHERPA_ONNX_LOGE("%s\n", os.str().c_str());
#endif
    }

    Ort::AllocatorWithDefaultOptions allocator;  // used in the macro below
    SHERPA_ONNX_READ_META_DATA(meta_.n_fft, "n_fft");
    SHERPA_ONNX_READ_META_DATA(meta_.hop_length, "hop_length");
    SHERPA_ONNX_READ_META_DATA(meta_.win_length, "win_length");
    SHERPA_ONNX_READ_META_DATA(meta_.center, "center");
    SHERPA_ONNX_READ_META_DATA(meta_.normalized, "normalized");
    SHERPA_ONNX_READ_META_DATA_STR(meta_.window_type, "window_type");
    SHERPA_ONNX_READ_META_DATA_STR(meta_.pad_mode, "pad_mode");
  }

 private:
  OfflineTtsModelConfig config_;
  VocosModelMetaData meta_;  // STFT parameters read from model metadata
  Ort::Env env_;
  Ort::SessionOptions sess_opts_;
  Ort::AllocatorWithDefaultOptions allocator_;
  std::unique_ptr<Ort::Session> sess_;
  std::vector<std::string> input_names_;
  std::vector<const char *> input_names_ptr_;  // views into input_names_
  std::vector<std::string> output_names_;
  std::vector<const char *> output_names_ptr_;  // views into output_names_
};
VocosVocoder::VocosVocoder(const OfflineTtsModelConfig &config)
    : impl_(std::make_unique<Impl>(config)) {}

// Construct by loading the model through mgr (asset/resource manager);
// explicit instantiations for the supported Manager types are below.
template <typename Manager>
VocosVocoder::VocosVocoder(Manager *mgr, const OfflineTtsModelConfig &config)
    : impl_(std::make_unique<Impl>(mgr, config)) {}

// Out-of-line so ~unique_ptr<Impl> sees the complete Impl type.
VocosVocoder::~VocosVocoder() = default;

// Forward to the pimpl, which runs the model and the inverse STFT.
std::vector<float> VocosVocoder::Run(Ort::Value mel) const {
  return impl_->Run(std::move(mel));
}
#if __ANDROID_API__ >= 9
template VocosVocoder::VocosVocoder(AAssetManager *mgr,
const OfflineTtsModelConfig &config);
#endif
#if __OHOS__
template VocosVocoder::VocosVocoder(NativeResourceManager *mgr,
const OfflineTtsModelConfig &config);
#endif
} // namespace sherpa_onnx

View File

@@ -0,0 +1,39 @@
// sherpa-onnx/csrc/vocos-vocoder.h
//
// Copyright (c) 2025 Xiaomi Corporation
#ifndef SHERPA_ONNX_CSRC_VOCOS_VOCODER_H_
#define SHERPA_ONNX_CSRC_VOCOS_VOCODER_H_
#include <memory>
#include <string>
#include <vector>
#include "onnxruntime_cxx_api.h" // NOLINT
#include "sherpa-onnx/csrc/offline-tts-model-config.h"
#include "sherpa-onnx/csrc/vocoder.h"
namespace sherpa_onnx {
// Vocoder backed by a vocos ONNX model: the network predicts an STFT
// spectrum (magnitude and phase components) and the waveform is recovered
// with an inverse STFT. See vocos-vocoder.cc for details.
class VocosVocoder : public Vocoder {
 public:
  ~VocosVocoder() override;

  explicit VocosVocoder(const OfflineTtsModelConfig &config);

  // Load the model through mgr (e.g. Android AAssetManager or OHOS
  // NativeResourceManager) instead of the filesystem.
  template <typename Manager>
  VocosVocoder(Manager *mgr, const OfflineTtsModelConfig &config);

  /** @param mel A float32 tensor of shape (batch_size, feat_dim, num_frames).
   * @return Return a float32 vector containing audio samples.
   */
  std::vector<float> Run(Ort::Value mel) const override;

 private:
  class Impl;
  std::unique_ptr<Impl> impl_;  // pimpl: hides the ONNX session details
};
} // namespace sherpa_onnx
#endif // SHERPA_ONNX_CSRC_VOCOS_VOCODER_H_