Add C and CXX API for homophone replacer (#2156)
This commit is contained in:
# Streaming (online) Zipformer example.
add_executable(streaming-zipformer-cxx-api ./streaming-zipformer-cxx-api.cc)
target_link_libraries(streaming-zipformer-cxx-api sherpa-onnx-cxx-api)

# Streaming Zipformer with homophone replacer (hr) post-processing.
add_executable(streaming-zipformer-with-hr-cxx-api ./streaming-zipformer-with-hr-cxx-api.cc)
target_link_libraries(streaming-zipformer-with-hr-cxx-api sherpa-onnx-cxx-api)

# GTCRN speech-enhancement example.
add_executable(speech-enhancement-gtcrn-cxx-api ./speech-enhancement-gtcrn-cxx-api.cc)
target_link_libraries(speech-enhancement-gtcrn-cxx-api sherpa-onnx-cxx-api)
# SenseVoice offline recognition example.
add_executable(sense-voice-cxx-api ./sense-voice-cxx-api.cc)
target_link_libraries(sense-voice-cxx-api sherpa-onnx-cxx-api)

# SenseVoice with homophone replacer (hr) post-processing.
add_executable(sense-voice-with-hr-cxx-api ./sense-voice-with-hr-cxx-api.cc)
target_link_libraries(sense-voice-with-hr-cxx-api sherpa-onnx-cxx-api)

# Dolphin CTC offline recognition example.
add_executable(dolphin-ctc-cxx-api ./dolphin-ctc-cxx-api.cc)
target_link_libraries(dolphin-ctc-cxx-api sherpa-onnx-cxx-api)
|
||||
cxx-api-examples/sense-voice-with-hr-cxx-api.cc | 92 ++++++++++ (new file)
@@ -0,0 +1,92 @@
|
||||
// cxx-api-examples/sense-voice-with-hr-cxx-api.cc
|
||||
//
|
||||
// Copyright (c) 2024-2025 Xiaomi Corporation
|
||||
|
||||
//
|
||||
// This file demonstrates how to use sense voice with sherpa-onnx's C++ API.
|
||||
//
|
||||
// clang-format off
|
||||
//
|
||||
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
|
||||
// tar xvf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
|
||||
// rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
|
||||
//
|
||||
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/dict.tar.bz2
|
||||
// tar xf dict.tar.bz2
|
||||
//
|
||||
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/replace.fst
|
||||
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/test-hr.wav
|
||||
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/lexicon.txt
|
||||
//
|
||||
// clang-format on
|
||||
|
||||
#include <chrono> // NOLINT
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
|
||||
#include "sherpa-onnx/c-api/cxx-api.h"
|
||||
|
||||
int32_t main() {
|
||||
using namespace sherpa_onnx::cxx; // NOLINT
|
||||
OfflineRecognizerConfig config;
|
||||
|
||||
config.model_config.sense_voice.model =
|
||||
"./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.int8.onnx";
|
||||
config.model_config.sense_voice.use_itn = true;
|
||||
config.model_config.sense_voice.language = "auto";
|
||||
config.model_config.tokens =
|
||||
"./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt";
|
||||
config.hr.dict_dir = "./dict";
|
||||
config.hr.lexicon = "./lexicon.txt";
|
||||
|
||||
// Please see
|
||||
// https://colab.research.google.com/drive/1jEaS3s8FbRJIcVQJv2EQx19EM_mnuARi?usp=sharing
|
||||
// for how to generate your own replace.fst
|
||||
config.hr.rule_fsts = "./replace.fst";
|
||||
|
||||
config.model_config.num_threads = 1;
|
||||
|
||||
std::cout << "Loading model\n";
|
||||
OfflineRecognizer recongizer = OfflineRecognizer::Create(config);
|
||||
if (!recongizer.Get()) {
|
||||
std::cerr << "Please check your config\n";
|
||||
return -1;
|
||||
}
|
||||
std::cout << "Loading model done\n";
|
||||
|
||||
std::string wave_filename = "./test-hr.wav";
|
||||
|
||||
Wave wave = ReadWave(wave_filename);
|
||||
if (wave.samples.empty()) {
|
||||
std::cerr << "Failed to read: '" << wave_filename << "'\n";
|
||||
return -1;
|
||||
}
|
||||
|
||||
std::cout << "Start recognition\n";
|
||||
const auto begin = std::chrono::steady_clock::now();
|
||||
|
||||
OfflineStream stream = recongizer.CreateStream();
|
||||
stream.AcceptWaveform(wave.sample_rate, wave.samples.data(),
|
||||
wave.samples.size());
|
||||
|
||||
recongizer.Decode(&stream);
|
||||
|
||||
OfflineRecognizerResult result = recongizer.GetResult(&stream);
|
||||
|
||||
const auto end = std::chrono::steady_clock::now();
|
||||
const float elapsed_seconds =
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(end - begin)
|
||||
.count() /
|
||||
1000.;
|
||||
float duration = wave.samples.size() / static_cast<float>(wave.sample_rate);
|
||||
float rtf = elapsed_seconds / duration;
|
||||
|
||||
std::cout << "text: " << result.text << "\n";
|
||||
printf("Number of threads: %d\n", config.model_config.num_threads);
|
||||
printf("Duration: %.3fs\n", duration);
|
||||
printf("Elapsed seconds: %.3fs\n", elapsed_seconds);
|
||||
printf("(Real time factor) RTF = %.3f / %.3f = %.3f\n", elapsed_seconds,
|
||||
duration, rtf);
|
||||
|
||||
return 0;
|
||||
}
|
||||
cxx-api-examples/streaming-zipformer-with-hr-cxx-api.cc | 106 ++++++++++ (new file)
@@ -0,0 +1,106 @@
|
||||
// cxx-api-examples/streaming-zipformer-with-hr-cxx-api.cc
|
||||
// Copyright (c) 2024-2025 Xiaomi Corporation
|
||||
|
||||
//
|
||||
// This file demonstrates how to use streaming Zipformer
|
||||
// with sherpa-onnx's C++ API.
|
||||
//
|
||||
// clang-format off
|
||||
//
|
||||
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
|
||||
// tar xvf sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
|
||||
// rm sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
|
||||
//
|
||||
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/dict.tar.bz2
|
||||
// tar xf dict.tar.bz2
|
||||
//
|
||||
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/replace.fst
|
||||
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/test-hr.wav
|
||||
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/lexicon.txt
|
||||
//
|
||||
// clang-format on
|
||||
|
||||
#include <chrono> // NOLINT
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
|
||||
#include "sherpa-onnx/c-api/cxx-api.h"
|
||||
|
||||
int32_t main() {
|
||||
using namespace sherpa_onnx::cxx; // NOLINT
|
||||
OnlineRecognizerConfig config;
|
||||
|
||||
// please see
|
||||
// https://k2-fsa.github.io/sherpa/onnx/pretrained_models/online-transducer/zipformer-transducer-models.html#csukuangfj-sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20-bilingual-chinese-english
|
||||
config.model_config.transducer.encoder =
|
||||
"./sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/"
|
||||
"encoder-epoch-99-avg-1.int8.onnx";
|
||||
|
||||
// Note: We recommend not using int8.onnx for the decoder.
|
||||
config.model_config.transducer.decoder =
|
||||
"./sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/"
|
||||
"decoder-epoch-99-avg-1.onnx";
|
||||
|
||||
config.model_config.transducer.joiner =
|
||||
"./sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/"
|
||||
"joiner-epoch-99-avg-1.int8.onnx";
|
||||
|
||||
config.model_config.tokens =
|
||||
"./sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/tokens.txt";
|
||||
|
||||
config.model_config.num_threads = 1;
|
||||
|
||||
config.hr.dict_dir = "./dict";
|
||||
config.hr.lexicon = "./lexicon.txt";
|
||||
|
||||
// Please see
|
||||
// https://colab.research.google.com/drive/1jEaS3s8FbRJIcVQJv2EQx19EM_mnuARi?usp=sharing
|
||||
// for how to generate your own replace.fst
|
||||
config.hr.rule_fsts = "./replace.fst";
|
||||
|
||||
std::cout << "Loading model\n";
|
||||
OnlineRecognizer recongizer = OnlineRecognizer::Create(config);
|
||||
if (!recongizer.Get()) {
|
||||
std::cerr << "Please check your config\n";
|
||||
return -1;
|
||||
}
|
||||
std::cout << "Loading model done\n";
|
||||
|
||||
std::string wave_filename = "./test-hr.wav";
|
||||
Wave wave = ReadWave(wave_filename);
|
||||
if (wave.samples.empty()) {
|
||||
std::cerr << "Failed to read: '" << wave_filename << "'\n";
|
||||
return -1;
|
||||
}
|
||||
|
||||
std::cout << "Start recognition\n";
|
||||
const auto begin = std::chrono::steady_clock::now();
|
||||
|
||||
OnlineStream stream = recongizer.CreateStream();
|
||||
stream.AcceptWaveform(wave.sample_rate, wave.samples.data(),
|
||||
wave.samples.size());
|
||||
stream.InputFinished();
|
||||
|
||||
while (recongizer.IsReady(&stream)) {
|
||||
recongizer.Decode(&stream);
|
||||
}
|
||||
|
||||
OnlineRecognizerResult result = recongizer.GetResult(&stream);
|
||||
|
||||
const auto end = std::chrono::steady_clock::now();
|
||||
const float elapsed_seconds =
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(end - begin)
|
||||
.count() /
|
||||
1000.;
|
||||
float duration = wave.samples.size() / static_cast<float>(wave.sample_rate);
|
||||
float rtf = elapsed_seconds / duration;
|
||||
|
||||
std::cout << "text: " << result.text << "\n";
|
||||
printf("Number of threads: %d\n", config.model_config.num_threads);
|
||||
printf("Duration: %.3fs\n", duration);
|
||||
printf("Elapsed seconds: %.3fs\n", elapsed_seconds);
|
||||
printf("(Real time factor) RTF = %.3f / %.3f = %.3f\n", elapsed_seconds,
|
||||
duration, rtf);
|
||||
|
||||
return 0;
|
||||
}
|
||||
Reference in New Issue
Block a user