Add C and CXX API for homophone replacer (#2156)

This commit is contained in:
Fangjun Kuang
2025-04-27 22:09:13 +08:00
committed by GitHub
parent f64c58342b
commit e51c37eb2f
13 changed files with 586 additions and 1 deletions

View File

@@ -56,6 +56,9 @@ target_link_libraries(fire-red-asr-c-api sherpa-onnx-c-api)
# Non-streaming (offline) C API examples. Each example is a standalone
# executable linked against the sherpa-onnx C API library.
add_executable(sense-voice-c-api sense-voice-c-api.c)
target_link_libraries(sense-voice-c-api sherpa-onnx-c-api)
# SenseVoice example demonstrating the homophone replacer (hr) feature.
add_executable(sense-voice-with-hr-c-api sense-voice-with-hr-c-api.c)
target_link_libraries(sense-voice-with-hr-c-api sherpa-onnx-c-api)
add_executable(dolphin-ctc-c-api dolphin-ctc-c-api.c)
target_link_libraries(dolphin-ctc-c-api sherpa-onnx-c-api)
@@ -68,6 +71,9 @@ target_link_libraries(zipformer-c-api sherpa-onnx-c-api)
# Streaming (online) C API examples, linked against the sherpa-onnx C API.
add_executable(streaming-zipformer-c-api streaming-zipformer-c-api.c)
target_link_libraries(streaming-zipformer-c-api sherpa-onnx-c-api)
# Streaming Zipformer example demonstrating the homophone replacer (hr) feature.
add_executable(streaming-zipformer-with-hr-c-api streaming-zipformer-with-hr-c-api.c)
target_link_libraries(streaming-zipformer-with-hr-c-api sherpa-onnx-c-api)
add_executable(paraformer-c-api paraformer-c-api.c)
target_link_libraries(paraformer-c-api sherpa-onnx-c-api)

View File

@@ -0,0 +1,99 @@
// c-api-examples/sense-voice-with-hr-c-api.c
//
// Copyright (c) 2024-2025 Xiaomi Corporation
//
// This file demonstrates how to use SenseVoice with sherpa-onnx's C API
// with homophone replacer.
// clang-format off
//
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
// tar xvf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
// rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
//
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/dict.tar.bz2
// tar xf dict.tar.bz2
//
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/replace.fst
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/test-hr.wav
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/lexicon.txt
//
// clang-format on
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "sherpa-onnx/c-api/c-api.h"
// Decode ./test-hr.wav with the SenseVoice offline model and post-process
// the recognition result with the homophone replacer (hr), whose rules come
// from ./dict, ./lexicon.txt and ./replace.fst (see the wget commands above).
//
// Returns 0 on success, -1 if the wave file cannot be read or the
// recognizer/stream cannot be created.
int32_t main() {
  const char *wav_filename = "./test-hr.wav";
  const char *model_filename =
      "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.int8.onnx";
  const char *tokens_filename =
      "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt";
  const char *language = "auto";
  const char *provider = "cpu";
  int32_t use_inverse_text_normalization = 1;

  const SherpaOnnxWave *wave = SherpaOnnxReadWave(wav_filename);
  if (wave == NULL) {
    fprintf(stderr, "Failed to read %s\n", wav_filename);
    return -1;
  }

  // SenseVoice model config. memset() zeroes the struct so unset fields
  // keep their default (0/NULL) values.
  SherpaOnnxOfflineSenseVoiceModelConfig sense_voice_config;
  memset(&sense_voice_config, 0, sizeof(sense_voice_config));
  sense_voice_config.model = model_filename;
  sense_voice_config.language = language;
  sense_voice_config.use_itn = use_inverse_text_normalization;

  // Offline model config
  SherpaOnnxOfflineModelConfig offline_model_config;
  memset(&offline_model_config, 0, sizeof(offline_model_config));
  offline_model_config.debug = 1;
  offline_model_config.num_threads = 1;
  offline_model_config.provider = provider;
  offline_model_config.tokens = tokens_filename;
  offline_model_config.sense_voice = sense_voice_config;

  // Recognizer config, including the homophone replacer (hr) files.
  SherpaOnnxOfflineRecognizerConfig recognizer_config;
  memset(&recognizer_config, 0, sizeof(recognizer_config));
  recognizer_config.decoding_method = "greedy_search";
  recognizer_config.model_config = offline_model_config;

  recognizer_config.hr.dict_dir = "./dict";
  recognizer_config.hr.lexicon = "./lexicon.txt";
  // Please see
  // https://colab.research.google.com/drive/1jEaS3s8FbRJIcVQJv2EQx19EM_mnuARi?usp=sharing
  // for how to generate your own replace.fst
  recognizer_config.hr.rule_fsts = "./replace.fst";

  const SherpaOnnxOfflineRecognizer *recognizer =
      SherpaOnnxCreateOfflineRecognizer(&recognizer_config);
  if (recognizer == NULL) {
    fprintf(stderr, "Please check your config!\n");
    SherpaOnnxFreeWave(wave);
    return -1;
  }

  const SherpaOnnxOfflineStream *stream =
      SherpaOnnxCreateOfflineStream(recognizer);
  // Fix: the original dereferenced `stream` without checking for NULL,
  // leaking `recognizer` and `wave` on failure.
  if (stream == NULL) {
    fprintf(stderr, "Failed to create an offline stream!\n");
    SherpaOnnxDestroyOfflineRecognizer(recognizer);
    SherpaOnnxFreeWave(wave);
    return -1;
  }

  SherpaOnnxAcceptWaveformOffline(stream, wave->sample_rate, wave->samples,
                                  wave->num_samples);
  SherpaOnnxDecodeOfflineStream(recognizer, stream);

  const SherpaOnnxOfflineRecognizerResult *result =
      SherpaOnnxGetOfflineStreamResult(stream);
  fprintf(stderr, "Decoded text: %s\n", result->text);

  // Release everything in reverse order of creation.
  SherpaOnnxDestroyOfflineRecognizerResult(result);
  SherpaOnnxDestroyOfflineStream(stream);
  SherpaOnnxDestroyOfflineRecognizer(recognizer);
  SherpaOnnxFreeWave(wave);

  return 0;
}

View File

@@ -0,0 +1,158 @@
// c-api-examples/streaming-zipformer-with-hr-c-api.c
//
// Copyright (c) 2025 Xiaomi Corporation
//
// This file demonstrates how to use streaming Zipformer with sherpa-onnx's C
// API together with the homophone replacer.
// clang-format off
//
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
// tar xvf sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
// rm sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
//
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/dict.tar.bz2
// tar xf dict.tar.bz2
//
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/replace.fst
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/test-hr.wav
// wget https://github.com/k2-fsa/sherpa-onnx/releases/download/hr-files/lexicon.txt
//
// clang-format on
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "sherpa-onnx/c-api/c-api.h"
// Decode test-hr.wav with a streaming (online) Zipformer transducer and
// post-process partial results with the homophone replacer (hr), whose rules
// come from ./dict, ./lexicon.txt and ./replace.fst (see the wget commands
// above). Feeding is done in fixed-size chunks to simulate streaming.
//
// Returns 0 on success, -1 if the wave file cannot be read or the recognizer
// cannot be created.
int32_t main() {
  const char *wav_filename = "test-hr.wav";
  const SherpaOnnxWave *wave = SherpaOnnxReadWave(wav_filename);
  if (wave == NULL) {
    fprintf(stderr, "Failed to read %s\n", wav_filename);
    return -1;
  }

  // Online model config. memset() zeroes the struct so unset fields keep
  // their default (0/NULL) values.
  //
  // Fix: the original assigned `tokens` and `num_threads` twice; the
  // duplicate assignments have been removed.
  SherpaOnnxOnlineModelConfig online_model_config;
  memset(&online_model_config, 0, sizeof(online_model_config));
  online_model_config.debug = 0;
  online_model_config.num_threads = 1;
  online_model_config.provider = "cpu";
  online_model_config.tokens =
      "./sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/tokens.txt";
  online_model_config.transducer.encoder =
      "./sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/"
      "encoder-epoch-99-avg-1.int8.onnx";
  // Note: We recommend not using int8.onnx for the decoder.
  online_model_config.transducer.decoder =
      "./sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/"
      "decoder-epoch-99-avg-1.onnx";
  online_model_config.transducer.joiner =
      "./sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/"
      "joiner-epoch-99-avg-1.int8.onnx";

  // Recognizer config, including the homophone replacer (hr) files.
  SherpaOnnxOnlineRecognizerConfig recognizer_config;
  memset(&recognizer_config, 0, sizeof(recognizer_config));
  recognizer_config.decoding_method = "greedy_search";
  recognizer_config.model_config = online_model_config;

  recognizer_config.hr.dict_dir = "./dict";
  recognizer_config.hr.lexicon = "./lexicon.txt";
  // Please see
  // https://colab.research.google.com/drive/1jEaS3s8FbRJIcVQJv2EQx19EM_mnuARi?usp=sharing
  // for how to generate your own replace.fst
  recognizer_config.hr.rule_fsts = "./replace.fst";

  const SherpaOnnxOnlineRecognizer *recognizer =
      SherpaOnnxCreateOnlineRecognizer(&recognizer_config);
  if (recognizer == NULL) {
    fprintf(stderr, "Please check your config!\n");
    SherpaOnnxFreeWave(wave);
    return -1;
  }

  const SherpaOnnxOnlineStream *stream =
      SherpaOnnxCreateOnlineStream(recognizer);

  const SherpaOnnxDisplay *display = SherpaOnnxCreateDisplay(50);
  int32_t segment_id = 0;

// simulate streaming. You can choose an arbitrary N
#define N 3200

  fprintf(stderr, "sample rate: %d, num samples: %d, duration: %.2f s\n",
          wave->sample_rate, wave->num_samples,
          (float)wave->num_samples / wave->sample_rate);

  int32_t k = 0;
  while (k < wave->num_samples) {
    // Feed at most N samples per iteration, clamped to the end of the wave.
    int32_t start = k;
    int32_t end =
        (start + N > wave->num_samples) ? wave->num_samples : (start + N);
    k += N;

    SherpaOnnxOnlineStreamAcceptWaveform(stream, wave->sample_rate,
                                         wave->samples + start, end - start);
    while (SherpaOnnxIsOnlineStreamReady(recognizer, stream)) {
      SherpaOnnxDecodeOnlineStream(recognizer, stream);
    }

    const SherpaOnnxOnlineRecognizerResult *r =
        SherpaOnnxGetOnlineStreamResult(recognizer, stream);

    if (strlen(r->text)) {
      SherpaOnnxPrint(display, segment_id, r->text);
    }

    // An endpoint marks the end of an utterance; start a new display segment
    // only if the finished segment produced any text.
    if (SherpaOnnxOnlineStreamIsEndpoint(recognizer, stream)) {
      if (strlen(r->text)) {
        ++segment_id;
      }
      SherpaOnnxOnlineStreamReset(recognizer, stream);
    }

    SherpaOnnxDestroyOnlineRecognizerResult(r);
  }

  // add some tail padding
  float tail_paddings[4800] = {0};  // 0.3 seconds at 16 kHz sample rate
  SherpaOnnxOnlineStreamAcceptWaveform(stream, wave->sample_rate, tail_paddings,
                                       4800);

  SherpaOnnxFreeWave(wave);

  SherpaOnnxOnlineStreamInputFinished(stream);
  while (SherpaOnnxIsOnlineStreamReady(recognizer, stream)) {
    SherpaOnnxDecodeOnlineStream(recognizer, stream);
  }

  // Flush and print any remaining text after the final decode.
  const SherpaOnnxOnlineRecognizerResult *r =
      SherpaOnnxGetOnlineStreamResult(recognizer, stream);

  if (strlen(r->text)) {
    SherpaOnnxPrint(display, segment_id, r->text);
  }

  // Release everything in reverse order of creation.
  SherpaOnnxDestroyOnlineRecognizerResult(r);
  SherpaOnnxDestroyDisplay(display);
  SherpaOnnxDestroyOnlineStream(stream);
  SherpaOnnxDestroyOnlineRecognizer(recognizer);

  fprintf(stderr, "\n");

  return 0;
}