// sherpa-onnx/csrc/sherpa-onnx-microphone.cc
|
|
//
|
|
// Copyright (c) 2022-2023 Xiaomi Corporation
|
|
|
|
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

#include <algorithm>
#include <atomic>  // std::atomic<bool> stop flag shared across threads
#include <cctype>  // std::tolower

#include "portaudio.h"  // NOLINT
#include "sherpa-onnx/csrc/display.h"
#include "sherpa-onnx/csrc/microphone.h"
#include "sherpa-onnx/csrc/online-recognizer.h"
|
|
|
|
// Global shutdown flag, set by the SIGINT handler and read concurrently by
// the PortAudio capture callback (runs on the audio thread) and the main
// decode loop. A plain bool here is a data race; std::atomic<bool> makes the
// cross-thread reads/writes well-defined, and lock-free atomics are also
// safe to modify from a signal handler.
std::atomic<bool> stop(false);
|
|
|
|
static int32_t RecordCallback(const void *input_buffer,
|
|
void * /*output_buffer*/,
|
|
unsigned long frames_per_buffer, // NOLINT
|
|
const PaStreamCallbackTimeInfo * /*time_info*/,
|
|
PaStreamCallbackFlags /*status_flags*/,
|
|
void *user_data) {
|
|
auto stream = reinterpret_cast<sherpa_onnx::OnlineStream *>(user_data);
|
|
|
|
stream->AcceptWaveform(16000, reinterpret_cast<const float *>(input_buffer),
|
|
frames_per_buffer);
|
|
|
|
return stop ? paComplete : paContinue;
|
|
}
|
|
|
|
// SIGINT (Ctrl + C) handler installed in main(). Sets the global `stop`
// flag so the PortAudio callback returns paComplete and the main decode
// loop exits on its next iteration.
//
// NOTE(review): fprintf is not on the async-signal-safe function list, so
// calling it from a signal handler is technically undefined behavior per
// POSIX. Acceptable for a demo; consider write(2) or deferring the message
// to main() — confirm with project maintainers before changing.
static void Handler(int32_t sig) {
  stop = true;
  fprintf(stderr, "\nCaught Ctrl + C. Exiting...\n");
}
|
|
|
|
int32_t main(int32_t argc, char *argv[]) {
|
|
if (argc < 5 || argc > 6) {
|
|
const char *usage = R"usage(
|
|
Usage:
|
|
./bin/sherpa-onnx-microphone \
|
|
/path/to/tokens.txt \
|
|
/path/to/encoder.onnx\
|
|
/path/to/decoder.onnx\
|
|
/path/to/joiner.onnx\
|
|
[num_threads]
|
|
|
|
Please refer to
|
|
https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html
|
|
for a list of pre-trained models to download.
|
|
)usage";
|
|
fprintf(stderr, "%s\n", usage);
|
|
fprintf(stderr, "argc, %d\n", argc);
|
|
|
|
return 0;
|
|
}
|
|
signal(SIGINT, Handler);
|
|
|
|
sherpa_onnx::OnlineRecognizerConfig config;
|
|
config.tokens = argv[1];
|
|
|
|
config.model_config.debug = false;
|
|
config.model_config.encoder_filename = argv[2];
|
|
config.model_config.decoder_filename = argv[3];
|
|
config.model_config.joiner_filename = argv[4];
|
|
|
|
config.model_config.num_threads = 2;
|
|
if (argc == 6 && atoi(argv[5]) > 0) {
|
|
config.model_config.num_threads = atoi(argv[5]);
|
|
}
|
|
|
|
config.enable_endpoint = true;
|
|
|
|
config.endpoint_config.rule1.min_trailing_silence = 2.4;
|
|
config.endpoint_config.rule2.min_trailing_silence = 1.2;
|
|
config.endpoint_config.rule3.min_utterance_length = 300;
|
|
|
|
fprintf(stderr, "%s\n", config.ToString().c_str());
|
|
|
|
sherpa_onnx::OnlineRecognizer recognizer(config);
|
|
auto s = recognizer.CreateStream();
|
|
|
|
sherpa_onnx::Microphone mic;
|
|
|
|
PaDeviceIndex num_devices = Pa_GetDeviceCount();
|
|
fprintf(stderr, "Num devices: %d\n", num_devices);
|
|
|
|
PaStreamParameters param;
|
|
|
|
param.device = Pa_GetDefaultInputDevice();
|
|
if (param.device == paNoDevice) {
|
|
fprintf(stderr, "No default input device found\n");
|
|
exit(EXIT_FAILURE);
|
|
}
|
|
fprintf(stderr, "Use default device: %d\n", param.device);
|
|
|
|
const PaDeviceInfo *info = Pa_GetDeviceInfo(param.device);
|
|
fprintf(stderr, " Name: %s\n", info->name);
|
|
fprintf(stderr, " Max input channels: %d\n", info->maxInputChannels);
|
|
|
|
param.channelCount = 1;
|
|
param.sampleFormat = paFloat32;
|
|
|
|
param.suggestedLatency = info->defaultLowInputLatency;
|
|
param.hostApiSpecificStreamInfo = nullptr;
|
|
const float sample_rate = 16000;
|
|
|
|
PaStream *stream;
|
|
PaError err =
|
|
Pa_OpenStream(&stream, ¶m, nullptr, /* &outputParameters, */
|
|
sample_rate,
|
|
0, // frames per buffer
|
|
paClipOff, // we won't output out of range samples
|
|
// so don't bother clipping them
|
|
RecordCallback, s.get());
|
|
if (err != paNoError) {
|
|
fprintf(stderr, "portaudio error: %s\n", Pa_GetErrorText(err));
|
|
exit(EXIT_FAILURE);
|
|
}
|
|
|
|
err = Pa_StartStream(stream);
|
|
fprintf(stderr, "Started\n");
|
|
|
|
if (err != paNoError) {
|
|
fprintf(stderr, "portaudio error: %s\n", Pa_GetErrorText(err));
|
|
exit(EXIT_FAILURE);
|
|
}
|
|
|
|
std::string last_text;
|
|
int32_t segment_index = 0;
|
|
sherpa_onnx::Display display;
|
|
while (!stop) {
|
|
while (recognizer.IsReady(s.get())) {
|
|
recognizer.DecodeStream(s.get());
|
|
}
|
|
|
|
auto text = recognizer.GetResult(s.get()).text;
|
|
bool is_endpoint = recognizer.IsEndpoint(s.get());
|
|
|
|
if (!text.empty() && last_text != text) {
|
|
last_text = text;
|
|
|
|
std::transform(text.begin(), text.end(), text.begin(),
|
|
[](auto c) { return std::tolower(c); });
|
|
|
|
display.Print(segment_index, text);
|
|
}
|
|
|
|
if (!text.empty() && is_endpoint) {
|
|
++segment_index;
|
|
recognizer.Reset(s.get());
|
|
}
|
|
|
|
Pa_Sleep(20); // sleep for 20ms
|
|
}
|
|
|
|
err = Pa_CloseStream(stream);
|
|
if (err != paNoError) {
|
|
fprintf(stderr, "portaudio error: %s\n", Pa_GetErrorText(err));
|
|
exit(EXIT_FAILURE);
|
|
}
|
|
|
|
return 0;
|
|
}
|