Add VAD examples using ALSA for recording (#739)

This commit is contained in:
Fangjun Kuang
2024-04-08 16:41:01 +08:00
committed by GitHub
parent a5f8fbc83f
commit 6fb8ceda57
17 changed files with 601 additions and 9 deletions

View File

@@ -251,6 +251,7 @@ if(SHERPA_ONNX_HAS_ALSA AND SHERPA_ONNX_ENABLE_BINARY)
add_executable(sherpa-onnx-keyword-spotter-alsa sherpa-onnx-keyword-spotter-alsa.cc alsa.cc)
add_executable(sherpa-onnx-alsa-offline sherpa-onnx-alsa-offline.cc alsa.cc)
add_executable(sherpa-onnx-alsa-offline-speaker-identification sherpa-onnx-alsa-offline-speaker-identification.cc alsa.cc)
add_executable(sherpa-onnx-vad-alsa sherpa-onnx-vad-alsa.cc alsa.cc)
if(SHERPA_ONNX_ENABLE_TTS)
@@ -259,9 +260,10 @@ if(SHERPA_ONNX_HAS_ALSA AND SHERPA_ONNX_ENABLE_BINARY)
set(exes
sherpa-onnx-alsa
sherpa-onnx-keyword-spotter-alsa
sherpa-onnx-alsa-offline
sherpa-onnx-alsa-offline-speaker-identification
sherpa-onnx-keyword-spotter-alsa
sherpa-onnx-vad-alsa
)
if(SHERPA_ONNX_ENABLE_TTS)

View File

@@ -0,0 +1,132 @@
// sherpa-onnx/csrc/sherpa-onnx-vad-alsa.cc
//
// Copyright (c) 2024 Xiaomi Corporation
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include "sherpa-onnx/csrc/alsa.h"
#include "sherpa-onnx/csrc/circular-buffer.h"
#include "sherpa-onnx/csrc/voice-activity-detector.h"
#include "sherpa-onnx/csrc/wave-writer.h"
// Set to a non-zero value by the SIGINT handler to stop the recording loop
// in main().
//
// NOTE: A plain `bool` modified from a signal handler is undefined behavior;
// `volatile sig_atomic_t` is the only object type the C/C++ standards
// guarantee can be written safely from a handler.
volatile sig_atomic_t stop = 0;

// SIGINT (Ctrl + C) handler: request a clean shutdown.
//
// @param sig  The signal number (unused).
//
// NOTE(review): fprintf is not async-signal-safe; it is kept here because
// this is example code and the message is best-effort only.
static void Handler(int32_t sig) {
  stop = 1;
  fprintf(stderr, "\nCaught Ctrl + C. Exiting...\n");
}
// Records audio from an ALSA capture device, runs silero-vad on the stream,
// and writes every detected speech segment to a `seg-<k>-<duration>s.wav`
// file in the current directory.
//
// Expects exactly one positional argument (the ALSA device name, e.g.
// "plughw:3,0") plus the --silero-vad-model option; see kUsageMessage.
//
// @return 0 on a clean Ctrl + C shutdown; exits with a failure status on
//         bad arguments, an invalid config, or an unsupported sample rate.
int32_t main(int32_t argc, char *argv[]) {
  signal(SIGINT, Handler);

  const char *kUsageMessage = R"usage(
This program shows how to use VAD in sherpa-onnx.
  ./bin/sherpa-onnx-vad-alsa \
    --silero-vad-model=/path/to/silero_vad.onnx \
    device_name

Please download silero_vad.onnx from
https://github.com/snakers4/silero-vad/blob/master/files/silero_vad.onnx

For instance, use
wget https://github.com/snakers4/silero-vad/raw/master/files/silero_vad.onnx

The device name specifies which microphone to use in case there are several
on your system. You can use

  arecord -l

to find all available microphones on your computer. For instance, if it outputs

**** List of CAPTURE Hardware Devices ****
card 3: UACDemoV10 [UACDemoV1.0], device 0: USB Audio [USB Audio]
  Subdevices: 1/1
  Subdevice #0: subdevice #0

and if you want to select card 3 and the device 0 on that card, please use:

  plughw:3,0

as the device_name.
)usage";

  sherpa_onnx::ParseOptions po(kUsageMessage);
  sherpa_onnx::VadModelConfig config;
  config.Register(&po);
  po.Read(argc, argv);

  if (po.NumArgs() != 1) {
    fprintf(stderr, "Please provide only 1 argument: the device name\n");
    po.PrintUsage();
    exit(EXIT_FAILURE);
  }

  fprintf(stderr, "%s\n", config.ToString().c_str());

  if (!config.Validate()) {
    fprintf(stderr, "Errors in config!\n");
    return -1;
  }

  std::string device_name = po.GetArg(1);
  sherpa_onnx::Alsa alsa(device_name.c_str());
  fprintf(stderr, "Use recording device: %s\n", device_name.c_str());

  // silero-vad operates on 16 kHz audio; refuse devices that cannot be
  // resampled to it.
  int32_t sample_rate = 16000;

  if (alsa.GetExpectedSampleRate() != sample_rate) {
    fprintf(stderr, "sample rate: %d != %d\n", alsa.GetExpectedSampleRate(),
            sample_rate);
    // Was exit(-1); EXIT_FAILURE is portable and consistent with the
    // argument-error path above.
    exit(EXIT_FAILURE);
  }

  // Read the microphone in 100 ms chunks (in device frames).
  int32_t chunk = 0.1 * alsa.GetActualSampleRate();

  auto vad = std::make_unique<sherpa_onnx::VoiceActivityDetector>(config);

  fprintf(stderr, "Started. Please speak\n");

  bool printed = false;  // true once "Detected speech" was printed for the
                         // current speech run, so it is printed only once
  int32_t k = 0;         // running index used to name the output .wav files

  while (!stop) {
    const std::vector<float> &samples = alsa.Read(chunk);

    vad->AcceptWaveform(samples.data(), samples.size());

    if (vad->IsSpeechDetected() && !printed) {
      printed = true;
      fprintf(stderr, "\nDetected speech!\n");
    }

    if (!vad->IsSpeechDetected()) {
      printed = false;
    }

    // Drain all completed speech segments and save each one to disk.
    while (!vad->Empty()) {
      const auto &segment = vad->Front();
      float duration =
          segment.samples.size() / static_cast<float>(sample_rate);

      fprintf(stderr, "Duration: %.3f seconds\n", duration);

      char filename[128];
      snprintf(filename, sizeof(filename), "seg-%d-%.3fs.wav", k, duration);
      k += 1;

      // Use sample_rate rather than repeating the magic constant 16000.
      sherpa_onnx::WriteWave(filename, sample_rate, segment.samples.data(),
                             segment.samples.size());
      fprintf(stderr, "Saved to %s\n", filename);
      fprintf(stderr, "----------\n");

      vad->Pop();
    }
  }

  return 0;
}

View File

@@ -13,6 +13,7 @@
#include "sherpa-onnx/csrc/circular-buffer.h"
#include "sherpa-onnx/csrc/microphone.h"
#include "sherpa-onnx/csrc/voice-activity-detector.h"
#include "sherpa-onnx/csrc/wave-writer.h"
bool stop = false;
std::mutex mutex;
@@ -122,6 +123,7 @@ wget https://github.com/snakers4/silero-vad/raw/master/files/silero_vad.onnx
int32_t window_size = config.silero_vad.window_size;
bool printed = false;
int32_t k = 0;
while (!stop) {
{
std::lock_guard<std::mutex> lock(mutex);
@@ -140,9 +142,19 @@ wget https://github.com/snakers4/silero-vad/raw/master/files/silero_vad.onnx
}
while (!vad->Empty()) {
float duration = vad->Front().samples.size() / sample_rate;
vad->Pop();
const auto &segment = vad->Front();
float duration = segment.samples.size() / sample_rate;
fprintf(stderr, "Duration: %.3f seconds\n", duration);
char filename[128];
snprintf(filename, sizeof(filename), "seg-%d-%.3fs.wav", k, duration);
k += 1;
sherpa_onnx::WriteWave(filename, 16000, segment.samples.data(),
segment.samples.size());
fprintf(stderr, "Saved to %s\n", filename);
fprintf(stderr, "----------\n");
vad->Pop();
}
}
}

View File

@@ -35,6 +35,7 @@ set(srcs
vad-model-config.cc
vad-model.cc
voice-activity-detector.cc
wave-writer.cc
)
if(SHERPA_ONNX_HAS_ALSA)
list(APPEND srcs ${CMAKE_SOURCE_DIR}/sherpa-onnx/csrc/alsa.cc alsa.cc)

View File

@@ -26,6 +26,7 @@
#include "sherpa-onnx/python/csrc/vad-model-config.h"
#include "sherpa-onnx/python/csrc/vad-model.h"
#include "sherpa-onnx/python/csrc/voice-activity-detector.h"
#include "sherpa-onnx/python/csrc/wave-writer.h"
#if SHERPA_ONNX_ENABLE_TTS == 1
#include "sherpa-onnx/python/csrc/offline-tts.h"
@@ -36,6 +37,8 @@ namespace sherpa_onnx {
PYBIND11_MODULE(_sherpa_onnx, m) {
m.doc() = "pybind11 binding of sherpa-onnx";
PybindWaveWriter(&m);
PybindFeatures(&m);
PybindOnlineCtcFstDecoderConfig(&m);
PybindOnlineModelConfig(&m);

View File

@@ -0,0 +1,27 @@
// sherpa-onnx/python/csrc/wave-writer.cc
//
// Copyright (c) 2024 Xiaomi Corporation
#include "sherpa-onnx/python/csrc/wave-writer.h"
#include <string>
#include <vector>
#include "sherpa-onnx/csrc/wave-writer.h"
namespace sherpa_onnx {

// Exposes sherpa_onnx::WriteWave() to Python as `write_wave(filename,
// samples, sample_rate)`, returning True on success.
void PybindWaveWriter(py::module *m) {
  m->def(
      "write_wave",
      [](const std::string &filename, const std::vector<float> &samples,
         int32_t sample_rate) -> bool {
        return WriteWave(filename, sample_rate, samples.data(),
                         samples.size());
      },
      py::arg("filename"), py::arg("samples"), py::arg("sample_rate"));
}

}  // namespace sherpa_onnx

View File

@@ -0,0 +1,16 @@
// sherpa-onnx/python/csrc/wave-writer.h
//
// Copyright (c) 2024 Xiaomi Corporation
#ifndef SHERPA_ONNX_PYTHON_CSRC_WAVE_WRITER_H_
#define SHERPA_ONNX_PYTHON_CSRC_WAVE_WRITER_H_

#include "sherpa-onnx/python/csrc/sherpa-onnx.h"

namespace sherpa_onnx {

// Registers the `write_wave` function on the given pybind11 module; the
// implementation lives in sherpa-onnx/python/csrc/wave-writer.cc.
void PybindWaveWriter(py::module *m);

}  // namespace sherpa_onnx

#endif  // SHERPA_ONNX_PYTHON_CSRC_WAVE_WRITER_H_

View File

@@ -19,6 +19,7 @@ from _sherpa_onnx import (
VadModel,
VadModelConfig,
VoiceActivityDetector,
write_wave,
)
from .keyword_spotter import KeywordSpotter