Add VAD + Non-streaming ASR example for JavaScript API. (#1170)
@@ -93,6 +93,7 @@ The following tables list the examples in this folder.
|File|Description|
|---|---|
|[./test_asr_non_streaming_transducer.js](./test_asr_non_streaming_transducer.js)|Non-streaming speech recognition from a file with a Zipformer transducer model|
|[./test_asr_non_streaming_whisper.js](./test_asr_non_streaming_whisper.js)|Non-streaming speech recognition from a file using [Whisper](https://github.com/openai/whisper)|
|[./test_vad_with_non_streaming_asr_whisper.js](./test_vad_with_non_streaming_asr_whisper.js)|Non-streaming speech recognition from a file using [Whisper](https://github.com/openai/whisper) + [Silero VAD](https://github.com/snakers4/silero-vad)|
|[./test_asr_non_streaming_nemo_ctc.js](./test_asr_non_streaming_nemo_ctc.js)|Non-streaming speech recognition from a file using a [NeMo](https://github.com/NVIDIA/NeMo) CTC model with greedy search|
|[./test_asr_non_streaming_paraformer.js](./test_asr_non_streaming_paraformer.js)|Non-streaming speech recognition from a file using [Paraformer](https://github.com/alibaba-damo-academy/FunASR)|
|[./test_asr_non_streaming_sense_voice.js](./test_asr_non_streaming_sense_voice.js)|Non-streaming speech recognition from a file using [SenseVoice](https://github.com/FunAudioLLM/SenseVoice)|
@@ -221,11 +222,24 @@ rm sherpa-onnx-whisper-tiny.en.tar.bz2
node ./test_asr_non_streaming_whisper.js

# To run VAD + non-streaming ASR with Paraformer using a microphone
# To run VAD + non-streaming ASR with Whisper using a microphone
npm install naudiodon2
node ./test_vad_asr_non_streaming_whisper_microphone.js
```
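Note: `naudiodon2` provides PortAudio bindings for Node.js and is needed only for the microphone examples.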
### Non-streaming speech recognition with Whisper + VAD
```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.en.tar.bz2
tar xvf sherpa-onnx-whisper-tiny.en.tar.bz2
rm sherpa-onnx-whisper-tiny.en.tar.bz2

wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/Obama.wav
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx

node ./test_vad_with_non_streaming_asr_whisper.js
```
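The full script added by this commit, [./test_vad_with_non_streaming_asr_whisper.js](./test_vad_with_non_streaming_asr_whisper.js), is listed below: it splits the wave file into speech segments with Silero VAD and decodes each segment with the Whisper recognizer.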
### Non-streaming speech recognition with NeMo CTC models
@@ -1,5 +1,5 @@
{
  "dependencies": {
-    "sherpa-onnx-node": "^1.10.17"
+    "sherpa-onnx-node": "^1.10.18"
  }
}
nodejs-addon-examples/test_vad_with_non_streaming_asr_whisper.js (127 lines, new file)
@@ -0,0 +1,127 @@
// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)

const sherpa_onnx = require('sherpa-onnx-node');

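// Create a non-streaming (offline) recognizer backed by a Whisper tiny.en model.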
function createRecognizer() {
  // Please download test files from
  // https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
  const config = {
    'featConfig': {
      'sampleRate': 16000,
      'featureDim': 80,
    },
    'modelConfig': {
      'whisper': {
        'encoder': './sherpa-onnx-whisper-tiny.en/tiny.en-encoder.int8.onnx',
        'decoder': './sherpa-onnx-whisper-tiny.en/tiny.en-decoder.int8.onnx',
      },
      'tokens': './sherpa-onnx-whisper-tiny.en/tiny.en-tokens.txt',
      'numThreads': 2,
      'provider': 'cpu',
      'debug': 1,
    }
  };

  return new sherpa_onnx.OfflineRecognizer(config);
}

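// Create a Silero VAD; it can buffer up to 60 seconds of audio internally.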
function createVad() {
  // Please download silero_vad.onnx from
  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
  const config = {
    sileroVad: {
      model: './silero_vad.onnx',
      threshold: 0.5,
      minSpeechDuration: 0.25,
      minSilenceDuration: 0.5,
      windowSize: 512,
    },
    sampleRate: 16000,
    debug: true,
    numThreads: 1,
  };

  const bufferSizeInSeconds = 60;

  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
}

const recognizer = createRecognizer();
const vad = createVad();

// Please download ./Obama.wav from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
const waveFilename = './Obama.wav';
const wave = sherpa_onnx.readWave(waveFilename);

if (wave.sampleRate != recognizer.config.featConfig.sampleRate) {
  throw new Error(
      `Expected sample rate: ${recognizer.config.featConfig.sampleRate}. Given: ${wave.sampleRate}`);
}

console.log('Started');
let start = Date.now();

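// The VAD consumes the waveform in fixed-size windows (sileroVad.windowSize =
// 512 samples, i.e. 32 ms at 16 kHz). Completed speech segments queue up
// inside the VAD and are drained below with front()/pop(); each segment is
// then decoded independently by the offline recognizer.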
const windowSize = vad.config.sileroVad.windowSize;
for (let i = 0; i < wave.samples.length; i += windowSize) {
  const thisWindow = wave.samples.subarray(i, i + windowSize);
  vad.acceptWaveform(thisWindow);

  while (!vad.isEmpty()) {
    const segment = vad.front();
    vad.pop();

    let start_time = segment.start / wave.sampleRate;
    let end_time = start_time + segment.samples.length / wave.sampleRate;

    start_time = start_time.toFixed(2);
    end_time = end_time.toFixed(2);

    const stream = recognizer.createStream();
    stream.acceptWaveform(
        {samples: segment.samples, sampleRate: wave.sampleRate});

    recognizer.decode(stream);
    const r = recognizer.getResult(stream);
    if (r.text.length > 0) {
      const text = r.text.toLowerCase().trim();
      console.log(`${start_time} -- ${end_time}: ${text}`);
    }
  }
}

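// Flush the VAD so that any speech still buffered at the end of the file is
// emitted as a final segment, then decode the remaining segments as above.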
vad.flush();

while (!vad.isEmpty()) {
  const segment = vad.front();
  vad.pop();

  let start_time = segment.start / wave.sampleRate;
  let end_time = start_time + segment.samples.length / wave.sampleRate;

  start_time = start_time.toFixed(2);
  end_time = end_time.toFixed(2);

  const stream = recognizer.createStream();
  stream.acceptWaveform(
      {samples: segment.samples, sampleRate: wave.sampleRate});

  recognizer.decode(stream);
  const r = recognizer.getResult(stream);
  if (r.text.length > 0) {
    const text = r.text.toLowerCase().trim();
    console.log(`${start_time} -- ${end_time}: ${text}`);
  }
}

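// Report timing: the real-time factor (RTF) is processing time divided by
// audio duration, so RTF < 1 means faster than real time.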
let stop = Date.now();
console.log('Done');

const elapsed_seconds = (stop - start) / 1000;
const duration = wave.samples.length / wave.sampleRate;
const real_time_factor = elapsed_seconds / duration;
console.log('Wave duration', duration.toFixed(3), 'seconds');
console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
console.log(
    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
    real_time_factor.toFixed(3));