Add non-streaming ASR APIs for node-addon-api (#868)
nodejs-addon-examples/README.md
@@ -39,7 +39,7 @@ npm install naudiodon2

node ./test_vad_microphone.js
```

-## Streaming speech recognition with zipformer transducer
+## Streaming speech recognition with Zipformer transducer

```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
@@ -54,7 +54,7 @@ npm install naudiodon2

node ./test_asr_streaming_transducer_microphone.js
```

-## Streaming speech recognition with zipformer CTC
+## Streaming speech recognition with Zipformer CTC

```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-zipformer-ctc-small-2024-03-18.tar.bz2
@@ -72,3 +72,74 @@ npm install naudiodon2

node ./test_asr_streaming_ctc_microphone.js
node ./test_asr_streaming_ctc_hlg_microphone.js
```

## Streaming speech recognition with Paraformer

```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-paraformer-bilingual-zh-en.tar.bz2
tar xvf sherpa-onnx-streaming-paraformer-bilingual-zh-en.tar.bz2
rm sherpa-onnx-streaming-paraformer-bilingual-zh-en.tar.bz2

node ./test_asr_streaming_paraformer.js

# To run the test with a microphone, you need to install the package naudiodon2
npm install naudiodon2

node ./test_asr_streaming_paraformer_microphone.js
```

## Non-streaming speech recognition with Zipformer transducer

```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-zipformer-en-2023-04-01.tar.bz2
tar xvf sherpa-onnx-zipformer-en-2023-04-01.tar.bz2
rm sherpa-onnx-zipformer-en-2023-04-01.tar.bz2

node ./test_asr_non_streaming_transducer.js

# To run VAD + non-streaming ASR with a transducer using a microphone
npm install naudiodon2
node ./test_vad_asr_non_streaming_transducer_microphone.js
```

## Non-streaming speech recognition with Whisper

```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.en.tar.bz2
tar xvf sherpa-onnx-whisper-tiny.en.tar.bz2
rm sherpa-onnx-whisper-tiny.en.tar.bz2

node ./test_asr_non_streaming_whisper.js

# To run VAD + non-streaming ASR with Whisper using a microphone
npm install naudiodon2
node ./test_vad_asr_non_streaming_whisper_microphone.js
```

## Non-streaming speech recognition with NeMo CTC models

```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2
tar xvf sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2
rm sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k.tar.bz2

node ./test_asr_non_streaming_nemo_ctc.js

# To run VAD + non-streaming ASR with a NeMo CTC model using a microphone
npm install naudiodon2
node ./test_vad_asr_non_streaming_nemo_ctc_microphone.js
```

## Non-streaming speech recognition with Paraformer

```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-03-28.tar.bz2
tar xvf sherpa-onnx-paraformer-zh-2023-03-28.tar.bz2
rm sherpa-onnx-paraformer-zh-2023-03-28.tar.bz2

node ./test_asr_non_streaming_paraformer.js

# To run VAD + non-streaming ASR with Paraformer using a microphone
npm install naudiodon2
node ./test_vad_asr_non_streaming_paraformer_microphone.js
```
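All of the new non-streaming examples below share one call sequence: construct an `OfflineRecognizer`, create a stream, feed the whole waveform, decode once, and read the result. Here is a minimal sketch of that flow, assuming the Whisper model directory downloaded in the section above; the full example files below add timing and RTF reporting.

```js
// Minimal sketch of the non-streaming (offline) API added in this commit.
// Assumes ./sherpa-onnx-whisper-tiny.en has been downloaded as shown above.
const sherpa_onnx = require('sherpa-onnx-node');

const recognizer = new sherpa_onnx.OfflineRecognizer({
  'featConfig': {'sampleRate': 16000, 'featureDim': 80},
  'modelConfig': {
    'whisper': {
      'encoder': './sherpa-onnx-whisper-tiny.en/tiny.en-encoder.int8.onnx',
      'decoder': './sherpa-onnx-whisper-tiny.en/tiny.en-decoder.int8.onnx',
    },
    'tokens': './sherpa-onnx-whisper-tiny.en/tiny.en-tokens.txt',
    'numThreads': 2,
    'provider': 'cpu',
  },
});

const wave = sherpa_onnx.readWave('./sherpa-onnx-whisper-tiny.en/test_wavs/0.wav');
const stream = recognizer.createStream();
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
recognizer.decode(stream);                  // one-shot decode; no isReady() loop
console.log(recognizer.getResult(stream));  // prints the recognition result object
```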
nodejs-addon-examples/test_asr_non_streaming_nemo_ctc.js (new file, 50 lines)
@@ -0,0 +1,50 @@
// Copyright (c) 2024 Xiaomi Corporation
const sherpa_onnx = require('sherpa-onnx-node');
const performance = require('perf_hooks').performance;

// Please download test files from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
const config = {
  'featConfig': {
    'sampleRate': 16000,
    'featureDim': 80,
  },
  'modelConfig': {
    'nemoCtc': {
      'model':
          './sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k/model.onnx',
    },
    'tokens':
        './sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k/tokens.txt',
    'numThreads': 2,
    'provider': 'cpu',
    'debug': 1,
  }
};

const waveFilename =
    './sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k/test_wavs/de-german.wav';

const recognizer = new sherpa_onnx.OfflineRecognizer(config);
console.log('Started');
let start = performance.now();
const stream = recognizer.createStream();
const wave = sherpa_onnx.readWave(waveFilename);
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});

recognizer.decode(stream);
const result = recognizer.getResult(stream);
let stop = performance.now();
console.log('Done');

// Real-time factor (RTF): processing time divided by audio duration.
const elapsed_seconds = (stop - start) / 1000;
const duration = wave.samples.length / wave.sampleRate;
const real_time_factor = elapsed_seconds / duration;
console.log('Wave duration', duration.toFixed(3), 'seconds');
console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
console.log(
    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
    real_time_factor.toFixed(3));
console.log(waveFilename);
console.log('result\n', result);
nodejs-addon-examples/test_asr_non_streaming_paraformer.js (new file, 48 lines)
@@ -0,0 +1,48 @@
// Copyright (c) 2024 Xiaomi Corporation
const sherpa_onnx = require('sherpa-onnx-node');
const performance = require('perf_hooks').performance;

// Please download test files from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
const config = {
  'featConfig': {
    'sampleRate': 16000,
    'featureDim': 80,
  },
  'modelConfig': {
    'paraformer': {
      'model': './sherpa-onnx-paraformer-zh-2023-03-28/model.int8.onnx',
    },
    'tokens': './sherpa-onnx-paraformer-zh-2023-03-28/tokens.txt',
    'numThreads': 2,
    'provider': 'cpu',
    'debug': 1,
  }
};

const waveFilename =
    './sherpa-onnx-paraformer-zh-2023-03-28/test_wavs/5-henan.wav';

const recognizer = new sherpa_onnx.OfflineRecognizer(config);
console.log('Started');
let start = performance.now();
const stream = recognizer.createStream();
const wave = sherpa_onnx.readWave(waveFilename);
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});

recognizer.decode(stream);
const result = recognizer.getResult(stream);
let stop = performance.now();
console.log('Done');

// Real-time factor (RTF): processing time divided by audio duration.
const elapsed_seconds = (stop - start) / 1000;
const duration = wave.samples.length / wave.sampleRate;
const real_time_factor = elapsed_seconds / duration;
console.log('Wave duration', duration.toFixed(3), 'seconds');
console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
console.log(
    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
    real_time_factor.toFixed(3));
console.log(waveFilename);
console.log('result\n', result);
nodejs-addon-examples/test_asr_non_streaming_transducer.js (new file, 52 lines)
@@ -0,0 +1,52 @@
// Copyright (c) 2024 Xiaomi Corporation
const sherpa_onnx = require('sherpa-onnx-node');
const performance = require('perf_hooks').performance;

// Please download test files from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
const config = {
  'featConfig': {
    'sampleRate': 16000,
    'featureDim': 80,
  },
  'modelConfig': {
    'transducer': {
      'encoder':
          './sherpa-onnx-zipformer-en-2023-04-01/encoder-epoch-99-avg-1.int8.onnx',
      'decoder':
          './sherpa-onnx-zipformer-en-2023-04-01/decoder-epoch-99-avg-1.onnx',
      'joiner':
          './sherpa-onnx-zipformer-en-2023-04-01/joiner-epoch-99-avg-1.int8.onnx',
    },
    'tokens': './sherpa-onnx-zipformer-en-2023-04-01/tokens.txt',
    'numThreads': 2,
    'provider': 'cpu',
    'debug': 1,
  }
};

const waveFilename = './sherpa-onnx-zipformer-en-2023-04-01/test_wavs/1.wav';

const recognizer = new sherpa_onnx.OfflineRecognizer(config);
console.log('Started');
let start = performance.now();
const stream = recognizer.createStream();
const wave = sherpa_onnx.readWave(waveFilename);
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});

recognizer.decode(stream);
const result = recognizer.getResult(stream);
let stop = performance.now();
console.log('Done');

// Real-time factor (RTF): processing time divided by audio duration.
const elapsed_seconds = (stop - start) / 1000;
const duration = wave.samples.length / wave.sampleRate;
const real_time_factor = elapsed_seconds / duration;
console.log('Wave duration', duration.toFixed(3), 'seconds');
console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
console.log(
    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
    real_time_factor.toFixed(3));
console.log(waveFilename);
console.log('result\n', result);
nodejs-addon-examples/test_asr_non_streaming_whisper.js (new file, 48 lines)
@@ -0,0 +1,48 @@
// Copyright (c) 2024 Xiaomi Corporation
const sherpa_onnx = require('sherpa-onnx-node');
const performance = require('perf_hooks').performance;

// Please download test files from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
const config = {
  'featConfig': {
    'sampleRate': 16000,
    'featureDim': 80,
  },
  'modelConfig': {
    'whisper': {
      'encoder': './sherpa-onnx-whisper-tiny.en/tiny.en-encoder.int8.onnx',
      'decoder': './sherpa-onnx-whisper-tiny.en/tiny.en-decoder.int8.onnx',
    },
    'tokens': './sherpa-onnx-whisper-tiny.en/tiny.en-tokens.txt',
    'numThreads': 2,
    'provider': 'cpu',
    'debug': 1,
  }
};

const waveFilename = './sherpa-onnx-whisper-tiny.en/test_wavs/0.wav';

const recognizer = new sherpa_onnx.OfflineRecognizer(config);
console.log('Started');
let start = performance.now();
const stream = recognizer.createStream();
const wave = sherpa_onnx.readWave(waveFilename);
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});

recognizer.decode(stream);
const result = recognizer.getResult(stream);
let stop = performance.now();
console.log('Done');

// Real-time factor (RTF): processing time divided by audio duration.
const elapsed_seconds = (stop - start) / 1000;
const duration = wave.samples.length / wave.sampleRate;
const real_time_factor = elapsed_seconds / duration;
console.log('Wave duration', duration.toFixed(3), 'seconds');
console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
console.log(
    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
    real_time_factor.toFixed(3));
console.log(waveFilename);
console.log('result\n', result);
nodejs-addon-examples/test_asr_streaming_paraformer.js (new file, 56 lines)
@@ -0,0 +1,56 @@
// Copyright (c) 2024 Xiaomi Corporation
const sherpa_onnx = require('sherpa-onnx-node');
const performance = require('perf_hooks').performance;

// Please download test files from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
const config = {
  'featConfig': {
    'sampleRate': 16000,
    'featureDim': 80,
  },
  'modelConfig': {
    'paraformer': {
      'encoder':
          './sherpa-onnx-streaming-paraformer-bilingual-zh-en/encoder.int8.onnx',
      'decoder':
          './sherpa-onnx-streaming-paraformer-bilingual-zh-en/decoder.int8.onnx',
    },
    'tokens': './sherpa-onnx-streaming-paraformer-bilingual-zh-en/tokens.txt',
    'numThreads': 2,
    'provider': 'cpu',
    'debug': 1,
  }
};

const waveFilename =
    './sherpa-onnx-streaming-paraformer-bilingual-zh-en/test_wavs/0.wav';

const recognizer = new sherpa_onnx.OnlineRecognizer(config);
console.log('Started');
let start = performance.now();
const stream = recognizer.createStream();
const wave = sherpa_onnx.readWave(waveFilename);
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});

// Add 0.4 seconds of tail padding so the last word can be recognized.
const tailPadding = new Float32Array(wave.sampleRate * 0.4);
stream.acceptWaveform({samples: tailPadding, sampleRate: wave.sampleRate});

while (recognizer.isReady(stream)) {
  recognizer.decode(stream);
}
const result = recognizer.getResult(stream);
let stop = performance.now();
console.log('Done');

// Real-time factor (RTF): processing time divided by audio duration.
const elapsed_seconds = (stop - start) / 1000;
const duration = wave.samples.length / wave.sampleRate;
const real_time_factor = elapsed_seconds / duration;
console.log('Wave duration', duration.toFixed(3), 'seconds');
console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
console.log(
    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
    real_time_factor.toFixed(3));
console.log(waveFilename);
console.log('result\n', result);
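The file above pushes the whole wave (plus tail padding) into the stream in one call. The online API also accepts audio incrementally; the sketch below feeds the same wave in chunks and decodes as data becomes available. It assumes `recognizer` and `wave` are created exactly as in `test_asr_streaming_paraformer.js` above, and the 0.2-second chunk size is an arbitrary choice.

```js
// Sketch: feeding the online (streaming) recognizer in small chunks instead of
// all at once. `recognizer` and `wave` come from the example above; the chunk
// size below is an assumption, not a requirement of the API.
const chunkStream = recognizer.createStream();
const chunkSize = Math.floor(0.2 * wave.sampleRate);

for (let offset = 0; offset < wave.samples.length; offset += chunkSize) {
  const chunk = wave.samples.subarray(offset, offset + chunkSize);
  chunkStream.acceptWaveform({sampleRate: wave.sampleRate, samples: chunk});

  while (recognizer.isReady(chunkStream)) {
    recognizer.decode(chunkStream);
  }

  // Partial result so far; it grows as more audio is accepted.
  console.log(recognizer.getResult(chunkStream).text);
}
```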
nodejs-addon-examples/test_asr_streaming_paraformer_microphone.js (new file, 104 lines)
@@ -0,0 +1,104 @@
// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
//
const portAudio = require('naudiodon2');
// console.log(portAudio.getDevices());

const sherpa_onnx = require('sherpa-onnx-node');

function createOnlineRecognizer() {
  const config = {
    'featConfig': {
      'sampleRate': 16000,
      'featureDim': 80,
    },
    'modelConfig': {
      'paraformer': {
        'encoder':
            './sherpa-onnx-streaming-paraformer-bilingual-zh-en/encoder.int8.onnx',
        'decoder':
            './sherpa-onnx-streaming-paraformer-bilingual-zh-en/decoder.int8.onnx',
      },
      'tokens': './sherpa-onnx-streaming-paraformer-bilingual-zh-en/tokens.txt',
      'numThreads': 2,
      'provider': 'cpu',
      'debug': 1,
    },
    'decodingMethod': 'greedy_search',
    'maxActivePaths': 4,
    'enableEndpoint': true,
    'rule1MinTrailingSilence': 2.4,
    'rule2MinTrailingSilence': 1.2,
    'rule3MinUtteranceLength': 20
  };

  return new sherpa_onnx.OnlineRecognizer(config);
}

const recognizer = createOnlineRecognizer();
const stream = recognizer.createStream();

let lastText = '';
let segmentIndex = 0;

const ai = new portAudio.AudioIO({
  inOptions: {
    channelCount: 1,
    closeOnError: true,  // Close the stream if an audio error is detected;
                         // if set to false, just log the error
    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
    sampleFormat: portAudio.SampleFormatFloat32,
    sampleRate: recognizer.config.featConfig.sampleRate
  }
});

const display = new sherpa_onnx.Display(50);

ai.on('data', data => {
  const samples = new Float32Array(data.buffer);

  stream.acceptWaveform(
      {sampleRate: recognizer.config.featConfig.sampleRate, samples: samples});

  while (recognizer.isReady(stream)) {
    recognizer.decode(stream);
  }

  const isEndpoint = recognizer.isEndpoint(stream);
  let text = recognizer.getResult(stream).text.toLowerCase();

  if (isEndpoint) {
    // For online paraformer models we have to manually add tail padding at an
    // endpoint so that the last word can be recognized.
    const tailPadding =
        new Float32Array(recognizer.config.featConfig.sampleRate * 0.4);
    stream.acceptWaveform({
      samples: tailPadding,
      sampleRate: recognizer.config.featConfig.sampleRate
    });
    while (recognizer.isReady(stream)) {
      recognizer.decode(stream);
    }
    text = recognizer.getResult(stream).text.toLowerCase();
  }

  if (text.length > 0 && lastText != text) {
    lastText = text;
    display.print(segmentIndex, lastText);
  }
  if (isEndpoint) {
    if (text.length > 0) {
      lastText = text;
      segmentIndex += 1;
    }
    recognizer.reset(stream);
  }
});

ai.on('close', () => {
  console.log('Free resources');
  stream.free();
  recognizer.free();
});

ai.start();
console.log('Started! Please speak');
nodejs-addon-examples/test_vad_asr_non_streaming_nemo_ctc_microphone.js (new file, 110 lines)
@@ -0,0 +1,110 @@
// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
//
const portAudio = require('naudiodon2');
// console.log(portAudio.getDevices());

const sherpa_onnx = require('sherpa-onnx-node');

function createRecognizer() {
  // Please download test files from
  // https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
  const config = {
    'featConfig': {
      'sampleRate': 16000,
      'featureDim': 80,
    },
    'modelConfig': {
      'nemoCtc': {
        'model':
            './sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k/model.onnx',
      },
      'tokens':
          './sherpa-onnx-nemo-fast-conformer-ctc-be-de-en-es-fr-hr-it-pl-ru-uk-20k/tokens.txt',
      'numThreads': 2,
      'provider': 'cpu',
      'debug': 1,
    }
  };

  return new sherpa_onnx.OfflineRecognizer(config);
}

function createVad() {
  // Please download silero_vad.onnx from
  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
  const config = {
    sileroVad: {
      model: './silero_vad.onnx',
      threshold: 0.5,
      minSpeechDuration: 0.25,
      minSilenceDuration: 0.5,
      windowSize: 512,
    },
    sampleRate: 16000,
    debug: true,
    numThreads: 1,
  };

  const bufferSizeInSeconds = 60;

  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
}

const recognizer = createRecognizer();
const vad = createVad();

const bufferSizeInSeconds = 30;
const buffer =
    new sherpa_onnx.CircularBuffer(bufferSizeInSeconds * vad.config.sampleRate);

const ai = new portAudio.AudioIO({
  inOptions: {
    channelCount: 1,
    closeOnError: true,  // Close the stream if an audio error is detected;
                         // if set to false, just log the error
    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
    sampleFormat: portAudio.SampleFormatFloat32,
    sampleRate: vad.config.sampleRate
  }
});

let printed = false;
let index = 0;
ai.on('data', data => {
  const windowSize = vad.config.sileroVad.windowSize;
  // Buffer the incoming audio and feed it to the VAD in windowSize chunks.
  buffer.push(new Float32Array(data.buffer));
  while (buffer.size() > windowSize) {
    const samples = buffer.get(buffer.head(), windowSize);
    buffer.pop(windowSize);
    vad.acceptWaveform(samples);
  }

  // Each detected speech segment is decoded with the non-streaming recognizer
  // and also saved to a wav file.
  while (!vad.isEmpty()) {
    const segment = vad.front();
    vad.pop();
    const stream = recognizer.createStream();
    stream.acceptWaveform({
      samples: segment.samples,
      sampleRate: recognizer.config.featConfig.sampleRate
    });
    recognizer.decode(stream);
    const r = recognizer.getResult(stream);
    if (r.text.length > 0) {
      const text = r.text.toLowerCase().trim();
      console.log(`${index}: ${text}`);

      const filename = `${index}-${text}-${
          new Date()
              .toLocaleTimeString('en-US', {hour12: false})
              .split(' ')[0]}.wav`;
      sherpa_onnx.writeWave(
          filename,
          {samples: segment.samples, sampleRate: vad.config.sampleRate});

      index += 1;
    }
  }
});

ai.start();
console.log('Started! Please speak');
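The VAD examples in this commit all read from a microphone. The same `createRecognizer()`/`createVad()` helpers can also be pointed at a wav file; the sketch below assumes those two helpers from the file above, a hypothetical `./test.wav` recorded at 16 kHz, and that appending one second of silence is enough to make the VAD emit the final speech segment (the microphone examples never need this because their stream runs indefinitely).

```js
// Sketch: VAD + non-streaming ASR over a wav file instead of a microphone.
// `recognizer` and `vad` come from createRecognizer()/createVad() above;
// ./test.wav is a hypothetical 16 kHz input file.
const wave = sherpa_onnx.readWave('./test.wav');
const windowSize = vad.config.sileroVad.windowSize;

// Append one second of silence so the trailing speech segment is closed
// (assumption: minSilenceDuration is 0.5 s in the config above).
const padded = new Float32Array(wave.samples.length + wave.sampleRate);
padded.set(wave.samples);

for (let i = 0; i + windowSize <= padded.length; i += windowSize) {
  vad.acceptWaveform(padded.subarray(i, i + windowSize));

  while (!vad.isEmpty()) {
    const segment = vad.front();
    vad.pop();
    const stream = recognizer.createStream();
    stream.acceptWaveform({
      samples: segment.samples,
      sampleRate: recognizer.config.featConfig.sampleRate
    });
    recognizer.decode(stream);
    console.log(recognizer.getResult(stream).text);
  }
}
```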
nodejs-addon-examples/test_vad_asr_non_streaming_paraformer_microphone.js (new file, 108 lines)
@@ -0,0 +1,108 @@
// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
//
const portAudio = require('naudiodon2');
// console.log(portAudio.getDevices());

const sherpa_onnx = require('sherpa-onnx-node');

function createRecognizer() {
  // Please download test files from
  // https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
  const config = {
    'featConfig': {
      'sampleRate': 16000,
      'featureDim': 80,
    },
    'modelConfig': {
      'paraformer': {
        'model': './sherpa-onnx-paraformer-zh-2023-03-28/model.int8.onnx',
      },
      'tokens': './sherpa-onnx-paraformer-zh-2023-03-28/tokens.txt',
      'numThreads': 2,
      'provider': 'cpu',
      'debug': 1,
    }
  };

  return new sherpa_onnx.OfflineRecognizer(config);
}

function createVad() {
  // Please download silero_vad.onnx from
  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
  const config = {
    sileroVad: {
      model: './silero_vad.onnx',
      threshold: 0.5,
      minSpeechDuration: 0.25,
      minSilenceDuration: 0.5,
      windowSize: 512,
    },
    sampleRate: 16000,
    debug: true,
    numThreads: 1,
  };

  const bufferSizeInSeconds = 60;

  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
}

const recognizer = createRecognizer();
const vad = createVad();

const bufferSizeInSeconds = 30;
const buffer =
    new sherpa_onnx.CircularBuffer(bufferSizeInSeconds * vad.config.sampleRate);

const ai = new portAudio.AudioIO({
  inOptions: {
    channelCount: 1,
    closeOnError: true,  // Close the stream if an audio error is detected;
                         // if set to false, just log the error
    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
    sampleFormat: portAudio.SampleFormatFloat32,
    sampleRate: vad.config.sampleRate
  }
});

let printed = false;
let index = 0;
ai.on('data', data => {
  const windowSize = vad.config.sileroVad.windowSize;
  // Buffer the incoming audio and feed it to the VAD in windowSize chunks.
  buffer.push(new Float32Array(data.buffer));
  while (buffer.size() > windowSize) {
    const samples = buffer.get(buffer.head(), windowSize);
    buffer.pop(windowSize);
    vad.acceptWaveform(samples);
  }

  // Each detected speech segment is decoded with the non-streaming recognizer
  // and also saved to a wav file.
  while (!vad.isEmpty()) {
    const segment = vad.front();
    vad.pop();
    const stream = recognizer.createStream();
    stream.acceptWaveform({
      samples: segment.samples,
      sampleRate: recognizer.config.featConfig.sampleRate
    });
    recognizer.decode(stream);
    const r = recognizer.getResult(stream);
    if (r.text.length > 0) {
      const text = r.text.toLowerCase().trim();
      console.log(`${index}: ${text}`);

      const filename = `${index}-${text}-${
          new Date()
              .toLocaleTimeString('en-US', {hour12: false})
              .split(' ')[0]}.wav`;
      sherpa_onnx.writeWave(
          filename,
          {samples: segment.samples, sampleRate: vad.config.sampleRate});

      index += 1;
    }
  }
});

ai.start();
console.log('Started! Please speak');
nodejs-addon-examples/test_vad_asr_non_streaming_transducer_microphone.js (new file, 113 lines)
@@ -0,0 +1,113 @@
// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
//
const portAudio = require('naudiodon2');
// console.log(portAudio.getDevices());

const sherpa_onnx = require('sherpa-onnx-node');

function createRecognizer() {
  // Please download test files from
  // https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
  const config = {
    'featConfig': {
      'sampleRate': 16000,
      'featureDim': 80,
    },
    'modelConfig': {
      'transducer': {
        'encoder':
            './sherpa-onnx-zipformer-en-2023-04-01/encoder-epoch-99-avg-1.int8.onnx',
        'decoder':
            './sherpa-onnx-zipformer-en-2023-04-01/decoder-epoch-99-avg-1.onnx',
        'joiner':
            './sherpa-onnx-zipformer-en-2023-04-01/joiner-epoch-99-avg-1.int8.onnx',
      },
      'tokens': './sherpa-onnx-zipformer-en-2023-04-01/tokens.txt',
      'numThreads': 2,
      'provider': 'cpu',
      'debug': 1,
    }
  };

  return new sherpa_onnx.OfflineRecognizer(config);
}

function createVad() {
  // Please download silero_vad.onnx from
  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
  const config = {
    sileroVad: {
      model: './silero_vad.onnx',
      threshold: 0.5,
      minSpeechDuration: 0.25,
      minSilenceDuration: 0.5,
      windowSize: 512,
    },
    sampleRate: 16000,
    debug: true,
    numThreads: 1,
  };

  const bufferSizeInSeconds = 60;

  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
}

const recognizer = createRecognizer();
const vad = createVad();

const bufferSizeInSeconds = 30;
const buffer =
    new sherpa_onnx.CircularBuffer(bufferSizeInSeconds * vad.config.sampleRate);

const ai = new portAudio.AudioIO({
  inOptions: {
    channelCount: 1,
    closeOnError: true,  // Close the stream if an audio error is detected;
                         // if set to false, just log the error
    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
    sampleFormat: portAudio.SampleFormatFloat32,
    sampleRate: vad.config.sampleRate
  }
});

let printed = false;
let index = 0;
ai.on('data', data => {
  const windowSize = vad.config.sileroVad.windowSize;
  // Buffer the incoming audio and feed it to the VAD in windowSize chunks.
  buffer.push(new Float32Array(data.buffer));
  while (buffer.size() > windowSize) {
    const samples = buffer.get(buffer.head(), windowSize);
    buffer.pop(windowSize);
    vad.acceptWaveform(samples);
  }

  // Each detected speech segment is decoded with the non-streaming recognizer
  // and also saved to a wav file.
  while (!vad.isEmpty()) {
    const segment = vad.front();
    vad.pop();
    const stream = recognizer.createStream();
    stream.acceptWaveform({
      samples: segment.samples,
      sampleRate: recognizer.config.featConfig.sampleRate
    });
    recognizer.decode(stream);
    const r = recognizer.getResult(stream);
    if (r.text.length > 0) {
      const text = r.text.toLowerCase().trim();
      console.log(`${index}: ${text}`);

      const filename = `${index}-${text}-${
          new Date()
              .toLocaleTimeString('en-US', {hour12: false})
              .split(' ')[0]}.wav`;
      sherpa_onnx.writeWave(
          filename,
          {samples: segment.samples, sampleRate: vad.config.sampleRate});

      index += 1;
    }
  }
});

ai.start();
console.log('Started! Please speak');
nodejs-addon-examples/test_vad_asr_non_streaming_whisper_microphone.js (new file, 109 lines)
@@ -0,0 +1,109 @@
// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
//
const portAudio = require('naudiodon2');
// console.log(portAudio.getDevices());

const sherpa_onnx = require('sherpa-onnx-node');

function createRecognizer() {
  // Please download test files from
  // https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
  const config = {
    'featConfig': {
      'sampleRate': 16000,
      'featureDim': 80,
    },
    'modelConfig': {
      'whisper': {
        'encoder': './sherpa-onnx-whisper-tiny.en/tiny.en-encoder.int8.onnx',
        'decoder': './sherpa-onnx-whisper-tiny.en/tiny.en-decoder.int8.onnx',
      },
      'tokens': './sherpa-onnx-whisper-tiny.en/tiny.en-tokens.txt',
      'numThreads': 2,
      'provider': 'cpu',
      'debug': 1,
    }
  };

  return new sherpa_onnx.OfflineRecognizer(config);
}

function createVad() {
  // Please download silero_vad.onnx from
  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
  const config = {
    sileroVad: {
      model: './silero_vad.onnx',
      threshold: 0.5,
      minSpeechDuration: 0.25,
      minSilenceDuration: 0.5,
      windowSize: 512,
    },
    sampleRate: 16000,
    debug: true,
    numThreads: 1,
  };

  const bufferSizeInSeconds = 60;

  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
}

const recognizer = createRecognizer();
const vad = createVad();

const bufferSizeInSeconds = 30;
const buffer =
    new sherpa_onnx.CircularBuffer(bufferSizeInSeconds * vad.config.sampleRate);

const ai = new portAudio.AudioIO({
  inOptions: {
    channelCount: 1,
    closeOnError: true,  // Close the stream if an audio error is detected;
                         // if set to false, just log the error
    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
    sampleFormat: portAudio.SampleFormatFloat32,
    sampleRate: vad.config.sampleRate
  }
});

let printed = false;
let index = 0;
ai.on('data', data => {
  const windowSize = vad.config.sileroVad.windowSize;
  // Buffer the incoming audio and feed it to the VAD in windowSize chunks.
  buffer.push(new Float32Array(data.buffer));
  while (buffer.size() > windowSize) {
    const samples = buffer.get(buffer.head(), windowSize);
    buffer.pop(windowSize);
    vad.acceptWaveform(samples);
  }

  // Each detected speech segment is decoded with the non-streaming recognizer
  // and also saved to a wav file.
  while (!vad.isEmpty()) {
    const segment = vad.front();
    vad.pop();
    const stream = recognizer.createStream();
    stream.acceptWaveform({
      samples: segment.samples,
      sampleRate: recognizer.config.featConfig.sampleRate
    });
    recognizer.decode(stream);
    const r = recognizer.getResult(stream);
    if (r.text.length > 0) {
      const text = r.text.toLowerCase().trim();
      console.log(`${index}: ${text}`);

      const filename = `${index}-${text}-${
          new Date()
              .toLocaleTimeString('en-US', {hour12: false})
              .split(' ')[0]}.wav`;
      sherpa_onnx.writeWave(
          filename,
          {samples: segment.samples, sampleRate: vad.config.sampleRate});

      index += 1;
    }
  }
});

ai.start();
console.log('Started! Please speak');