Add JavaScript API for Moonshine models (#1480)
@@ -112,6 +112,8 @@ The following tables list the examples in this folder.
|[./test_asr_non_streaming_transducer.js](./test_asr_non_streaming_transducer.js)|Non-streaming speech recognition from a file with a Zipformer transducer model|
|[./test_asr_non_streaming_whisper.js](./test_asr_non_streaming_whisper.js)|Non-streaming speech recognition from a file using [Whisper](https://github.com/openai/whisper)|
|[./test_vad_with_non_streaming_asr_whisper.js](./test_vad_with_non_streaming_asr_whisper.js)|Non-streaming speech recognition from a file using [Whisper](https://github.com/openai/whisper) + [Silero VAD](https://github.com/snakers4/silero-vad)|
|[./test_asr_non_streaming_moonshine.js](./test_asr_non_streaming_moonshine.js)|Non-streaming speech recognition from a file using [Moonshine](https://github.com/usefulsensors/moonshine)|
|[./test_vad_with_non_streaming_asr_moonshine.js](./test_vad_with_non_streaming_asr_moonshine.js)|Non-streaming speech recognition from a file using [Moonshine](https://github.com/usefulsensors/moonshine) + [Silero VAD](https://github.com/snakers4/silero-vad)|
|[./test_asr_non_streaming_nemo_ctc.js](./test_asr_non_streaming_nemo_ctc.js)|Non-streaming speech recognition from a file using a [NeMo](https://github.com/NVIDIA/NeMo) CTC model with greedy search|
|[./test_asr_non_streaming_paraformer.js](./test_asr_non_streaming_paraformer.js)|Non-streaming speech recognition from a file using [Paraformer](https://github.com/alibaba-damo-academy/FunASR)|
|[./test_asr_non_streaming_sense_voice.js](./test_asr_non_streaming_sense_voice.js)|Non-streaming speech recognition from a file using [SenseVoice](https://github.com/FunAudioLLM/SenseVoice)|

@@ -122,6 +124,7 @@ The following tables list the examples in this folder.
|---|---|
|[./test_vad_asr_non_streaming_transducer_microphone.js](./test_vad_asr_non_streaming_transducer_microphone.js)|VAD + Non-streaming speech recognition from a microphone using a Zipformer transducer model|
|[./test_vad_asr_non_streaming_whisper_microphone.js](./test_vad_asr_non_streaming_whisper_microphone.js)|VAD + Non-streaming speech recognition from a microphone using [Whisper](https://github.com/openai/whisper)|
|[./test_vad_asr_non_streaming_moonshine_microphone.js](./test_vad_asr_non_streaming_moonshine_microphone.js)|VAD + Non-streaming speech recognition from a microphone using [Moonshine](https://github.com/usefulsensors/moonshine)|
|[./test_vad_asr_non_streaming_nemo_ctc_microphone.js](./test_vad_asr_non_streaming_nemo_ctc_microphone.js)|VAD + Non-streaming speech recognition from a microphone using a [NeMo](https://github.com/NVIDIA/NeMo) CTC model with greedy search|
|[./test_vad_asr_non_streaming_paraformer_microphone.js](./test_vad_asr_non_streaming_paraformer_microphone.js)|VAD + Non-streaming speech recognition from a microphone using [Paraformer](https://github.com/alibaba-damo-academy/FunASR)|
|[./test_vad_asr_non_streaming_sense_voice_microphone.js](./test_vad_asr_non_streaming_sense_voice_microphone.js)|VAD + Non-streaming speech recognition from a microphone using [SenseVoice](https://github.com/FunAudioLLM/SenseVoice)|

@@ -260,6 +263,33 @@ npm install naudiodon2
node ./test_vad_asr_non_streaming_whisper_microphone.js
```

### Non-streaming speech recognition with Moonshine

```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-moonshine-tiny-en-int8.tar.bz2
tar xvf sherpa-onnx-moonshine-tiny-en-int8.tar.bz2
rm sherpa-onnx-moonshine-tiny-en-int8.tar.bz2

node ./test_asr_non_streaming_moonshine.js

# To run VAD + non-streaming ASR with Moonshine using a microphone
npm install naudiodon2
node ./test_vad_asr_non_streaming_moonshine_microphone.js
```
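
For reference, `./test_asr_non_streaming_moonshine.js` (added by this commit and shown in full below) condenses to the following flow; the model paths assume the tiny int8 archive extracted above:

```js
const sherpa_onnx = require('sherpa-onnx-node');

// Point the offline recognizer at the four Moonshine ONNX files.
const recognizer = new sherpa_onnx.OfflineRecognizer({
  featConfig: {sampleRate: 16000, featureDim: 80},
  modelConfig: {
    moonshine: {
      preprocessor: './sherpa-onnx-moonshine-tiny-en-int8/preprocess.onnx',
      encoder: './sherpa-onnx-moonshine-tiny-en-int8/encode.int8.onnx',
      uncachedDecoder: './sherpa-onnx-moonshine-tiny-en-int8/uncached_decode.int8.onnx',
      cachedDecoder: './sherpa-onnx-moonshine-tiny-en-int8/cached_decode.int8.onnx',
    },
    tokens: './sherpa-onnx-moonshine-tiny-en-int8/tokens.txt',
  },
});

// Decode a single file: feed the whole waveform, then decode once.
const wave = sherpa_onnx.readWave('./sherpa-onnx-moonshine-tiny-en-int8/test_wavs/0.wav');
const stream = recognizer.createStream();
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
recognizer.decode(stream);
console.log(recognizer.getResult(stream).text);
```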

### Non-streaming speech recognition with Moonshine + VAD

```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-moonshine-tiny-en-int8.tar.bz2
tar xvf sherpa-onnx-moonshine-tiny-en-int8.tar.bz2
rm sherpa-onnx-moonshine-tiny-en-int8.tar.bz2

wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/Obama.wav
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx

node ./test_vad_with_non_streaming_asr_moonshine.js
```

### Non-streaming speech recognition with Whisper + VAD

```bash

nodejs-addon-examples/test_asr_non_streaming_moonshine.js (new file, 50 lines)
@@ -0,0 +1,50 @@
// Copyright (c) 2024 Xiaomi Corporation
const sherpa_onnx = require('sherpa-onnx-node');

// Please download test files from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
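
// A Moonshine model is exported as four ONNX files: an audio preprocessor,
// an encoder, an uncached decoder for the first decoding step, and a cached
// decoder for the following steps; tokens.txt maps token IDs back to text.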
const config = {
  'featConfig': {
    'sampleRate': 16000,
    'featureDim': 80,
  },
  'modelConfig': {
    'moonshine': {
      'preprocessor': './sherpa-onnx-moonshine-tiny-en-int8/preprocess.onnx',
      'encoder': './sherpa-onnx-moonshine-tiny-en-int8/encode.int8.onnx',
      'uncachedDecoder':
          './sherpa-onnx-moonshine-tiny-en-int8/uncached_decode.int8.onnx',
      'cachedDecoder':
          './sherpa-onnx-moonshine-tiny-en-int8/cached_decode.int8.onnx',
    },
    'tokens': './sherpa-onnx-moonshine-tiny-en-int8/tokens.txt',
    'numThreads': 2,
    'provider': 'cpu',
    'debug': 1,
  }
};

const waveFilename = './sherpa-onnx-moonshine-tiny-en-int8/test_wavs/0.wav';

const recognizer = new sherpa_onnx.OfflineRecognizer(config);
console.log('Started');
let start = Date.now();
const stream = recognizer.createStream();
const wave = sherpa_onnx.readWave(waveFilename);
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
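
// The recognizer is offline (non-streaming): the stream holds the whole
// waveform, which is decoded in a single call below.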
recognizer.decode(stream);
const result = recognizer.getResult(stream);
let stop = Date.now();
console.log('Done');

const elapsed_seconds = (stop - start) / 1000;
const duration = wave.samples.length / wave.sampleRate;
const real_time_factor = elapsed_seconds / duration;
console.log('Wave duration', duration.toFixed(3), 'seconds');
console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
console.log(
    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
    real_time_factor.toFixed(3));
console.log(waveFilename);
console.log('result\n', result);

nodejs-addon-examples/test_vad_asr_non_streaming_moonshine_microphone.js (new file, 113 lines)
@@ -0,0 +1,113 @@
// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
//
const portAudio = require('naudiodon2');
// console.log(portAudio.getDevices());

const sherpa_onnx = require('sherpa-onnx-node');

function createRecognizer() {
  // Please download test files from
  // https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
  const config = {
    'featConfig': {
      'sampleRate': 16000,
      'featureDim': 80,
    },
    'modelConfig': {
      'moonshine': {
        'preprocessor': './sherpa-onnx-moonshine-tiny-en-int8/preprocess.onnx',
        'encoder': './sherpa-onnx-moonshine-tiny-en-int8/encode.int8.onnx',
        'uncachedDecoder':
            './sherpa-onnx-moonshine-tiny-en-int8/uncached_decode.int8.onnx',
        'cachedDecoder':
            './sherpa-onnx-moonshine-tiny-en-int8/cached_decode.int8.onnx',
      },
      'tokens': './sherpa-onnx-moonshine-tiny-en-int8/tokens.txt',
      'numThreads': 2,
      'provider': 'cpu',
      'debug': 1,
    }
  };

  return new sherpa_onnx.OfflineRecognizer(config);
}

function createVad() {
  // please download silero_vad.onnx from
  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
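  // threshold, minSpeechDuration, and minSilenceDuration control when a
  // speech segment opens and closes; windowSize is the number of samples
  // fed to Silero VAD per call (512 at the 16 kHz rate used here).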
  const config = {
    sileroVad: {
      model: './silero_vad.onnx',
      threshold: 0.5,
      minSpeechDuration: 0.25,
      minSilenceDuration: 0.5,
      windowSize: 512,
    },
    sampleRate: 16000,
    debug: true,
    numThreads: 1,
  };

  const bufferSizeInSeconds = 60;

  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
}

const recognizer = createRecognizer();
const vad = createVad();
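
// Microphone audio is accumulated in a circular buffer and handed to the
// VAD in fixed-size windows.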
const bufferSizeInSeconds = 30;
const buffer =
    new sherpa_onnx.CircularBuffer(bufferSizeInSeconds * vad.config.sampleRate);

const ai = new portAudio.AudioIO({
  inOptions: {
    channelCount: 1,
    closeOnError: true,  // Close the stream if an audio error is detected, if
                         // set false then just log the error
    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
    sampleFormat: portAudio.SampleFormatFloat32,
    sampleRate: vad.config.sampleRate
  }
});

let printed = false;
let index = 0;
ai.on('data', data => {
  const windowSize = vad.config.sileroVad.windowSize;
  buffer.push(new Float32Array(data.buffer));
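  // Drain the buffer in windowSize chunks; the VAD consumes fixed-size
  // windows.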
  while (buffer.size() > windowSize) {
    const samples = buffer.get(buffer.head(), windowSize);
    buffer.pop(windowSize);
    vad.acceptWaveform(samples);
  }

  while (!vad.isEmpty()) {
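    // Each completed speech segment is decoded with Moonshine and also
    // saved to a .wav file for inspection.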
    const segment = vad.front();
    vad.pop();
    const stream = recognizer.createStream();
    stream.acceptWaveform({
      samples: segment.samples,
      sampleRate: recognizer.config.featConfig.sampleRate
    });
    recognizer.decode(stream);
    const r = recognizer.getResult(stream);
    if (r.text.length > 0) {
      const text = r.text.toLowerCase().trim();
      console.log(`${index}: ${text}`);

      const filename = `${index}-${text}-${
          new Date()
              .toLocaleTimeString('en-US', {hour12: false})
              .split(' ')[0]}.wav`;
      sherpa_onnx.writeWave(
          filename,
          {samples: segment.samples, sampleRate: vad.config.sampleRate});

      index += 1;
    }
  }
});

ai.start();
console.log('Started! Please speak');

nodejs-addon-examples/test_vad_with_non_streaming_asr_moonshine.js (new file, 132 lines)
@@ -0,0 +1,132 @@
// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)

const sherpa_onnx = require('sherpa-onnx-node');

function createRecognizer() {
  // Please download test files from
  // https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
  const config = {
    'featConfig': {
      'sampleRate': 16000,
      'featureDim': 80,
    },
    'modelConfig': {
      'moonshine': {
        'preprocessor': './sherpa-onnx-moonshine-tiny-en-int8/preprocess.onnx',
        'encoder': './sherpa-onnx-moonshine-tiny-en-int8/encode.int8.onnx',
        'uncachedDecoder':
            './sherpa-onnx-moonshine-tiny-en-int8/uncached_decode.int8.onnx',
        'cachedDecoder':
            './sherpa-onnx-moonshine-tiny-en-int8/cached_decode.int8.onnx',
      },
      'tokens': './sherpa-onnx-moonshine-tiny-en-int8/tokens.txt',
      'numThreads': 2,
      'provider': 'cpu',
      'debug': 1,
    }
  };

  return new sherpa_onnx.OfflineRecognizer(config);
}

function createVad() {
  // please download silero_vad.onnx from
  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
  const config = {
    sileroVad: {
      model: './silero_vad.onnx',
      threshold: 0.5,
      minSpeechDuration: 0.25,
      minSilenceDuration: 0.5,
      maxSpeechDuration: 5,
      windowSize: 512,
    },
    sampleRate: 16000,
    debug: true,
    numThreads: 1,
  };

  const bufferSizeInSeconds = 60;

  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
}

const recognizer = createRecognizer();
const vad = createVad();

// please download ./Obama.wav from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
const waveFilename = './Obama.wav';
const wave = sherpa_onnx.readWave(waveFilename);

if (wave.sampleRate !== recognizer.config.featConfig.sampleRate) {
  throw new Error(
      `Expected sample rate: ${recognizer.config.featConfig.sampleRate}. Given: ${wave.sampleRate}`);
}

console.log('Started');
let start = Date.now();

const windowSize = vad.config.sileroVad.windowSize;
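// Stream the file through the VAD in fixed-size windows; each completed
// speech segment is decoded as soon as the VAD emits it.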
for (let i = 0; i < wave.samples.length; i += windowSize) {
  const thisWindow = wave.samples.subarray(i, i + windowSize);
  vad.acceptWaveform(thisWindow);

  while (!vad.isEmpty()) {
    const segment = vad.front();
    vad.pop();

    let start_time = segment.start / wave.sampleRate;
    let end_time = start_time + segment.samples.length / wave.sampleRate;

    start_time = start_time.toFixed(2);
    end_time = end_time.toFixed(2);

    const stream = recognizer.createStream();
    stream.acceptWaveform(
        {samples: segment.samples, sampleRate: wave.sampleRate});

    recognizer.decode(stream);
    const r = recognizer.getResult(stream);
    if (r.text.length > 0) {
      const text = r.text.toLowerCase().trim();
      console.log(`${start_time} -- ${end_time}: ${text}`);
    }
  }
}
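
// flush() tells the VAD no more audio is coming, so it emits any speech
// segment still buffered at the end of the file; drain it the same way.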
vad.flush();

while (!vad.isEmpty()) {
  const segment = vad.front();
  vad.pop();

  let start_time = segment.start / wave.sampleRate;
  let end_time = start_time + segment.samples.length / wave.sampleRate;

  start_time = start_time.toFixed(2);
  end_time = end_time.toFixed(2);

  const stream = recognizer.createStream();
  stream.acceptWaveform(
      {samples: segment.samples, sampleRate: wave.sampleRate});

  recognizer.decode(stream);
  const r = recognizer.getResult(stream);
  if (r.text.length > 0) {
    const text = r.text.toLowerCase().trim();
    console.log(`${start_time} -- ${end_time}: ${text}`);
  }
}

let stop = Date.now();
console.log('Done');

const elapsed_seconds = (stop - start) / 1000;
const duration = wave.samples.length / wave.sampleRate;
const real_time_factor = elapsed_seconds / duration;
console.log('Wave duration', duration.toFixed(3), 'seconds');
console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
console.log(
    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
    real_time_factor.toFixed(3));