Add spoken language identification for node-addon-api (#872)

Fangjun Kuang
2024-05-13 20:26:11 +08:00
committed by GitHub
parent 031134b4d4
commit 939fdd942c
13 changed files with 445 additions and 1 deletion


@@ -183,3 +183,21 @@ rm vits-icefall-zh-aishell3.tar.bz2
node ./test_tts_non_streaming_vits_zh_aishell3.js
```
## Spoken language identification with Whisper multi-lingual models
```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.tar.bz2
tar xvf sherpa-onnx-whisper-tiny.tar.bz2
rm sherpa-onnx-whisper-tiny.tar.bz2
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/spoken-language-identification-test-wavs.tar.bz2
tar xvf spoken-language-identification-test-wavs.tar.bz2
rm spoken-language-identification-test-wavs.tar.bz2
node ./test_spoken_language_identification.js
# To run VAD + spoken language identification using a microphone
npm install naudiodon2
node ./test_vad_spoken_language_identification_microphone.js
```
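For quick reference, the core of the API these scripts exercise looks like this; a minimal sketch distilled from the test file added below in this commit (paths assume the models downloaded above):

```js
// Minimal sketch distilled from the test script added in this commit.
const sherpa_onnx = require('sherpa-onnx-node');

const slid = new sherpa_onnx.SpokenLanguageIdentification({
  whisper: {
    encoder: './sherpa-onnx-whisper-tiny/tiny-encoder.int8.onnx',
    decoder: './sherpa-onnx-whisper-tiny/tiny-decoder.int8.onnx',
  },
  numThreads: 1,
  provider: 'cpu',
});

const wave = sherpa_onnx.readWave(
    './spoken-language-identification-test-wavs/en-english.wav');
const stream = slid.createStream();
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
console.log(slid.compute(stream));  // prints a language code, e.g. 'en'
```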


@@ -0,0 +1,40 @@
// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
const sherpa_onnx = require('sherpa-onnx-node');
function createSpokenLanguageID() {
  const config = {
    whisper: {
      encoder: './sherpa-onnx-whisper-tiny/tiny-encoder.int8.onnx',
      decoder: './sherpa-onnx-whisper-tiny/tiny-decoder.int8.onnx',
    },
    debug: true,
    numThreads: 1,
    provider: 'cpu',
  };
  return new sherpa_onnx.SpokenLanguageIdentification(config);
}
const slid = createSpokenLanguageID();
const testWaves = [
  './spoken-language-identification-test-wavs/ar-arabic.wav',
  './spoken-language-identification-test-wavs/de-german.wav',
  './spoken-language-identification-test-wavs/en-english.wav',
  './spoken-language-identification-test-wavs/fr-french.wav',
  './spoken-language-identification-test-wavs/pt-portuguese.wav',
  './spoken-language-identification-test-wavs/es-spanish.wav',
  './spoken-language-identification-test-wavs/zh-chinese.wav',
];
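// Intl.DisplayNames maps an ISO 639-1 code such as 'en' to an English name ('English').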
const display = new Intl.DisplayNames(['en'], {type: 'language'});
for (const f of testWaves) {
  const stream = slid.createStream();
  const wave = sherpa_onnx.readWave(f);
  stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
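  // compute() returns the detected language code, e.g. 'en'.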
  const lang = slid.compute(stream);
  console.log(f.split('/')[2], lang, display.of(lang));
}


@@ -26,7 +26,7 @@ function createVad() {
  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
}
-vad = createVad();
+const vad = createVad();
const bufferSizeInSeconds = 30;
const buffer =
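(The only change in this hunk: `vad` was previously assigned without a declaration, which creates an implicit global; it is now declared with `const`.)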


@@ -0,0 +1,114 @@
// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
const portAudio = require('naudiodon2');
// console.log(portAudio.getDevices());
const sherpa_onnx = require('sherpa-onnx-node');
function createVad() {
  // please download silero_vad.onnx from
  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
  const config = {
    sileroVad: {
      model: './silero_vad.onnx',
      threshold: 0.5,
      minSpeechDuration: 0.25,
      minSilenceDuration: 0.5,
      windowSize: 512,
    },
    sampleRate: 16000,
    debug: true,
    numThreads: 1,
  };
  const bufferSizeInSeconds = 60;
  return new sherpa_onnx.Vad(config, bufferSizeInSeconds);
}
// Please download test files from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
function createSpokenLanguageID() {
  const config = {
    whisper: {
      encoder: './sherpa-onnx-whisper-tiny/tiny-encoder.int8.onnx',
      decoder: './sherpa-onnx-whisper-tiny/tiny-decoder.int8.onnx',
    },
    debug: true,
    numThreads: 1,
    provider: 'cpu',
  };
  return new sherpa_onnx.SpokenLanguageIdentification(config);
}
const slid = createSpokenLanguageID();
const vad = createVad();
const display = new Intl.DisplayNames(['en'], {type: 'language'});
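// Circular buffer for incoming microphone samples; independent of the
// VAD's own 60-second internal buffer created in createVad().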
const bufferSizeInSeconds = 30;
const buffer =
    new sherpa_onnx.CircularBuffer(bufferSizeInSeconds * vad.config.sampleRate);
const ai = new portAudio.AudioIO({
  inOptions: {
    channelCount: 1,
    closeOnError: true,  // Close the stream if an audio error is detected;
                         // if set to false, just log the error
    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
    sampleFormat: portAudio.SampleFormatFloat32,
    sampleRate: vad.config.sampleRate,
  }
});
let printed = false;
let index = 0;
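// Each 'data' event delivers raw Float32 samples; buffer them and feed
// the VAD in windowSize-sample chunks.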
ai.on('data', data => {
  const windowSize = vad.config.sileroVad.windowSize;
  buffer.push(new Float32Array(data.buffer));
  while (buffer.size() > windowSize) {
    const samples = buffer.get(buffer.head(), windowSize);
    buffer.pop(windowSize);
    vad.acceptWaveform(samples);
    if (vad.isDetected() && !printed) {
      console.log(`${index}: Detected speech`);
      printed = true;
    }
    if (!vad.isDetected()) {
      printed = false;
    }
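    // Drain completed speech segments from the VAD and identify the language of each.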
    while (!vad.isEmpty()) {
      const segment = vad.front();
      vad.pop();
      const stream = slid.createStream();
      stream.acceptWaveform(
          {samples: segment.samples, sampleRate: vad.config.sampleRate});
      const lang = slid.compute(stream);
      const fullLang = display.of(lang);
      const filename = `${index}-${fullLang}-${
          new Date()
              .toLocaleTimeString('en-US', {hour12: false})
              .split(' ')[0]}.wav`;
      sherpa_onnx.writeWave(
          filename,
          {samples: segment.samples, sampleRate: vad.config.sampleRate});
      const duration = segment.samples.length / vad.config.sampleRate;
      console.log(`${index} End of speech. Duration: ${
          duration} seconds.\n Detected language: ${fullLang}`);
      console.log(`Saved to ${filename}`);
      index += 1;
    }
  }
});
ai.on('close', () => {
  console.log('Free resources');
});
ai.start();
console.log('Started! Please speak');
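A possible refinement, not part of this commit: stop the stream cleanly on Ctrl+C. This sketch assumes naudiodon2 keeps naudiodon's `AudioIO.quit()`:

```js
// Hypothetical addition (not in this commit): stop recording on Ctrl+C.
// Assumes naudiodon2 keeps naudiodon's AudioIO.quit(), which stops the
// underlying PortAudio stream so the 'close' handler above can run.
process.on('SIGINT', () => {
  console.log('\nStopping...');
  ai.quit();
});
```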