Add keyword spotting API for node-addon-api (#877)

Fangjun Kuang
2024-05-14 20:26:48 +08:00
committed by GitHub
parent 75630b986b
commit 03c956a317
18 changed files with 492 additions and 26 deletions

View File

@@ -62,6 +62,13 @@ The following tables list the examples in this folder.
|[./test_audio_tagging_zipformer.js](./test_audio_tagging_zipformer.js)| Audio tagging with a Zipformer model|
|[./test_audio_tagging_ced.js](./test_audio_tagging_ced.js)| Audio tagging with a [CED](https://github.com/RicherMans/CED) model|
## Keyword spotting
|File| Description|
|---|---|
|[./test_keyword_spotter_transducer.js](./test_keyword_spotter_transducer.js)| Keyword spotting from a file using a Zipformer model|
|[./test_keyword_spotter_transducer_microphone.js](./test_keyword_spotter_transducer_microphone.js)| Keyword spotting from a microphone using a Zipformer model|
## Streaming speech-to-text from files
|File| Description|
@@ -325,3 +332,17 @@ rm sherpa-onnx-punct-ct-transformer-zh-en-vocab272727-2024-04-12.tar.bz2
node ./test_punctuation.js
```
## Keyword spotting
```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/kws-models/sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01.tar.bz2
tar xvf sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01.tar.bz2
rm sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01.tar.bz2
node ./test_keyword_spotter_transducer.js
# To run keyword spotting using a microphone
npm install naudiodon2
node ./test_keyword_spotter_transducer_microphone.js
```
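A quick sanity check (not part of this commit) before running the tests: verify that the unpacked model files sit where the examples expect them. The directory and file names below are taken from the paths used in the new test files.

```js
// check_kws_model.js -- hypothetical helper, not part of this commit.
const fs = require('fs');

const dir = './sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01';
const files = [
  'encoder-epoch-12-avg-2-chunk-16-left-64.onnx',
  'decoder-epoch-12-avg-2-chunk-16-left-64.onnx',
  'joiner-epoch-12-avg-2-chunk-16-left-64.onnx',
  'tokens.txt',
];

for (const f of files) {
  console.log(f, fs.existsSync(`${dir}/${f}`) ? 'ok' : 'MISSING');
}
```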

View File

@@ -79,11 +79,5 @@ ai.on('data', data => {
}
});
ai.on('close', () => {
console.log('Free resources');
stream.free();
recognizer.free();
});
ai.start();
console.log('Started! Please speak')

View File

@@ -78,11 +78,6 @@ ai.on('data', data => {
}
});
ai.on('close', () => {
console.log('Free resources');
stream.free();
recognizer.free();
});
ai.start();
console.log('Started! Please speak')

View File

@@ -94,11 +94,5 @@ ai.on('data', data => {
}
});
ai.on('close', () => {
console.log('Free resources');
stream.free();
recognizer.free();
});
ai.start();
console.log('Started! Please speak')

View File

@@ -82,11 +82,5 @@ ai.on('data', data => {
}
});
ai.on('close', () => {
console.log('Free resources');
stream.free();
recognizer.free();
});
ai.start();
console.log('Started! Please speak')
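The four hunks above appear to drop the `ai.on('close')` handlers that freed the stream and recognizer in the existing microphone examples. If explicit cleanup is still wanted, a minimal sketch reusing the `stream` and `recognizer` objects created earlier in those files:

```js
// Optional cleanup on stream close, mirroring the handlers shown above.
ai.on('close', () => {
  console.log('Free resources');
  stream.free();
  recognizer.free();
});
```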

View File

@@ -0,0 +1,66 @@
// Copyright (c) 2024 Xiaomi Corporation
const sherpa_onnx = require('sherpa-onnx-node');
const performance = require('perf_hooks').performance;

// Please download test files from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/kws-models
const config = {
  'featConfig': {
    'sampleRate': 16000,
    'featureDim': 80,
  },
  'modelConfig': {
    'transducer': {
      'encoder':
          './sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01/encoder-epoch-12-avg-2-chunk-16-left-64.onnx',
      'decoder':
          './sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01/decoder-epoch-12-avg-2-chunk-16-left-64.onnx',
      'joiner':
          './sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01/joiner-epoch-12-avg-2-chunk-16-left-64.onnx',
    },
    'tokens':
        './sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01/tokens.txt',
    'numThreads': 1,
    'provider': 'cpu',
    'debug': 1,
  },
  'keywordsFile':
      './sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01/test_wavs/test_keywords.txt',
};

const waveFilename =
    './sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01/test_wavs/3.wav';

const kws = new sherpa_onnx.KeywordSpotter(config);
console.log('Started');

let start = performance.now();
const stream = kws.createStream();
const wave = sherpa_onnx.readWave(waveFilename);
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});

// Append ~0.4 seconds of silence as tail padding so the streaming model
// flushes its internal buffers and trailing keywords are not lost.
const tailPadding = new Float32Array(wave.sampleRate * 0.4);
stream.acceptWaveform({samples: tailPadding, sampleRate: wave.sampleRate});

// Drain the decoder, collecting every detected keyword.
const detectedKeywords = [];
while (kws.isReady(stream)) {
  const keyword = kws.getResult(stream).keyword;
  if (keyword != '') {
    detectedKeywords.push(keyword);
  }
  kws.decode(stream);
}
let stop = performance.now();
console.log('Done');

const elapsed_seconds = (stop - start) / 1000;
const duration = wave.samples.length / wave.sampleRate;
const real_time_factor = elapsed_seconds / duration;
console.log('Wave duration', duration.toFixed(3), 'seconds');
console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
console.log(
    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
    real_time_factor.toFixed(3));
console.log(waveFilename);
console.log('result\n', detectedKeywords);
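The same flow can be folded into a small reusable helper. This is a sketch distilled from the test above, not an API provided by sherpa-onnx-node; it assumes a `kws` object constructed as shown and a 16 kHz input:

```js
// Detect keywords in a buffer of samples; distilled from the test above.
// `kws` is a sherpa_onnx.KeywordSpotter; returns the detected keywords.
function detectKeywords(kws, samples, sampleRate) {
  const stream = kws.createStream();
  stream.acceptWaveform({sampleRate: sampleRate, samples: samples});

  // Tail padding flushes the streaming model, as in the test above.
  stream.acceptWaveform(
      {sampleRate: sampleRate, samples: new Float32Array(sampleRate * 0.4)});

  const found = [];
  while (kws.isReady(stream)) {
    const keyword = kws.getResult(stream).keyword;
    if (keyword != '') {
      found.push(keyword);
    }
    kws.decode(stream);
  }
  return found;
}

// Usage with the wave loaded above:
// console.log(detectKeywords(kws, wave.samples, wave.sampleRate));
```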

View File

@@ -0,0 +1,74 @@
// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
//
const portAudio = require('naudiodon2');
// console.log(portAudio.getDevices());

const sherpa_onnx = require('sherpa-onnx-node');

function createKeywordSpotter() {
  const config = {
    'featConfig': {
      'sampleRate': 16000,
      'featureDim': 80,
    },
    'modelConfig': {
      'transducer': {
        'encoder':
            './sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01/encoder-epoch-12-avg-2-chunk-16-left-64.onnx',
        'decoder':
            './sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01/decoder-epoch-12-avg-2-chunk-16-left-64.onnx',
        'joiner':
            './sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01/joiner-epoch-12-avg-2-chunk-16-left-64.onnx',
      },
      'tokens':
          './sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01/tokens.txt',
      'numThreads': 2,
      'provider': 'cpu',
      'debug': 1,
    },
    'keywordsFile':
        './sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01/keywords.txt',
  };

  return new sherpa_onnx.KeywordSpotter(config);
}

const kws = createKeywordSpotter();
const stream = kws.createStream();

let lastText = '';
let segmentIndex = 0;

const ai = new portAudio.AudioIO({
  inOptions: {
    channelCount: 1,
    closeOnError: true,  // Close the stream if an audio error is detected;
                         // if set to false, just log the error
    deviceId: -1,  // Use -1 or omit the deviceId to select the default device
    sampleFormat: portAudio.SampleFormatFloat32,
    sampleRate: kws.config.featConfig.sampleRate
  }
});

const display = new sherpa_onnx.Display(50);

ai.on('data', data => {
  // View the incoming PortAudio buffer as 32-bit float samples
  // (SampleFormatFloat32 above).
  const samples = new Float32Array(data.buffer);

  stream.acceptWaveform(
      {sampleRate: kws.config.featConfig.sampleRate, samples: samples});

  while (kws.isReady(stream)) {
    kws.decode(stream);
  }

  const keyword = kws.getResult(stream).keyword;
  if (keyword != '') {
    display.print(segmentIndex, keyword);
    segmentIndex += 1;
  }
});

ai.start();
console.log('Started! Please speak.');
console.log(`Only words from ${kws.config.keywordsFile} can be recognized`);
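A hedged sketch of graceful shutdown, which this commit does not include: upstream naudiodon exposes a `quit()` method on `AudioIO`, and naudiodon2 is assumed to keep it; verify against the installed version before relying on it.

```js
// Stop recording on Ctrl-C. Assumes naudiodon2 keeps upstream naudiodon's
// AudioIO.quit(); check the installed version before relying on it.
process.on('SIGINT', () => {
  console.log('Stopping');
  ai.quit();
  process.exit(0);
});
```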