Inverse text normalization API of streaming ASR for various programming languages (#1022)
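In the Node.js addon, the new inverse text normalization (ITN) support comes down to one extra field in the recognizer config: ruleFsts, a path to a rule FST that is applied to the recognition output. The diff below bumps the sherpa-onnx-node dependency and adds a file-based ITN example plus a microphone ITN example. As a minimal sketch (not part of this commit; withChineseNumberItn is a hypothetical helper name), enabling ITN on an existing streaming config looks like this:

// Sketch only: merge the ITN rule FST into an otherwise complete
// streaming-recognizer config (see the full configs in the files below).
const sherpa_onnx = require('sherpa-onnx-node');

function withChineseNumberItn(config) {
  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/itn_zh_number.fst
  return {...config, ruleFsts: './itn_zh_number.fst'};
}

// Usage, assuming `config` is a complete config like the ones below:
// const recognizer = new sherpa_onnx.OnlineRecognizer(withChineseNumberItn(config));
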
@@ -1,5 +1,5 @@
 {
   "dependencies": {
-    "sherpa-onnx-node": "^1.9.30"
+    "sherpa-onnx-node": "^1.10.0"
   }
 }

nodejs-addon-examples/test_asr_streaming_transducer_itn.js (new file, 59 lines)
@@ -0,0 +1,59 @@
// Copyright (c) 2024 Xiaomi Corporation
const sherpa_onnx = require('sherpa-onnx-node');

// Please download test files from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
const config = {
  'featConfig': {
    'sampleRate': 16000,
    'featureDim': 80,
  },
  'modelConfig': {
    'transducer': {
      'encoder':
          './sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/encoder-epoch-99-avg-1.onnx',
      'decoder':
          './sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/decoder-epoch-99-avg-1.onnx',
      'joiner':
          './sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/joiner-epoch-99-avg-1.onnx',
    },
    'tokens':
        './sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/tokens.txt',
    'numThreads': 2,
    'provider': 'cpu',
    'debug': 1,
  },
  // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/itn_zh_number.fst
  ruleFsts: './itn_zh_number.fst',
};

// https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/itn-zh-number.wav
const waveFilename = './itn-zh-number.wav';

const recognizer = new sherpa_onnx.OnlineRecognizer(config);
console.log('Started');
let start = Date.now();
const stream = recognizer.createStream();
const wave = sherpa_onnx.readWave(waveFilename);
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});

// Add 0.4 seconds of tail padding so the last words are fully decoded.
const tailPadding = new Float32Array(wave.sampleRate * 0.4);
stream.acceptWaveform({samples: tailPadding, sampleRate: wave.sampleRate});

while (recognizer.isReady(stream)) {
  recognizer.decode(stream);
}
const result = recognizer.getResult(stream);
let stop = Date.now();
console.log('Done');

const elapsed_seconds = (stop - start) / 1000;
const duration = wave.samples.length / wave.sampleRate;
const real_time_factor = elapsed_seconds / duration;
console.log('Wave duration', duration.toFixed(3), 'seconds');
console.log('Elapsed', elapsed_seconds.toFixed(3), 'seconds');
console.log(
    `RTF = ${elapsed_seconds.toFixed(3)}/${duration.toFixed(3)} =`,
    real_time_factor.toFixed(3));
console.log(waveFilename);
console.log('result\n', result);
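The only ITN-specific piece of the example above is the ruleFsts field; everything else is the usual streaming-transducer flow. As a rough illustration (not part of this commit; decodeFile is a hypothetical helper that reuses config and waveFilename from the example above), decoding the same wave with and without the rule FST shows the effect, with the ITN result expected to contain digits instead of spelled-out Chinese numbers:

// Hypothetical comparison, reusing only API calls that appear above.
function decodeFile(cfg, filename) {
  const recognizer = new sherpa_onnx.OnlineRecognizer(cfg);
  const stream = recognizer.createStream();
  const wave = sherpa_onnx.readWave(filename);
  stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});

  // Same 0.4 second tail padding as in the example above.
  const tailPadding = new Float32Array(wave.sampleRate * 0.4);
  stream.acceptWaveform({samples: tailPadding, sampleRate: wave.sampleRate});

  while (recognizer.isReady(stream)) {
    recognizer.decode(stream);
  }
  return recognizer.getResult(stream);
}

const withItn = decodeFile(config, waveFilename);

const noItnConfig = {...config};
delete noItnConfig.ruleFsts;  // drop the rule FST to disable ITN
const withoutItn = decodeFile(noItnConfig, waveFilename);

console.log('with ITN   :', withItn.text);
console.log('without ITN:', withoutItn.text);
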
@@ -0,0 +1,88 @@
// Copyright (c) 2023-2024 Xiaomi Corporation (authors: Fangjun Kuang)
//
const portAudio = require('naudiodon2');
// console.log(portAudio.getDevices());

const sherpa_onnx = require('sherpa-onnx-node');

function createOnlineRecognizer() {
  const config = {
    'featConfig': {
      'sampleRate': 16000,
      'featureDim': 80,
    },
    'modelConfig': {
      'transducer': {
        'encoder':
            './sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/encoder-epoch-99-avg-1.onnx',
        'decoder':
            './sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/decoder-epoch-99-avg-1.onnx',
        'joiner':
            './sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/joiner-epoch-99-avg-1.onnx',
      },
      'tokens':
          './sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/tokens.txt',
      'numThreads': 2,
      'provider': 'cpu',
      'debug': 1,
    },
    'decodingMethod': 'greedy_search',
    'maxActivePaths': 4,
    'enableEndpoint': true,
    'rule1MinTrailingSilence': 2.4,
    'rule2MinTrailingSilence': 1.2,
    'rule3MinUtteranceLength': 20,
    // https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/itn_zh_number.fst
    ruleFsts: './itn_zh_number.fst',
  };

  return new sherpa_onnx.OnlineRecognizer(config);
}

const recognizer = createOnlineRecognizer();
const stream = recognizer.createStream();

let lastText = '';
let segmentIndex = 0;

const ai = new portAudio.AudioIO({
  inOptions: {
    channelCount: 1,
    closeOnError: true,  // Close the stream if an audio error is detected;
                         // if set to false, just log the error
    deviceId: -1,        // Use -1 or omit the deviceId to select the default device
    sampleFormat: portAudio.SampleFormatFloat32,
    sampleRate: recognizer.config.featConfig.sampleRate
  }
});

const display = new sherpa_onnx.Display(50);

ai.on('data', data => {
  const samples = new Float32Array(data.buffer);

  stream.acceptWaveform(
      {sampleRate: recognizer.config.featConfig.sampleRate, samples: samples});

  while (recognizer.isReady(stream)) {
    recognizer.decode(stream);
  }

  const isEndpoint = recognizer.isEndpoint(stream);
  const text = recognizer.getResult(stream).text.toLowerCase();

  if (text.length > 0 && lastText != text) {
    lastText = text;
    display.print(segmentIndex, lastText);
  }

  if (isEndpoint) {
    if (text.length > 0) {
      lastText = text;
      segmentIndex += 1;
    }
    recognizer.reset(stream);
  }
});

ai.start();
console.log('Started! Please speak');
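The microphone example keeps reading audio until the process is killed. A small, hedged addition (not part of this commit, and assuming the naudiodon2 fork keeps naudiodon's AudioIO quit() method) would stop the stream cleanly on Ctrl-C:

// Hypothetical clean shutdown; verify that the installed naudiodon2 version
// actually provides quit() before relying on this.
process.on('SIGINT', () => {
  console.log('\nStopping ...');
  ai.quit(() => process.exit(0));
});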