Add WebAssembly for ASR (#604)
This commit is contained in:
@@ -1,3 +1,7 @@
|
||||
# Build the WebAssembly text-to-speech demo when enabled.
if(SHERPA_ONNX_ENABLE_WASM_TTS)
  add_subdirectory(tts)
endif()

# Build the WebAssembly streaming speech-recognition demo when enabled.
if(SHERPA_ONNX_ENABLE_WASM_ASR)
  add_subdirectory(asr)
endif()
|
||||
|
||||
1
wasm/asr/.gitignore
vendored
Normal file
1
wasm/asr/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*.bak
|
||||
62
wasm/asr/CMakeLists.txt
Normal file
62
wasm/asr/CMakeLists.txt
Normal file
@@ -0,0 +1,62 @@
|
||||
# This directory can only be configured through the dedicated build script,
# which sets up the emscripten toolchain and exports this environment marker.
if(NOT $ENV{SHERPA_ONNX_IS_USING_BUILD_WASM_SH})
  message(FATAL_ERROR "Please use ./build-wasm-simd-asr.sh to build for wasm ASR")
endif()

# The model files are bundled into the wasm binary via --preload-file below,
# so they must exist before configuring. See assets/README.md for how to
# download and rename them.
if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/assets/encoder.onnx")
  message(FATAL_ERROR "Please read ${CMAKE_CURRENT_SOURCE_DIR}/assets/README.md before you continue")
endif()

# C symbols (from sherpa-onnx/c-api plus sherpa-onnx-wasm-asr-main.cc) that
# must remain callable from JavaScript.
set(exported_functions
  MyPrint
  # online ASR
  AcceptWaveform
  CreateOnlineRecognizer
  CreateOnlineStream
  DecodeOnlineStream
  DestroyOnlineRecognizer
  DestroyOnlineRecognizerResult
  DestroyOnlineStream
  GetOnlineStreamResult
  InputFinished
  IsEndpoint
  IsOnlineStreamReady
  Reset
  #
)
# Emscripten prefixes every exported C symbol with an underscore.
set(mangled_exported_functions)
foreach(x IN LISTS exported_functions)
  list(APPEND mangled_exported_functions "_${x}")
endforeach()
list(JOIN mangled_exported_functions "," all_exported_functions)

include_directories(${CMAKE_SOURCE_DIR})
set(MY_FLAGS " -s FORCE_FILESYSTEM=1 -s INITIAL_MEMORY=512MB -s ALLOW_MEMORY_GROWTH=1")
string(APPEND MY_FLAGS " -sSTACK_SIZE=10485760 ") # 10MB
string(APPEND MY_FLAGS " -sEXPORTED_FUNCTIONS=[_CopyHeap,_malloc,_free,${all_exported_functions}] ")
# Bundle the model files so they appear at the wasm filesystem root ("@.").
string(APPEND MY_FLAGS "--preload-file ${CMAKE_CURRENT_SOURCE_DIR}/assets@. ")
string(APPEND MY_FLAGS " -sEXPORTED_RUNTIME_METHODS=['ccall','stringToUTF8','setValue','getValue'] ")

set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${MY_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_FLAGS}")
# Fixed: the original wrote to the misspelled, non-existent variable
# CMAKE_EXECUTBLE_LINKER_FLAGS, so MY_FLAGS never reached the linker through
# this assignment. CMAKE_EXE_LINKER_FLAGS is the variable CMake actually uses
# when linking executables.
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${MY_FLAGS}")

# Emscripten's toolchain file sets the suffix to .js; anything else means the
# wrong toolchain is active.
if(NOT CMAKE_EXECUTABLE_SUFFIX STREQUAL ".js")
  message(FATAL_ERROR "The default suffix for building executables should be .js!")
endif()
# set(CMAKE_EXECUTABLE_SUFFIX ".html")

add_executable(sherpa-onnx-wasm-asr-main sherpa-onnx-wasm-asr-main.cc)
target_link_libraries(sherpa-onnx-wasm-asr-main PRIVATE sherpa-onnx-c-api)
install(TARGETS sherpa-onnx-wasm-asr-main DESTINATION bin/wasm/asr)

# Install the generated JS/wasm/data artifacts together with the static demo
# page so bin/wasm/asr is a self-contained web app.
install(
  FILES
    "$<TARGET_FILE_DIR:sherpa-onnx-wasm-asr-main>/sherpa-onnx-wasm-asr-main.js"
    "index.html"
    "sherpa-onnx.js"
    "app.js"
    "$<TARGET_FILE_DIR:sherpa-onnx-wasm-asr-main>/sherpa-onnx-wasm-asr-main.wasm"
    "$<TARGET_FILE_DIR:sherpa-onnx-wasm-asr-main>/sherpa-onnx-wasm-asr-main.data"
  DESTINATION
    bin/wasm/asr
)
|
||||
299
wasm/asr/app.js
Normal file
299
wasm/asr/app.js
Normal file
@@ -0,0 +1,299 @@
|
||||
// This file copies and modifies code
// from https://mdn.github.io/web-dictaphone/scripts/app.js
// and https://gist.github.com/meziantou/edb7217fddfbb70e899e

// UI controls defined in index.html.
const startBtn = document.getElementById('startBtn');
const stopBtn = document.getElementById('stopBtn');
const clearBtn = document.getElementById('clearBtn');
const hint = document.getElementById('hint');
const soundClips = document.getElementById('sound-clips');

// Text area where recognition results are displayed.
let textArea = document.getElementById('results');

// Text of the utterance currently being decoded (not yet finalized).
let lastResult = '';
// Finalized utterances, one entry per endpoint.
let resultList = [];

// Clears all finalized results; the in-progress utterance (lastResult) is
// kept and re-rendered by getDisplayResult().
clearBtn.onclick = function() {
  resultList = [];
  textArea.value = getDisplayResult();
  textArea.scrollTop = textArea.scrollHeight;  // auto scroll
};
|
||||
|
||||
// Renders the recognized text for the results textarea: one numbered line
// per finalized (non-empty) utterance, followed by the in-progress utterance.
function getDisplayResult() {
  const finished = resultList.filter((text) => text != '');
  const lines = finished.map((text, idx) => idx + ': ' + text + '\n');

  if (lastResult.length > 0) {
    // The in-progress utterance gets the next sequential number.
    lines.push(finished.length + ': ' + lastResult + '\n');
  }

  return lines.join('');
}
|
||||
|
||||
|
||||
// Emscripten module object; the generated sherpa-onnx-wasm-asr-main.js picks
// it up and invokes onRuntimeInitialized once the wasm runtime (and the
// preloaded model files) are ready.
Module = {};
Module.onRuntimeInitialized = function() {
  console.log('inited!');
  hint.innerText = 'Model loaded! Please click start';

  startBtn.disabled = false;

  recognizer = createRecognizer();
  console.log('recognizer is created!', recognizer);
};

let audioCtx;
let mediaStream;

// Sample rate the recognizer expects; microphone audio is resampled to this.
let expectedSampleRate = 16000;
let recordSampleRate;    // the sampleRate of the microphone
let recorder = null;     // the microphone
let leftchannel = [];    // TODO: Use a single channel

let recordingLength = 0;  // number of samples so far

// Created in onRuntimeInitialized / lazily in the audio callback.
let recognizer = null;
let recognizer_stream = null;
|
||||
|
||||
// Wire up the microphone: capture audio, feed it to the streaming recognizer,
// and record the raw samples so a playable WAV clip can be offered on stop.
if (navigator.mediaDevices.getUserMedia) {
  console.log('getUserMedia supported.');

  // see https://w3c.github.io/mediacapture-main/#dom-mediadevices-getusermedia
  const constraints = {audio: true};

  let onSuccess = function(stream) {
    if (!audioCtx) {
      // Request 16 kHz directly; browsers that ignore the hint report their
      // real rate via audioCtx.sampleRate and we resample below.
      audioCtx = new AudioContext({sampleRate: 16000});
    }
    console.log(audioCtx);
    recordSampleRate = audioCtx.sampleRate;
    console.log('sample rate ' + recordSampleRate);

    // creates an audio node from the microphone incoming stream
    mediaStream = audioCtx.createMediaStreamSource(stream);
    console.log('media stream', mediaStream);

    // https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/createScriptProcessor
    // bufferSize: the onaudioprocess event is called when the buffer is full
    var bufferSize = 4096;
    var numberOfInputChannels = 1;
    var numberOfOutputChannels = 2;
    if (audioCtx.createScriptProcessor) {
      recorder = audioCtx.createScriptProcessor(
          bufferSize, numberOfInputChannels, numberOfOutputChannels);
    } else {
      // Legacy name used by very old WebKit builds.
      recorder = audioCtx.createJavaScriptNode(
          bufferSize, numberOfInputChannels, numberOfOutputChannels);
    }
    console.log('recorder', recorder);

    // Called once per full capture buffer while recording is connected.
    recorder.onaudioprocess = function(e) {
      let samples = new Float32Array(e.inputBuffer.getChannelData(0))
      samples = downsampleBuffer(samples, expectedSampleRate);

      if (recognizer_stream == null) {
        recognizer_stream = recognizer.createStream();
      }

      // Feed the chunk and drain the decoder.
      recognizer_stream.acceptWaveform(expectedSampleRate, samples);
      while (recognizer.isReady(recognizer_stream)) {
        recognizer.decode(recognizer_stream);
      }

      let isEndpoint = recognizer.isEndpoint(recognizer_stream);
      let result = recognizer.getResult(recognizer_stream);

      if (result.length > 0 && lastResult != result) {
        lastResult = result;
      }

      // On an endpoint, finalize the current utterance and reset the stream.
      if (isEndpoint) {
        if (lastResult.length > 0) {
          resultList.push(lastResult);
          lastResult = '';
        }
        recognizer.reset(recognizer_stream);
      }

      textArea.value = getDisplayResult();
      textArea.scrollTop = textArea.scrollHeight;  // auto scroll

      // Convert the (already clipped to [-1, 1]) floats to 16-bit PCM and
      // keep them so the recording can be exported as WAV on stop.
      let buf = new Int16Array(samples.length);
      for (var i = 0; i < samples.length; ++i) {
        let s = samples[i];
        if (s >= 1)
          s = 1;
        else if (s <= -1)
          s = -1;

        samples[i] = s;
        buf[i] = s * 32767;
      }

      leftchannel.push(buf);
      // NOTE(review): this adds the pre-resampling buffer size, not
      // samples.length — recordingLength overcounts when resampling occurs;
      // confirm whether recordingLength is used anywhere it matters.
      recordingLength += bufferSize;
    };

    // Start: route mic -> recorder node (destination connection is required
    // for ScriptProcessorNode to fire onaudioprocess in some browsers).
    startBtn.onclick = function() {
      mediaStream.connect(recorder);
      recorder.connect(audioCtx.destination);

      console.log('recorder started');

      stopBtn.disabled = false;
      startBtn.disabled = true;
    };

    // Stop: disconnect the audio graph and publish the captured audio as a
    // playable, renamable, deletable clip.
    stopBtn.onclick = function() {
      console.log('recorder stopped');

      // stopBtn recording
      recorder.disconnect(audioCtx.destination);
      mediaStream.disconnect(recorder);

      startBtn.style.background = '';
      startBtn.style.color = '';
      // mediaRecorder.requestData();

      stopBtn.disabled = true;
      startBtn.disabled = false;

      var clipName = new Date().toISOString();

      const clipContainer = document.createElement('article');
      const clipLabel = document.createElement('p');
      const audio = document.createElement('audio');
      const deleteButton = document.createElement('button');
      clipContainer.classList.add('clip');
      audio.setAttribute('controls', '');
      deleteButton.textContent = 'Delete';
      deleteButton.className = 'delete';

      clipLabel.textContent = clipName;

      clipContainer.appendChild(audio);

      clipContainer.appendChild(clipLabel);
      clipContainer.appendChild(deleteButton);
      soundClips.appendChild(clipContainer);

      audio.controls = true;
      let samples = flatten(leftchannel);
      const blob = toWav(samples);

      leftchannel = [];
      const audioURL = window.URL.createObjectURL(blob);
      audio.src = audioURL;
      console.log('recorder stopped');

      deleteButton.onclick = function(e) {
        let evtTgt = e.target;
        evtTgt.parentNode.parentNode.removeChild(evtTgt.parentNode);
      };

      // Clicking the label lets the user rename the clip.
      clipLabel.onclick = function() {
        const existingName = clipLabel.textContent;
        const newClipName = prompt('Enter a new name for your sound clip?');
        if (newClipName === null) {
          clipLabel.textContent = existingName;
        } else {
          clipLabel.textContent = newClipName;
        }
      };
    };
  };

  let onError = function(err) {
    console.log('The following error occured: ' + err);
  };

  navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
  console.log('getUserMedia not supported on your browser!');
  alert('getUserMedia not supported on your browser!');
}
|
||||
|
||||
|
||||
// this function is copied/modified from
|
||||
// https://gist.github.com/meziantou/edb7217fddfbb70e899e
|
||||
// this function is copied/modified from
// https://gist.github.com/meziantou/edb7217fddfbb70e899e
//
// Concatenates a list of Int16Array chunks into one contiguous Int16Array.
function flatten(listOfSamples) {
  const total = listOfSamples.reduce((acc, chunk) => acc + chunk.length, 0);
  const merged = new Int16Array(total);

  let pos = 0;
  for (const chunk of listOfSamples) {
    merged.set(chunk, pos);
    pos += chunk.length;
  }
  return merged;
}
|
||||
|
||||
// this function is copied/modified from
|
||||
// https://gist.github.com/meziantou/edb7217fddfbb70e899e
|
||||
// this function is copied/modified from
// https://gist.github.com/meziantou/edb7217fddfbb70e899e
//
// Wraps 16-bit PCM samples in a WAV (RIFF) container and returns it as a
// Blob. Mono, 16-bit, sampled at the module-level expectedSampleRate.
// Header layout: http://soundfile.sapp.org/doc/WaveFormat/
function toWav(samples) {
  const dataBytes = samples.length * 2;  // 2 bytes per 16-bit sample
  const wav = new ArrayBuffer(44 + dataBytes);
  const view = new DataView(wav);

  view.setUint32(0, 0x46464952, true);   // chunkID: "RIFF" (little-endian)
  view.setUint32(4, 36 + dataBytes, true);  // chunkSize
  view.setUint32(8, 0x45564157, true);   // format: "WAVE"
  view.setUint32(12, 0x20746d66, true);  // subchunk1ID: "fmt "
  view.setUint32(16, 16, true);          // subchunk1Size, 16 for PCM
  view.setUint32(20, 1, true);           // audioFormat, 1 for PCM
  view.setUint16(22, 1, true);           // numChannels: 1 channel
  view.setUint32(24, expectedSampleRate, true);      // sampleRate
  view.setUint32(28, expectedSampleRate * 2, true);  // byteRate
  view.setUint16(32, 2, true);           // blockAlign
  view.setUint16(34, 16, true);          // bitsPerSample
  view.setUint32(36, 0x61746164, true);  // subchunk2ID: "data"
  view.setUint32(40, dataBytes, true);   // subchunk2Size

  // Payload: raw little-endian PCM right after the 44-byte header.
  samples.forEach(function(sample, idx) {
    view.setInt16(44 + idx * 2, sample, true);
  });

  return new Blob([view], {type: 'audio/wav'});
}
|
||||
|
||||
// this function is copied from
|
||||
// https://github.com/awslabs/aws-lex-browser-audio-capture/blob/master/lib/worker.js#L46
|
||||
// this function is copied from
// https://github.com/awslabs/aws-lex-browser-audio-capture/blob/master/lib/worker.js#L46
//
// Resamples Float32 audio from the module-level recordSampleRate down to
// exportSampleRate by averaging each window of input samples. Returns the
// input unchanged when no resampling is needed.
function downsampleBuffer(buffer, exportSampleRate) {
  if (exportSampleRate === recordSampleRate) {
    return buffer;
  }

  const ratio = recordSampleRate / exportSampleRate;
  const outLength = Math.round(buffer.length / ratio);
  const out = new Float32Array(outLength);

  let readPos = 0;
  for (let writePos = 0; writePos < out.length; ++writePos) {
    const nextReadPos = Math.round((writePos + 1) * ratio);

    // Average every input sample that maps onto this output sample.
    let sum = 0;
    let numSamples = 0;
    for (let i = readPos; i < nextReadPos && i < buffer.length; ++i) {
      sum += buffer[i];
      ++numSamples;
    }

    out[writePos] = sum / numSamples;
    readPos = nextReadPos;
  }
  return out;
};
|
||||
0
wasm/asr/assets/.gitignore
vendored
Normal file
0
wasm/asr/assets/.gitignore
vendored
Normal file
82
wasm/asr/assets/README.md
Normal file
82
wasm/asr/assets/README.md
Normal file
@@ -0,0 +1,82 @@
|
||||
# Introduction
|
||||
|
||||
Please refer to
|
||||
https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
|
||||
or
|
||||
https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html
|
||||
to download a model.
|
||||
|
||||
# Streaming ASR
|
||||
|
||||
## Transducer
|
||||
```bash
|
||||
cd sherpa-onnx/wasm/asr/assets
|
||||
|
||||
wget -q https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
|
||||
tar xvf sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
|
||||
rm sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20.tar.bz2
|
||||
|
||||
# Note: it is intentional (not a mistake) that the int8 encoder/joiner files are renamed to encoder.onnx/joiner.onnx
|
||||
|
||||
mv sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/encoder-epoch-99-avg-1.int8.onnx encoder.onnx
|
||||
mv sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/decoder-epoch-99-avg-1.onnx decoder.onnx
|
||||
mv sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/joiner-epoch-99-avg-1.int8.onnx joiner.onnx
|
||||
mv sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/tokens.txt ./
|
||||
rm -rf sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20/
|
||||
|
||||
cd ../../..
|
||||
|
||||
./build-wasm-simd-asr.sh
|
||||
```
|
||||
|
||||
You should have the following files in `assets` before you can run
|
||||
`build-wasm-simd-asr.sh`
|
||||
|
||||
```
|
||||
assets fangjun$ tree -L 1
|
||||
.
|
||||
├── README.md
|
||||
├── decoder.onnx
|
||||
├── encoder.onnx
|
||||
├── joiner.onnx
|
||||
└── tokens.txt
|
||||
|
||||
0 directories, 5 files
|
||||
```
|
||||
|
||||
## Paraformer
|
||||
|
||||
```
|
||||
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-streaming-paraformer-bilingual-zh-en.tar.bz2
|
||||
tar xvf sherpa-onnx-streaming-paraformer-bilingual-zh-en.tar.bz2
|
||||
rm sherpa-onnx-streaming-paraformer-bilingual-zh-en.tar.bz2
|
||||
|
||||
mv sherpa-onnx-streaming-paraformer-bilingual-zh-en/encoder.int8.onnx encoder.onnx
|
||||
mv sherpa-onnx-streaming-paraformer-bilingual-zh-en/decoder.int8.onnx decoder.onnx
|
||||
mv sherpa-onnx-streaming-paraformer-bilingual-zh-en/tokens.txt ./
|
||||
|
||||
rm -rf sherpa-onnx-streaming-paraformer-bilingual-zh-en
|
||||
|
||||
cd ../
|
||||
|
||||
sed -i.bak s/"type = 0"/"type = 1"/g ./sherpa-onnx.js
|
||||
sed -i.bak s/Zipformer/Paraformer/g ./index.html
|
||||
|
||||
cd ../..
|
||||
|
||||
./build-wasm-simd-asr.sh
|
||||
```
|
||||
|
||||
You should have the following files in `assets` before you can run
|
||||
`build-wasm-simd-asr.sh`
|
||||
|
||||
```
|
||||
assets fangjun$ tree -L 1
|
||||
.
|
||||
├── README.md
|
||||
├── decoder.onnx
|
||||
├── encoder.onnx
|
||||
└── tokens.txt
|
||||
|
||||
0 directories, 4 files
|
||||
```
|
||||
42
wasm/asr/index.html
Normal file
42
wasm/asr/index.html
Normal file
@@ -0,0 +1,42 @@
|
||||
<html lang="en">

<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width" />
  <!-- Fixed: the title previously said "Text-to-speech" (copied from the TTS
       demo page) but this page is the speech-recognition (ASR) demo. -->
  <title>Next-gen Kaldi WebAssembly with sherpa-onnx for speech recognition</title>
  <style>
    h1,div {
      text-align: center;
    }
    textarea {
      width:100%;
    }
  </style>
</head>

<body>
  <h1>
    Next-gen Kaldi + WebAssembly<br/>
    ASR Demo with <a href="https://github.com/k2-fsa/sherpa-onnx">sherpa-onnx</a><br/>
    (with Zipformer)
  </h1>

  <div>
    <span id="hint">Loading model ... ...</span>
    <br/>
    <br/>
    <button id="startBtn" disabled>Start</button>
    <button id="stopBtn" disabled>Stop</button>
    <button id="clearBtn">Clear</button>
    <br/>
    <br/>
    <textarea id="results" rows="10" readonly></textarea>
  </div>

  <section flex="1" overflow="auto" id="sound-clips">
  </section>

  <!-- sherpa-onnx.js and app.js must load before the emscripten-generated
       main script so that Module (with onRuntimeInitialized) already exists. -->
  <script src="sherpa-onnx.js"></script>
  <script src="app.js"></script>
  <script src="sherpa-onnx-wasm-asr-main.js"></script>
</body>
|
||||
75
wasm/asr/sherpa-onnx-wasm-asr-main.cc
Normal file
75
wasm/asr/sherpa-onnx-wasm-asr-main.cc
Normal file
@@ -0,0 +1,75 @@
|
||||
// wasm/asr/sherpa-onnx-wasm-asr-main.cc
//
// Copyright (c)  2024  Xiaomi Corporation
#include <stdio.h>

#include <algorithm>
#include <cstdint>
#include <memory>

#include "sherpa-onnx/c-api/c-api.h"

// see also
// https://emscripten.org/docs/porting/connecting_cpp_and_javascript/Interacting-with-code.html

extern "C" {

// These static_asserts pin the exact byte layout that the JavaScript side
// (sherpa-onnx.js) writes into the wasm heap with setValue()/_CopyHeap().
// If a C-API config struct changes size, compilation fails here instead of
// the configs being silently mis-read at runtime.
static_assert(sizeof(SherpaOnnxOnlineTransducerModelConfig) == 3 * 4, "");
static_assert(sizeof(SherpaOnnxOnlineParaformerModelConfig) == 2 * 4, "");
static_assert(sizeof(SherpaOnnxOnlineZipformer2CtcModelConfig) == 1 * 4, "");
static_assert(sizeof(SherpaOnnxOnlineModelConfig) ==
                  sizeof(SherpaOnnxOnlineTransducerModelConfig) +
                      sizeof(SherpaOnnxOnlineParaformerModelConfig) +
                      sizeof(SherpaOnnxOnlineZipformer2CtcModelConfig) + 5 * 4,
              "");
static_assert(sizeof(SherpaOnnxFeatureConfig) == 2 * 4, "");
static_assert(sizeof(SherpaOnnxOnlineRecognizerConfig) ==
                  sizeof(SherpaOnnxFeatureConfig) +
                      sizeof(SherpaOnnxOnlineModelConfig) + 8 * 4,
              "");

// Prints every field of the given recognizer config to stdout.
// Exported to JavaScript for debugging the config marshalling.
void MyPrint(SherpaOnnxOnlineRecognizerConfig *config) {
  auto model_config = &config->model_config;
  auto feat = &config->feat_config;
  auto transducer_model_config = &model_config->transducer;
  auto paraformer_model_config = &model_config->paraformer;
  auto ctc_model_config = &model_config->zipformer2_ctc;

  fprintf(stdout, "----------online transducer model config----------\n");
  fprintf(stdout, "encoder: %s\n", transducer_model_config->encoder);
  fprintf(stdout, "decoder: %s\n", transducer_model_config->decoder);
  fprintf(stdout, "joiner: %s\n", transducer_model_config->joiner);

  // Fixed typo in the banner: "parformer" -> "paraformer".
  fprintf(stdout, "----------online paraformer model config----------\n");
  fprintf(stdout, "encoder: %s\n", paraformer_model_config->encoder);
  fprintf(stdout, "decoder: %s\n", paraformer_model_config->decoder);

  fprintf(stdout, "----------online ctc model config----------\n");
  fprintf(stdout, "model: %s\n", ctc_model_config->model);
  fprintf(stdout, "tokens: %s\n", model_config->tokens);
  fprintf(stdout, "num_threads: %d\n", model_config->num_threads);
  fprintf(stdout, "provider: %s\n", model_config->provider);
  fprintf(stdout, "debug: %d\n", model_config->debug);
  fprintf(stdout, "model type: %s\n", model_config->model_type);

  fprintf(stdout, "----------feat config----------\n");
  fprintf(stdout, "sample rate: %d\n", feat->sample_rate);
  fprintf(stdout, "feat dim: %d\n", feat->feature_dim);

  fprintf(stdout, "----------recognizer config----------\n");
  fprintf(stdout, "decoding method: %s\n", config->decoding_method);
  fprintf(stdout, "max active paths: %d\n", config->max_active_paths);
  fprintf(stdout, "enable_endpoint: %d\n", config->enable_endpoint);
  fprintf(stdout, "rule1_min_trailing_silence: %.2f\n",
          config->rule1_min_trailing_silence);
  fprintf(stdout, "rule2_min_trailing_silence: %.2f\n",
          config->rule2_min_trailing_silence);
  fprintf(stdout, "rule3_min_utterance_length: %.2f\n",
          config->rule3_min_utterance_length);
  fprintf(stdout, "hotwords_file: %s\n", config->hotwords_file);
  fprintf(stdout, "hotwords_score: %.2f\n", config->hotwords_score);
}

// Copies num_bytes from src to dst within the wasm heap. Exported so
// JavaScript can assemble nested C structs with _CopyHeap().
void CopyHeap(const char *src, int32_t num_bytes, char *dst) {
  std::copy(src, src + num_bytes, dst);
}
}
|
||||
381
wasm/asr/sherpa-onnx.js
Normal file
381
wasm/asr/sherpa-onnx.js
Normal file
@@ -0,0 +1,381 @@
|
||||
// Recursively releases the wasm-heap memory owned by a config object created
// by one of the initSherpaOnnx*Config() functions below.
//
// Each config object may own:
//   - config.buffer: heap block holding the NUL-terminated strings that the
//     C struct's char* fields point into
//   - config.ptr:    heap block holding the C struct itself
//   - nested sub-config objects, freed recursively via the keys below
function freeConfig(config) {
  if ('buffer' in config) {
    _free(config.buffer);
  }

  if ('config' in config) {
    freeConfig(config.config)
  }

  if ('transducer' in config) {
    freeConfig(config.transducer)
  }

  if ('paraformer' in config) {
    freeConfig(config.paraformer)
  }

  if ('ctc' in config) {
    freeConfig(config.ctc)
  }

  if ('feat' in config) {
    freeConfig(config.feat)
  }

  if ('model' in config) {
    freeConfig(config.model)
  }

  _free(config.ptr);
}
|
||||
|
||||
// The user should free the returned pointers
//
// Marshals {encoder, decoder, joiner} into a C
// SherpaOnnxOnlineTransducerModelConfig on the wasm heap: three consecutive
// char* fields (matching the 3*4-byte static_assert in
// sherpa-onnx-wasm-asr-main.cc). Returns {buffer, ptr, len} where ptr/len
// describe the struct and buffer holds the string data it points into.
function initSherpaOnnxOnlineTransducerModelConfig(config) {
  // +1 for the trailing NUL of each string.
  let encoderLen = lengthBytesUTF8(config.encoder) + 1;
  let decoderLen = lengthBytesUTF8(config.decoder) + 1;
  let joinerLen = lengthBytesUTF8(config.joiner) + 1;

  let n = encoderLen + decoderLen + joinerLen;

  let buffer = _malloc(n);

  let len = 3 * 4;  // 3 pointers
  let ptr = _malloc(len);

  // Pack the three strings back-to-back into the buffer.
  let offset = 0;
  stringToUTF8(config.encoder, buffer + offset, encoderLen);
  offset += encoderLen;

  stringToUTF8(config.decoder, buffer + offset, decoderLen);
  offset += decoderLen;

  stringToUTF8(config.joiner, buffer + offset, joinerLen);

  // Write the three char* fields pointing at the packed strings.
  offset = 0;
  setValue(ptr, buffer + offset, 'i8*');
  offset += encoderLen;

  setValue(ptr + 4, buffer + offset, 'i8*');
  offset += decoderLen;

  setValue(ptr + 8, buffer + offset, 'i8*');

  return {
    buffer: buffer, ptr: ptr, len: len,
  }
}
|
||||
|
||||
// Marshals {encoder, decoder} into a C SherpaOnnxOnlineParaformerModelConfig
// (two char* fields) on the wasm heap. The caller frees the returned
// pointers via freeConfig(). Returns {buffer, ptr, len}.
function initSherpaOnnxOnlineParaformerModelConfig(config) {
  // +1 for the trailing NUL of each string.
  let encoderLen = lengthBytesUTF8(config.encoder) + 1;
  let decoderLen = lengthBytesUTF8(config.decoder) + 1;

  let n = encoderLen + decoderLen;
  let buffer = _malloc(n);

  let len = 2 * 4;  // 2 pointers
  let ptr = _malloc(len);

  // Pack both strings back-to-back into the buffer.
  let offset = 0;
  stringToUTF8(config.encoder, buffer + offset, encoderLen);
  offset += encoderLen;

  stringToUTF8(config.decoder, buffer + offset, decoderLen);

  // Write the two char* fields pointing at the packed strings.
  offset = 0;
  setValue(ptr, buffer + offset, 'i8*');
  offset += encoderLen;

  setValue(ptr + 4, buffer + offset, 'i8*');

  return {
    buffer: buffer, ptr: ptr, len: len,
  }
}
|
||||
|
||||
// Marshals {model} into a C SherpaOnnxOnlineZipformer2CtcModelConfig
// (a single char* field) on the wasm heap. The caller frees the returned
// pointers via freeConfig(). Returns {buffer, ptr, len}.
function initSherpaOnnxOnlineZipformer2CtcModelConfig(config) {
  let n = lengthBytesUTF8(config.model) + 1;  // +1 for the trailing NUL
  let buffer = _malloc(n);

  let len = 1 * 4;  // 1 pointer
  let ptr = _malloc(len);

  stringToUTF8(config.model, buffer, n);

  setValue(ptr, buffer, 'i8*');

  return {
    buffer: buffer, ptr: ptr, len: len,
  }
}
|
||||
|
||||
// Marshals a JS model config into a C SherpaOnnxOnlineModelConfig on the
// wasm heap: the three nested sub-configs laid out back-to-back, followed by
// 5 * 4 bytes of scalar/string fields (tokens, num_threads, provider, debug,
// model_type) — matching the static_assert in sherpa-onnx-wasm-asr-main.cc.
// Returns {buffer, ptr, len, transducer, paraformer, ctc}; free with
// freeConfig().
function initSherpaOnnxOnlineModelConfig(config) {
  let transducer = initSherpaOnnxOnlineTransducerModelConfig(config.transducer);
  let paraformer = initSherpaOnnxOnlineParaformerModelConfig(config.paraformer);
  let ctc = initSherpaOnnxOnlineZipformer2CtcModelConfig(config.zipformer2Ctc);

  let len = transducer.len + paraformer.len + ctc.len + 5 * 4;
  let ptr = _malloc(len);

  // Copy the three sub-structs into the combined struct, in field order.
  let offset = 0;
  _CopyHeap(transducer.ptr, transducer.len, ptr + offset);
  offset += transducer.len;

  _CopyHeap(paraformer.ptr, paraformer.len, ptr + offset);
  offset += paraformer.len;

  _CopyHeap(ctc.ptr, ctc.len, ptr + offset);
  offset += ctc.len;

  // Pack the three strings (tokens, provider, modelType) into one buffer.
  let tokensLen = lengthBytesUTF8(config.tokens) + 1;
  let providerLen = lengthBytesUTF8(config.provider) + 1;
  let modelTypeLen = lengthBytesUTF8(config.modelType) + 1;
  let bufferLen = tokensLen + providerLen + modelTypeLen;
  let buffer = _malloc(bufferLen);

  offset = 0;
  stringToUTF8(config.tokens, buffer, tokensLen);
  offset += tokensLen;

  stringToUTF8(config.provider, buffer + offset, providerLen);
  offset += providerLen;

  stringToUTF8(config.modelType, buffer + offset, modelTypeLen);

  // Write the five trailing fields right after the nested sub-structs.
  offset = transducer.len + paraformer.len + ctc.len;
  setValue(ptr + offset, buffer, 'i8*');  // tokens
  offset += 4;

  setValue(ptr + offset, config.numThreads, 'i32');
  offset += 4;

  setValue(ptr + offset, buffer + tokensLen, 'i8*');  // provider
  offset += 4;

  setValue(ptr + offset, config.debug, 'i32');
  offset += 4;

  setValue(ptr + offset, buffer + tokensLen + providerLen, 'i8*');  // modelType
  offset += 4;

  return {
    buffer: buffer, ptr: ptr, len: len, transducer: transducer,
    paraformer: paraformer, ctc: ctc
  }
}
|
||||
|
||||
// Marshals {sampleRate, featureDim} into a C SherpaOnnxFeatureConfig on the
// wasm heap. Returns {ptr, len}; free with freeConfig().
function initSherpaOnnxFeatureConfig(config) {
  let len = 2 * 4;  // 2 int32 fields: sample_rate, feature_dim
  let ptr = _malloc(len);

  setValue(ptr, config.sampleRate, 'i32');
  setValue(ptr + 4, config.featureDim, 'i32');
  return {ptr: ptr, len: len};
}
|
||||
|
||||
// Marshals the full JS recognizer config into a C
// SherpaOnnxOnlineRecognizerConfig on the wasm heap: the feature config and
// model config laid out back-to-back, followed by 8 * 4 bytes of scalar and
// string fields — matching the static_assert in sherpa-onnx-wasm-asr-main.cc.
// Returns {buffer, ptr, len, feat, model}; free with freeConfig().
function initSherpaOnnxOnlineRecognizerConfig(config) {
  let feat = initSherpaOnnxFeatureConfig(config.featConfig);
  let model = initSherpaOnnxOnlineModelConfig(config.modelConfig);

  let len = feat.len + model.len + 8 * 4;
  let ptr = _malloc(len);

  // Copy the two sub-structs into the combined struct, in field order.
  let offset = 0;
  _CopyHeap(feat.ptr, feat.len, ptr + offset);
  offset += feat.len;

  _CopyHeap(model.ptr, model.len, ptr + offset);
  offset += model.len;

  // Pack the two strings (decodingMethod, hotwordsFile) into one buffer.
  let decodingMethodLen = lengthBytesUTF8(config.decodingMethod) + 1;
  let hotwordsFileLen = lengthBytesUTF8(config.hotwordsFile) + 1;
  let bufferLen = decodingMethodLen + hotwordsFileLen;
  let buffer = _malloc(bufferLen);

  offset = 0;
  stringToUTF8(config.decodingMethod, buffer, decodingMethodLen);
  offset += decodingMethodLen;

  stringToUTF8(config.hotwordsFile, buffer + offset, hotwordsFileLen);

  // Write the eight trailing fields right after the nested sub-structs.
  offset = feat.len + model.len;
  setValue(ptr + offset, buffer, 'i8*');  // decoding method
  offset += 4;

  setValue(ptr + offset, config.maxActivePaths, 'i32');
  offset += 4;

  setValue(ptr + offset, config.enableEndpoint, 'i32');
  offset += 4;

  setValue(ptr + offset, config.rule1MinTrailingSilence, 'float');
  offset += 4;

  setValue(ptr + offset, config.rule2MinTrailingSilence, 'float');
  offset += 4;

  setValue(ptr + offset, config.rule3MinUtteranceLength, 'float');
  offset += 4;

  setValue(ptr + offset, buffer + decodingMethodLen, 'i8*');  // hotwords file
  offset += 4;

  setValue(ptr + offset, config.hotwordsScore, 'float');
  offset += 4;

  return {
    buffer: buffer, ptr: ptr, len: len, feat: feat, model: model
  }
}
|
||||
|
||||
|
||||
// Builds an OnlineRecognizer configured for the model files bundled under
// the wasm filesystem root (encoder.onnx, decoder.onnx, joiner.onnx,
// tokens.txt — see assets/README.md).
function createRecognizer() {
  const transducer = {encoder: '', decoder: '', joiner: ''};
  const paraformer = {encoder: '', decoder: ''};
  const zipformer2Ctc = {model: ''};

  // 0 -> transducer, 1 -> paraformer, 2 -> zipformer2 CTC.
  // Keep the literal "type = 0" spelling: assets/README.md rewrites it with
  // sed to switch model types.
  let type = 0;

  switch (type) {
    case 0:
      // transducer
      transducer.encoder = './encoder.onnx';
      transducer.decoder = './decoder.onnx';
      transducer.joiner = './joiner.onnx';
      break;
    case 1:
      // paraformer
      paraformer.encoder = './encoder.onnx';
      paraformer.decoder = './decoder.onnx';
      break;
    case 2:
      // ctc
      zipformer2Ctc.model = './encoder.onnx';
      break;
  }

  const recognizerConfig = {
    featConfig: {
      sampleRate: 16000,
      featureDim: 80,
    },
    modelConfig: {
      transducer: transducer,
      paraformer: paraformer,
      zipformer2Ctc: zipformer2Ctc,
      tokens: './tokens.txt',
      numThreads: 1,
      provider: 'cpu',
      debug: 1,
      modelType: '',
    },
    decodingMethod: 'greedy_search',
    maxActivePaths: 4,
    enableEndpoint: 1,
    rule1MinTrailingSilence: 2.4,
    rule2MinTrailingSilence: 1.2,
    rule3MinUtteranceLength: 20,
    hotwordsFile: '',
    hotwordsScore: 1.5,
  };

  return new OnlineRecognizer(recognizerConfig);
}
|
||||
|
||||
// Thin JavaScript wrapper around a C SherpaOnnxOnlineStream handle, with a
// reusable wasm-heap scratch buffer for passing audio samples across the
// JS/wasm boundary.
class OnlineStream {
  constructor(handle) {
    this.handle = handle;
    this.pointer = null;  // wasm-heap scratch buffer for samples
    this.n = 0;           // capacity of the scratch buffer, in samples
  }

  // Destroys the underlying C stream and releases the scratch buffer.
  // Safe to call more than once.
  free() {
    if (this.handle) {
      _DestroyOnlineStream(this.handle);
      this.handle = null;
      _free(this.pointer);
      this.pointer = null;
      this.n = 0;
    }
  }

  /**
   * @param sampleRate {Number}
   * @param samples {Float32Array} Containing samples in the range [-1, 1]
   */
  acceptWaveform(sampleRate, samples) {
    // Grow the scratch buffer only when needed; it is reused across calls.
    if (this.n < samples.length) {
      _free(this.pointer);
      this.pointer = _malloc(samples.length * samples.BYTES_PER_ELEMENT);
      this.n = samples.length
    }

    // Copy the samples into the wasm heap, then hand the pointer to C.
    Module.HEAPF32.set(samples, this.pointer / samples.BYTES_PER_ELEMENT);
    _AcceptWaveform(this.handle, sampleRate, this.pointer, samples.length);
  }

  // Signals that no more audio will be provided for this stream.
  inputFinished() {
    _InputFinished(this.handle);
  }
};
|
||||
|
||||
// Thin JavaScript wrapper around a C SherpaOnnxOnlineRecognizer handle.
// The constructor marshals the JS config into the wasm heap, creates the
// recognizer, then frees the marshalled config (the C side copies what it
// needs).
class OnlineRecognizer {
  constructor(configObj) {
    let config = initSherpaOnnxOnlineRecognizerConfig(configObj)
    let handle = _CreateOnlineRecognizer(config.ptr);

    freeConfig(config);

    this.handle = handle;
  }

  // Destroys the underlying C recognizer.
  free() {
    _DestroyOnlineRecognizer(this.handle);
    this.handle = 0
  }

  // Creates a new decoding stream bound to this recognizer.
  createStream() {
    let handle = _CreateOnlineStream(this.handle);
    return new OnlineStream(handle);
  }

  // True when the stream has enough buffered audio to decode another frame.
  isReady(stream) {
    return _IsOnlineStreamReady(this.handle, stream.handle) == 1;
  }

  decode(stream) {
    return _DecodeOnlineStream(this.handle, stream.handle);
  }

  // True when an endpoint (end of utterance) has been detected.
  isEndpoint(stream) {
    return _IsEndpoint(this.handle, stream.handle) == 1;
  }

  // Resets the stream after an endpoint so decoding starts a new utterance.
  reset(stream) {
    _Reset(this.handle, stream.handle);
  }

  // Returns the current recognition text for the stream. The C result object
  // is destroyed before returning; only the JS string survives.
  getResult(stream) {
    let r = _GetOnlineStreamResult(this.handle, stream.handle);
    let textPtr = getValue(r, 'i8*');
    let text = UTF8ToString(textPtr);
    _DestroyOnlineRecognizerResult(r);
    return text;
  }
}
|
||||
Reference in New Issue
Block a user