diff --git a/.github/workflows/wasm-simd-hf-space-en-asr-zipformer.yaml b/.github/workflows/wasm-simd-hf-space-en-asr-zipformer.yaml
index d34a182d..b6f3eb03 100644
--- a/.github/workflows/wasm-simd-hf-space-en-asr-zipformer.yaml
+++ b/.github/workflows/wasm-simd-hf-space-en-asr-zipformer.yaml
@@ -144,8 +144,7 @@ jobs:
git clone https://huggingface.co/spaces/k2-fsa/web-assembly-asr-sherpa-onnx-en huggingface
cd huggingface
- rm -fv *.js
- rm -fv *.data
+ rm -rf ./*
git fetch
git pull
git merge -m "merge remote" --ff origin main
diff --git a/.github/workflows/wasm-simd-hf-space-speech-enhancement-gtcrn.yaml b/.github/workflows/wasm-simd-hf-space-speech-enhancement-gtcrn.yaml
new file mode 100644
index 00000000..7b82055b
--- /dev/null
+++ b/.github/workflows/wasm-simd-hf-space-speech-enhancement-gtcrn.yaml
@@ -0,0 +1,169 @@
+name: wasm-simd-hf-space-speech-enhancement-gtcrn
+
+on:
+ push:
+ branches:
+ - wasm
+ - wasm-gtcrn
+ tags:
+ - 'v[0-9]+.[0-9]+.[0-9]+*'
+
+ workflow_dispatch:
+
+concurrency:
+ group: wasm-simd-hf-space-speech-enhancement-gtcrn-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ wasm-simd-hf-space-speech-enhancement-gtcrn:
+ name: wasm gtcrn
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest]
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Install emsdk
+ uses: mymindstorm/setup-emsdk@v14
+ with:
+ version: 3.1.53
+ actions-cache-folder: 'emsdk-cache'
+
+ - name: View emsdk version
+ shell: bash
+ run: |
+ emcc -v
+ echo "--------------------"
+ emcc --check
+
+ - name: Download model
+ shell: bash
+ run: |
+ cd wasm/speech-enhancement/assets
+ curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/speech-enhancement-models/gtcrn_simple.onnx
+ mv gtcrn_simple.onnx gtcrn.onnx
+
+ - name: build
+ shell: bash
+ run: |
+ ./build-wasm-simd-speech-enhancement.sh
+
+ - name: collect files
+ shell: bash
+ run: |
+ SHERPA_ONNX_VERSION=v$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
+
+ d=sherpa-onnx-wasm-simd-${SHERPA_ONNX_VERSION}-speech-enhancement-gtcrn
+ mv build-wasm-simd-speech-enhancement/install/bin/wasm/speech-enhancement $d
+ ls -lh $d
+ tar cjfv $d.tar.bz2 $d
+
+ echo "---"
+
+ ls -lh *.tar.bz2
+
+ - uses: actions/upload-artifact@v4
+ with:
+ name: wasm-speech-enhancement-gtcrn
+ path: ./*.tar.bz2
+
+ - name: Release
+ # if: github.repository_owner == 'csukuangfj' && github.event_name == 'push' && contains(github.ref, 'refs/tags/')
+ uses: svenstaro/upload-release-action@v2
+ with:
+ file_glob: true
+ overwrite: true
+ file: ./*.tar.bz2
+ repo_name: k2-fsa/sherpa-onnx
+ repo_token: ${{ secrets.UPLOAD_GH_SHERPA_ONNX_TOKEN }}
+ tag: v1.10.46
+
+ - name: Release
+ if: github.repository_owner == 'k2-fsa' && github.event_name == 'push' && contains(github.ref, 'refs/tags/')
+ uses: svenstaro/upload-release-action@v2
+ with:
+ file_glob: true
+ overwrite: true
+ file: ./*.tar.bz2
+
+ - name: Publish to ModelScope
+ # if: false
+ env:
+ MS_TOKEN: ${{ secrets.MODEL_SCOPE_GIT_TOKEN }}
+ uses: nick-fields/retry@v2
+ with:
+ max_attempts: 20
+ timeout_seconds: 200
+ shell: bash
+ command: |
+ SHERPA_ONNX_VERSION=v$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
+
+ git config --global user.email "csukuangfj@gmail.com"
+ git config --global user.name "Fangjun Kuang"
+
+ rm -rf ms
+ export GIT_LFS_SKIP_SMUDGE=1
+ export GIT_CLONE_PROTECTION_ACTIVE=false
+
+ git clone http://www.modelscope.cn/studios/csukuangfj/wasm-speech-enhancement-gtcrn.git ms
+
+ cd ms
+ rm -fv *.js
+ rm -fv *.data
+
+ git fetch
+ git pull
+ git merge -m "merge remote" --ff origin main
+
+ cp -v ../sherpa-onnx-wasm-simd-${SHERPA_ONNX_VERSION}-*/* .
+
+ git status
+ git lfs track "*.data"
+ git lfs track "*.wasm"
+ ls -lh
+
+ git add .
+ git commit -m "update model"
+ git push http://oauth2:${MS_TOKEN}@www.modelscope.cn/studios/csukuangfj/wasm-speech-enhancement-gtcrn.git
+
+ - name: Publish to huggingface
+ env:
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
+ uses: nick-fields/retry@v2
+ with:
+ max_attempts: 20
+ timeout_seconds: 200
+ shell: bash
+ command: |
+ SHERPA_ONNX_VERSION=v$(grep "SHERPA_ONNX_VERSION" ./CMakeLists.txt | cut -d " " -f 2 | cut -d '"' -f 2)
+
+ git config --global user.email "csukuangfj@gmail.com"
+ git config --global user.name "Fangjun Kuang"
+
+ rm -rf huggingface
+ export GIT_LFS_SKIP_SMUDGE=1
+ export GIT_CLONE_PROTECTION_ACTIVE=false
+
+ git clone https://huggingface.co/spaces/k2-fsa/wasm-speech-enhancement-gtcrn huggingface
+ cd huggingface
+ rm -fv *.js
+ rm -fv *.data
+ git fetch
+ git pull
+ git merge -m "merge remote" --ff origin main
+
+ cp -v ../sherpa-onnx-wasm-simd-${SHERPA_ONNX_VERSION}-*/* .
+
+ git status
+ git lfs track "*.data"
+ git lfs track "*.wasm"
+ ls -lh
+
+ git add .
+ git commit -m "update model"
+ git push https://csukuangfj:$HF_TOKEN@huggingface.co/spaces/k2-fsa/wasm-speech-enhancement-gtcrn main
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7ed33dc3..f67cb7bd 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -38,6 +38,7 @@ option(SHERPA_ONNX_ENABLE_WASM_KWS "Whether to enable WASM for KWS" OFF)
option(SHERPA_ONNX_ENABLE_WASM_VAD "Whether to enable WASM for VAD" OFF)
option(SHERPA_ONNX_ENABLE_WASM_VAD_ASR "Whether to enable WASM for VAD+ASR" OFF)
option(SHERPA_ONNX_ENABLE_WASM_NODEJS "Whether to enable WASM for NodeJS" OFF)
+option(SHERPA_ONNX_ENABLE_WASM_SPEECH_ENHANCEMENT "Whether to enable WASM for speech enhancement" OFF)
option(SHERPA_ONNX_ENABLE_BINARY "Whether to build binaries" ON)
option(SHERPA_ONNX_ENABLE_TTS "Whether to build TTS related code" ON)
option(SHERPA_ONNX_ENABLE_SPEAKER_DIARIZATION "Whether to build speaker diarization related code" ON)
@@ -149,6 +150,7 @@ message(STATUS "SHERPA_ONNX_ENABLE_WASM_KWS ${SHERPA_ONNX_ENABLE_WASM_KWS}")
message(STATUS "SHERPA_ONNX_ENABLE_WASM_VAD ${SHERPA_ONNX_ENABLE_WASM_VAD}")
message(STATUS "SHERPA_ONNX_ENABLE_WASM_VAD_ASR ${SHERPA_ONNX_ENABLE_WASM_VAD_ASR}")
message(STATUS "SHERPA_ONNX_ENABLE_WASM_NODEJS ${SHERPA_ONNX_ENABLE_WASM_NODEJS}")
+message(STATUS "SHERPA_ONNX_ENABLE_WASM_SPEECH_ENHANCEMENT ${SHERPA_ONNX_ENABLE_WASM_SPEECH_ENHANCEMENT}")
message(STATUS "SHERPA_ONNX_ENABLE_BINARY ${SHERPA_ONNX_ENABLE_BINARY}")
message(STATUS "SHERPA_ONNX_ENABLE_TTS ${SHERPA_ONNX_ENABLE_TTS}")
message(STATUS "SHERPA_ONNX_ENABLE_SPEAKER_DIARIZATION ${SHERPA_ONNX_ENABLE_SPEAKER_DIARIZATION}")
@@ -261,6 +263,12 @@ if(SHERPA_ONNX_ENABLE_WASM_VAD_ASR)
endif()
endif()
+if(SHERPA_ONNX_ENABLE_WASM_SPEECH_ENHANCEMENT)
+ if(NOT SHERPA_ONNX_ENABLE_WASM)
+ message(FATAL_ERROR "Please set SHERPA_ONNX_ENABLE_WASM to ON if you enable WASM for speech enhancement")
+ endif()
+endif()
+
if(NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 17 CACHE STRING "The C++ version to be used.")
endif()
diff --git a/build-wasm-simd-asr.sh b/build-wasm-simd-asr.sh
index c1953933..11f2e5b8 100755
--- a/build-wasm-simd-asr.sh
+++ b/build-wasm-simd-asr.sh
@@ -29,6 +29,7 @@ echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
+ echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
diff --git a/build-wasm-simd-kws.sh b/build-wasm-simd-kws.sh
index 408fd75a..fefbff54 100755
--- a/build-wasm-simd-kws.sh
+++ b/build-wasm-simd-kws.sh
@@ -24,6 +24,7 @@ echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
+ echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
diff --git a/build-wasm-simd-nodejs.sh b/build-wasm-simd-nodejs.sh
index 43023cbe..6d9059d8 100755
--- a/build-wasm-simd-nodejs.sh
+++ b/build-wasm-simd-nodejs.sh
@@ -31,6 +31,7 @@ echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
+ echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
diff --git a/build-wasm-simd-speaker-diarization.sh b/build-wasm-simd-speaker-diarization.sh
index 888abb56..6eca8af2 100755
--- a/build-wasm-simd-speaker-diarization.sh
+++ b/build-wasm-simd-speaker-diarization.sh
@@ -29,6 +29,7 @@ echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
+ echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
diff --git a/build-wasm-simd-speech-enhancement.sh b/build-wasm-simd-speech-enhancement.sh
new file mode 100755
index 00000000..f7a19ab5
--- /dev/null
+++ b/build-wasm-simd-speech-enhancement.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+# Copyright (c) 2025 Xiaomi Corporation
+#
+# This script is to build sherpa-onnx for WebAssembly (Speech Enhancement)
+
+set -ex
+
+if [ x"$EMSCRIPTEN" == x"" ]; then
+ if ! command -v emcc &> /dev/null; then
+ echo "Please install emscripten first"
+ echo ""
+ echo "You can use the following commands to install it:"
+ echo ""
+ echo "git clone https://github.com/emscripten-core/emsdk.git"
+ echo "cd emsdk"
+ echo "git pull"
+ echo "./emsdk install 3.1.53"
+ echo "./emsdk activate 3.1.53"
+ echo "source ./emsdk_env.sh"
+ exit 1
+ else
+ EMSCRIPTEN=$(dirname $(realpath $(which emcc)))
+ emcc --version
+ fi
+fi
+
+export EMSCRIPTEN=$EMSCRIPTEN
+echo "EMSCRIPTEN: $EMSCRIPTEN"
+if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
+ echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
+ echo "Please make sure you have installed emsdk correctly"
+ echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
+ exit 1
+fi
+
+mkdir -p build-wasm-simd-speech-enhancement
+pushd build-wasm-simd-speech-enhancement
+
+export SHERPA_ONNX_IS_USING_BUILD_WASM_SH=ON
+
+cmake \
+ -DCMAKE_INSTALL_PREFIX=./install \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_TOOLCHAIN_FILE=$EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake \
+ \
+ -DSHERPA_ONNX_ENABLE_PYTHON=OFF \
+ -DSHERPA_ONNX_ENABLE_TESTS=OFF \
+ -DSHERPA_ONNX_ENABLE_CHECK=OFF \
+ -DBUILD_SHARED_LIBS=OFF \
+ -DSHERPA_ONNX_ENABLE_PORTAUDIO=OFF \
+ -DSHERPA_ONNX_ENABLE_JNI=OFF \
+ -DSHERPA_ONNX_ENABLE_C_API=ON \
+ -DSHERPA_ONNX_ENABLE_WEBSOCKET=OFF \
+ -DSHERPA_ONNX_ENABLE_GPU=OFF \
+ -DSHERPA_ONNX_ENABLE_WASM=ON \
+ -DSHERPA_ONNX_ENABLE_WASM_SPEECH_ENHANCEMENT=ON \
+ -DSHERPA_ONNX_ENABLE_BINARY=OFF \
+ -DSHERPA_ONNX_LINK_LIBSTDCPP_STATICALLY=OFF \
+ ..
+make -j2
+make install
+
+ls -lh install/bin/wasm/speech-enhancement
diff --git a/build-wasm-simd-tts.sh b/build-wasm-simd-tts.sh
index c707bef6..b2cd0f66 100755
--- a/build-wasm-simd-tts.sh
+++ b/build-wasm-simd-tts.sh
@@ -29,6 +29,7 @@ echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
+ echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
diff --git a/build-wasm-simd-vad-asr.sh b/build-wasm-simd-vad-asr.sh
index 62193155..2979702b 100755
--- a/build-wasm-simd-vad-asr.sh
+++ b/build-wasm-simd-vad-asr.sh
@@ -30,6 +30,7 @@ echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
+ echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
diff --git a/build-wasm-simd-vad.sh b/build-wasm-simd-vad.sh
index 2ab11249..0ae97cd3 100755
--- a/build-wasm-simd-vad.sh
+++ b/build-wasm-simd-vad.sh
@@ -29,6 +29,7 @@ echo "EMSCRIPTEN: $EMSCRIPTEN"
if [ ! -f $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake ]; then
echo "Cannot find $EMSCRIPTEN/cmake/Modules/Platform/Emscripten.cmake"
echo "Please make sure you have installed emsdk correctly"
+ echo "Hint: emsdk 3.1.53 is known to work. Other versions may not work"
exit 1
fi
diff --git a/wasm/CMakeLists.txt b/wasm/CMakeLists.txt
index 7dd6ce7b..0f18d313 100644
--- a/wasm/CMakeLists.txt
+++ b/wasm/CMakeLists.txt
@@ -18,6 +18,10 @@ if(SHERPA_ONNX_ENABLE_WASM_VAD_ASR)
add_subdirectory(vad-asr)
endif()
+if(SHERPA_ONNX_ENABLE_WASM_SPEECH_ENHANCEMENT)
+ add_subdirectory(speech-enhancement)
+endif()
+
if(SHERPA_ONNX_ENABLE_WASM_SPEAKER_DIARIZATION)
add_subdirectory(speaker-diarization)
endif()
diff --git a/wasm/asr/app-asr.js b/wasm/asr/app-asr.js
index d3605b97..94725057 100644
--- a/wasm/asr/app-asr.js
+++ b/wasm/asr/app-asr.js
@@ -5,7 +5,6 @@
const startBtn = document.getElementById('startBtn');
const stopBtn = document.getElementById('stopBtn');
const clearBtn = document.getElementById('clearBtn');
-const hint = document.getElementById('hint');
const soundClips = document.getElementById('sound-clips');
let textArea = document.getElementById('results');
@@ -16,7 +15,7 @@ let resultList = [];
clearBtn.onclick = function() {
resultList = [];
textArea.value = getDisplayResult();
- textArea.scrollTop = textArea.scrollHeight; // auto scroll
+ textArea.scrollTop = textArea.scrollHeight; // auto scroll
};
function getDisplayResult() {
@@ -37,11 +36,39 @@ function getDisplayResult() {
return ans;
}
-
Module = {};
+
+// https://emscripten.org/docs/api_reference/module.html#Module.locateFile
+Module.locateFile = function(path, scriptDirectory = '') {
+ console.log(`path: ${path}, scriptDirectory: ${scriptDirectory}`);
+ return scriptDirectory + path;
+};
+
+// https://emscripten.org/docs/api_reference/module.html#Module.setStatus
+Module.setStatus = function(status) {
+ console.log(`status ${status}`);
+ const statusElement = document.getElementById('status');
+ if (status == "Running...") {
+    status = 'Model downloaded. Initializing recognizer...'
+ }
+ statusElement.textContent = status;
+ if (status === '') {
+ statusElement.style.display = 'none';
+ // statusElement.parentNode.removeChild(statusElement);
+
+ document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
+ tabContentElement.classList.remove('loading');
+ });
+ } else {
+ statusElement.style.display = 'block';
+ document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
+ tabContentElement.classList.add('loading');
+ });
+ }
+};
+
Module.onRuntimeInitialized = function() {
console.log('inited!');
- hint.innerText = 'Model loaded! Please click start';
startBtn.disabled = false;
@@ -53,11 +80,11 @@ let audioCtx;
let mediaStream;
let expectedSampleRate = 16000;
-let recordSampleRate; // the sampleRate of the microphone
-let recorder = null; // the microphone
-let leftchannel = []; // TODO: Use a single channel
+let recordSampleRate; // the sampleRate of the microphone
+let recorder = null; // the microphone
+let leftchannel = []; // TODO: Use a single channel
-let recordingLength = 0; // number of samples so far
+let recordingLength = 0; // number of samples so far
let recognizer = null;
let recognizer_stream = null;
@@ -66,11 +93,11 @@ if (navigator.mediaDevices.getUserMedia) {
console.log('getUserMedia supported.');
// see https://w3c.github.io/mediacapture-main/#dom-mediadevices-getusermedia
- const constraints = {audio: true};
+ const constraints = {audio : true};
let onSuccess = function(stream) {
if (!audioCtx) {
- audioCtx = new AudioContext({sampleRate: 16000});
+ audioCtx = new AudioContext({sampleRate : 16000});
}
console.log(audioCtx);
recordSampleRate = audioCtx.sampleRate;
@@ -120,7 +147,6 @@ if (navigator.mediaDevices.getUserMedia) {
result = recognizer.getResult(recognizer_stream).text;
}
-
if (result.length > 0 && lastResult != result) {
lastResult = result;
}
@@ -134,7 +160,7 @@ if (navigator.mediaDevices.getUserMedia) {
}
textArea.value = getDisplayResult();
- textArea.scrollTop = textArea.scrollHeight; // auto scroll
+ textArea.scrollTop = textArea.scrollHeight; // auto scroll
let buf = new Int16Array(samples.length);
for (var i = 0; i < samples.length; ++i) {
@@ -221,9 +247,8 @@ if (navigator.mediaDevices.getUserMedia) {
};
};
- let onError = function(err) {
- console.log('The following error occured: ' + err);
- };
+ let onError = function(
+ err) { console.log('The following error occured: ' + err); };
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
@@ -231,7 +256,6 @@ if (navigator.mediaDevices.getUserMedia) {
alert('getUserMedia not supported on your browser!');
}
-
// this function is copied/modified from
// https://gist.github.com/meziantou/edb7217fddfbb70e899e
function flatten(listOfSamples) {
@@ -257,22 +281,22 @@ function toWav(samples) {
// http://soundfile.sapp.org/doc/WaveFormat/
// F F I R
- view.setUint32(0, 0x46464952, true); // chunkID
- view.setUint32(4, 36 + samples.length * 2, true); // chunkSize
+ view.setUint32(0, 0x46464952, true); // chunkID
+ view.setUint32(4, 36 + samples.length * 2, true); // chunkSize
// E V A W
- view.setUint32(8, 0x45564157, true); // format
- //
+ view.setUint32(8, 0x45564157, true); // format
+ //
// t m f
- view.setUint32(12, 0x20746d66, true); // subchunk1ID
- view.setUint32(16, 16, true); // subchunk1Size, 16 for PCM
- view.setUint32(20, 1, true); // audioFormat, 1 for PCM
- view.setUint16(22, 1, true); // numChannels: 1 channel
- view.setUint32(24, expectedSampleRate, true); // sampleRate
- view.setUint32(28, expectedSampleRate * 2, true); // byteRate
- view.setUint16(32, 2, true); // blockAlign
- view.setUint16(34, 16, true); // bitsPerSample
- view.setUint32(36, 0x61746164, true); // Subchunk2ID
- view.setUint32(40, samples.length * 2, true); // subchunk2Size
+ view.setUint32(12, 0x20746d66, true); // subchunk1ID
+ view.setUint32(16, 16, true); // subchunk1Size, 16 for PCM
+ view.setUint32(20, 1, true); // audioFormat, 1 for PCM
+ view.setUint16(22, 1, true); // numChannels: 1 channel
+ view.setUint32(24, expectedSampleRate, true); // sampleRate
+ view.setUint32(28, expectedSampleRate * 2, true); // byteRate
+ view.setUint16(32, 2, true); // blockAlign
+ view.setUint16(34, 16, true); // bitsPerSample
+ view.setUint32(36, 0x61746164, true); // Subchunk2ID
+ view.setUint32(40, samples.length * 2, true); // subchunk2Size
let offset = 44;
for (let i = 0; i < samples.length; ++i) {
@@ -280,7 +304,7 @@ function toWav(samples) {
offset += 2;
}
- return new Blob([view], {type: 'audio/wav'});
+ return new Blob([ view ], {type : 'audio/wav'});
}
// this function is copied from
diff --git a/wasm/asr/index.html b/wasm/asr/index.html
index 53ee43d8..f8f808ef 100644
--- a/wasm/asr/index.html
+++ b/wasm/asr/index.html
@@ -11,30 +11,70 @@
textarea {
width:100%;
}
+ .loading {
+ display: none !important;
+ }
-
+
Next-gen Kaldi + WebAssembly
ASR Demo with sherpa-onnx
(with Zipformer)
-
-
Loading model ... ...
-
-
-
Start
-
Stop
-
Clear
-
-
-
+
+
Loading...
+
+
+
+
+ Start
+ Stop
+ Clear
+
+
+
+
+
+
+
+
-
+
+
+
Description
+
+ Everything is open-sourced. code
+ If you have any issues, please either file a ticket or contact us via
+
+
+
About This Demo
+
+ Private and Secure: All processing is done locally on your device (CPU) within your browser with a single thread. No server is involved, ensuring privacy and security. You can disconnect from the Internet once this page is loaded.
+ Efficient Resource Usage: No GPU is required, leaving system resources available for webLLM analysis.
+
+
Latest Update
+
+ Update UI.
+ First working version.
+
+
+
Acknowledgement
+
+
diff --git a/wasm/nodejs/CMakeLists.txt b/wasm/nodejs/CMakeLists.txt
index dc8d8c85..19312e96 100644
--- a/wasm/nodejs/CMakeLists.txt
+++ b/wasm/nodejs/CMakeLists.txt
@@ -84,6 +84,7 @@ set(exported_functions
#
SherpaOnnxFileExists
SherpaOnnxReadWave
+ SherpaOnnxReadWaveFromBinaryData
SherpaOnnxFreeWave
SherpaOnnxWriteWave
)
diff --git a/wasm/nodejs/sherpa-onnx-wave.js b/wasm/nodejs/sherpa-onnx-wave.js
index af87efc3..e1a6f959 100644
--- a/wasm/nodejs/sherpa-onnx-wave.js
+++ b/wasm/nodejs/sherpa-onnx-wave.js
@@ -12,6 +12,36 @@ function readWave(filename, Module) {
Module._free(pFilename);
+ const samplesPtr = Module.HEAP32[w / 4] / 4;
+ const sampleRate = Module.HEAP32[w / 4 + 1];
+ const numSamples = Module.HEAP32[w / 4 + 2];
+
+ const samples = new Float32Array(numSamples);
+ for (let i = 0; i < numSamples; i++) {
+ samples[i] = Module.HEAPF32[samplesPtr + i];
+ }
+
+ Module._SherpaOnnxFreeWave(w);
+
+ return {samples: samples, sampleRate: sampleRate};
+}
+
+function readWaveFromBinaryData(uint8Array) {
+ const numBytes = uint8Array.length * uint8Array.BYTES_PER_ELEMENT;
+  const pointer = Module._malloc(numBytes);
+
+ const dataOnHeap = new Uint8Array(Module.HEAPU8.buffer, pointer, numBytes);
+ dataOnHeap.set(uint8Array);
+
+  const w = Module._SherpaOnnxReadWaveFromBinaryData(
+      dataOnHeap.byteOffset, numBytes);
+ if (w == 0) {
+ console.log('Failed to read wave from binary data');
+ return null;
+ }
+
+  Module._free(pointer);
+
const samplesPtr = Module.HEAP32[w / 4] / 4;
const sampleRate = Module.HEAP32[w / 4 + 1];
const numSamples = Module.HEAP32[w / 4 + 2];
@@ -53,5 +83,6 @@ if (typeof process == 'object' && typeof process.versions == 'object' &&
module.exports = {
readWave,
writeWave,
+ readWaveFromBinaryData,
};
}
diff --git a/wasm/speech-enhancement/CMakeLists.txt b/wasm/speech-enhancement/CMakeLists.txt
new file mode 100644
index 00000000..41dc7a6c
--- /dev/null
+++ b/wasm/speech-enhancement/CMakeLists.txt
@@ -0,0 +1,61 @@
+if(NOT $ENV{SHERPA_ONNX_IS_USING_BUILD_WASM_SH})
+ message(FATAL_ERROR "Please use ./build-wasm-simd-speech-enhancement.sh to build for wasm speech enhancement")
+endif()
+
+if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/assets/gtcrn.onnx")
+ message(FATAL_ERROR "Please read ${CMAKE_CURRENT_SOURCE_DIR}/assets/README.md before you continue")
+endif()
+
+set(exported_functions
+ MyPrint
+ SherpaOnnxCreateOfflineSpeechDenoiser
+ SherpaOnnxDestroyOfflineSpeechDenoiser
+ SherpaOnnxOfflineSpeechDenoiserGetSampleRate
+ SherpaOnnxOfflineSpeechDenoiserRun
+ SherpaOnnxDestroyDenoisedAudio
+ SherpaOnnxWriteWave
+ SherpaOnnxReadWave
+ SherpaOnnxReadWaveFromBinaryData
+ SherpaOnnxFreeWave
+)
+set(mangled_exported_functions)
+foreach(x IN LISTS exported_functions)
+ list(APPEND mangled_exported_functions "_${x}")
+endforeach()
+list(JOIN mangled_exported_functions "," all_exported_functions)
+
+
+include_directories(${CMAKE_SOURCE_DIR})
+set(MY_FLAGS " -s FORCE_FILESYSTEM=1 -s INITIAL_MEMORY=128MB -s ALLOW_MEMORY_GROWTH=1")
+string(APPEND MY_FLAGS " -sSTACK_SIZE=10485760 ") # 10MB
+string(APPEND MY_FLAGS " -sEXPORTED_FUNCTIONS=[_CopyHeap,_malloc,_free,${all_exported_functions}] ")
+string(APPEND MY_FLAGS "--preload-file ${CMAKE_CURRENT_SOURCE_DIR}/assets@. ")
+string(APPEND MY_FLAGS " -sEXPORTED_RUNTIME_METHODS=['ccall','stringToUTF8','setValue','getValue','lengthBytesUTF8','UTF8ToString'] ")
+
+message(STATUS "MY_FLAGS: ${MY_FLAGS}")
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${MY_FLAGS}")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_FLAGS}")
+set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${MY_FLAGS}")
+
+if (NOT CMAKE_EXECUTABLE_SUFFIX STREQUAL ".js")
+ message(FATAL_ERROR "The default suffix for building executables should be .js!")
+endif()
+# set(CMAKE_EXECUTABLE_SUFFIX ".html")
+
+add_executable(sherpa-onnx-wasm-main-speech-enhancement sherpa-onnx-wasm-main-speech-enhancement.cc)
+target_link_libraries(sherpa-onnx-wasm-main-speech-enhancement sherpa-onnx-c-api)
+install(TARGETS sherpa-onnx-wasm-main-speech-enhancement DESTINATION bin/wasm/speech-enhancement)
+
+install(
+ FILES
+ "$<TARGET_FILE_DIR:sherpa-onnx-wasm-main-speech-enhancement>/sherpa-onnx-wasm-main-speech-enhancement.js"
+ "index.html"
+ "sherpa-onnx-speech-enhancement.js"
+ "../nodejs/sherpa-onnx-wave.js"
+ "app-speech-enhancement.js"
+ "$<TARGET_FILE_DIR:sherpa-onnx-wasm-main-speech-enhancement>/sherpa-onnx-wasm-main-speech-enhancement.wasm"
+ "$<TARGET_FILE_DIR:sherpa-onnx-wasm-main-speech-enhancement>/sherpa-onnx-wasm-main-speech-enhancement.data"
+ DESTINATION
+ bin/wasm/speech-enhancement
+)
diff --git a/wasm/speech-enhancement/app-speech-enhancement.js b/wasm/speech-enhancement/app-speech-enhancement.js
new file mode 100644
index 00000000..fe67c425
--- /dev/null
+++ b/wasm/speech-enhancement/app-speech-enhancement.js
@@ -0,0 +1,135 @@
+
+const fileInput = document.getElementById('fileInput');
+
+let speech_denoiser = null;
+const inAudioPlayback = document.getElementById('inAudioPlayback');
+const outAudioPlayback = document.getElementById('outAudioPlayback');
+
+Module = {};
+
+// https://emscripten.org/docs/api_reference/module.html#Module.locateFile
+Module.locateFile = function(path, scriptDirectory = '') {
+ console.log(`path: ${path}, scriptDirectory: ${scriptDirectory}`);
+ return scriptDirectory + path;
+};
+
+// https://emscripten.org/docs/api_reference/module.html#Module.setStatus
+Module.setStatus = function(status) {
+ console.log(`status ${status}`);
+ const statusElement = document.getElementById('status');
+ statusElement.textContent = status;
+ if (status === '') {
+ statusElement.style.display = 'none';
+ document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
+ tabContentElement.classList.remove('loading');
+ });
+ } else {
+ statusElement.style.display = 'block';
+ document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
+ tabContentElement.classList.add('loading');
+ });
+ }
+};
+
+Module.onRuntimeInitialized = function() {
+ console.log('Model files downloaded!');
+
+ console.log('Initializing speech denoiser ......');
+ speech_denoiser = createOfflineSpeechDenoiser(Module)
+};
+
+async function process(wave) {
+ let denoised = speech_denoiser.run(wave.samples, wave.sampleRate);
+ console.log(denoised);
+
+ let int16Samples = new Int16Array(denoised.samples.length);
+ for (var i = 0; i < denoised.samples.length; ++i) {
+ let s = denoised.samples[i];
+ if (s >= 1)
+ s = 1;
+ else if (s <= -1)
+ s = -1;
+
+ int16Samples[i] = s * 32767;
+ }
+
+ let blob = toWav(int16Samples, denoised.sampleRate);
+ const objectUrl = URL.createObjectURL(blob);
+ console.log(objectUrl);
+
+ outAudioPlayback.src = objectUrl;
+ outAudioPlayback.controls = true;
+ outAudioPlayback.style.display = 'block';
+}
+
+fileInput.addEventListener('change', function(event) {
+ if (!event.target.files || !event.target.files[0]) {
+ console.log('No file selected.');
+ return;
+ }
+
+ const file = event.target.files[0];
+ console.log('Selected file:', file.name, file.type, file.size, 'bytes');
+ const reader = new FileReader();
+ reader.onload = function(ev) {
+ console.log('FileReader onload called.');
+ const arrayBuffer = ev.target.result;
+ console.log('ArrayBuffer length:', arrayBuffer.byteLength);
+
+ const uint8Array = new Uint8Array(arrayBuffer);
+ const wave = readWaveFromBinaryData(uint8Array);
+ if (wave == null) {
+ alert(
+ `${file.name} is not a valid .wav file. Please select a *.wav file`);
+ return;
+ }
+
+
+ var url = URL.createObjectURL(file);
+ console.log(`url: ${url}`);
+ inAudioPlayback.src = url;
+ inAudioPlayback.style.display = 'block';
+
+ process(wave)
+ console.log('process done')
+ };
+ reader.onerror = function(err) {
+ console.error('FileReader error:', err);
+ };
+ console.log('Starting FileReader.readAsArrayBuffer...');
+ reader.readAsArrayBuffer(file);
+});
+
+// this function is copied/modified from
+// https://gist.github.com/meziantou/edb7217fddfbb70e899e
+function toWav(samples, sampleRate) {
+ let buf = new ArrayBuffer(44 + samples.length * 2);
+ var view = new DataView(buf);
+
+ // http://soundfile.sapp.org/doc/WaveFormat/
+ // F F I R
+ view.setUint32(0, 0x46464952, true); // chunkID
+ view.setUint32(4, 36 + samples.length * 2, true); // chunkSize
+ // E V A W
+ view.setUint32(8, 0x45564157, true); // format
+ //
+ // t m f
+ view.setUint32(12, 0x20746d66, true); // subchunk1ID
+ view.setUint32(16, 16, true); // subchunk1Size, 16 for PCM
+ view.setUint32(20, 1, true); // audioFormat, 1 for PCM
+ view.setUint16(22, 1, true); // numChannels: 1 channel
+ view.setUint32(24, sampleRate, true); // sampleRate
+ view.setUint32(28, sampleRate * 2, true); // byteRate
+ view.setUint16(32, 2, true); // blockAlign
+ view.setUint16(34, 16, true); // bitsPerSample
+ view.setUint32(36, 0x61746164, true); // Subchunk2ID
+ view.setUint32(40, samples.length * 2, true); // subchunk2Size
+
+ let offset = 44;
+ for (let i = 0; i < samples.length; ++i) {
+ view.setInt16(offset, samples[i], true);
+ offset += 2;
+ }
+
+ return new Blob([view], {type: 'audio/wav'});
+}
diff --git a/wasm/speech-enhancement/assets/README.md b/wasm/speech-enhancement/assets/README.md
new file mode 100644
index 00000000..4037c6e3
--- /dev/null
+++ b/wasm/speech-enhancement/assets/README.md
@@ -0,0 +1,40 @@
+# Introduction
+
+## Huggingface space
+
+You can visit https://huggingface.co/spaces/k2-fsa/wasm-speech-enhancement-gtcrn
+to try it in your browser without building or installing anything.
+
+You can also visit
+https://modelscope.cn/studios/csukuangfj/wasm-speech-enhancement-gtcrn
+
+## Usage
+
+Please refer to
+https://github.com/k2-fsa/sherpa-onnx/releases/tag/speech-enhancement-models
+to download a model.
+
+The following is an example:
+
+```bash
+cd sherpa-onnx/wasm/speech-enhancement/assets
+wget https://github.com/k2-fsa/sherpa-onnx/releases/download/speech-enhancement-models/gtcrn_simple.onnx
+
+mv gtcrn_simple.onnx gtcrn.onnx
+```
+
+You should have the following files in `assets` before you can run
+`build-wasm-simd-speech-enhancement.sh`
+
+```
+(py38) fangjuns-MacBook-Pro:assets fangjun$ tree .
+.
+├── README.md
+└── gtcrn.onnx
+
+0 directories, 2 files
+(py38) fangjuns-MacBook-Pro:assets fangjun$ ls -lh
+total 1056
+-rw-r--r-- 1 fangjun staff 466B Mar 12 16:13 README.md
+-rw-r--r-- 1 fangjun staff 523K Mar 12 16:14 gtcrn.onnx
+```
diff --git a/wasm/speech-enhancement/index.html b/wasm/speech-enhancement/index.html
new file mode 100644
index 00000000..3a968441
--- /dev/null
+++ b/wasm/speech-enhancement/index.html
@@ -0,0 +1,98 @@
+
+
+
+
+
+
+
+ Next-gen Kaldi WebAssembly with sherpa-onnx for speech enhancement
+
+
+
+
+
+ Next-gen Kaldi + WebAssembly
+ Speech Enhancement with sherpa-onnx
+ using GTCRN
+
+
+ Loading...
+
+
+
+
+
+
+ 🎵 Input
+
+
+
+
+
+
Drop Audio Here (*.wav) - or - Click to Upload
+
+
+
+
+
+
+
+
+
+
Description
+
+ Everything is open-sourced. code
+ The model is from GTCRN
+ Please upload .wav files
+
+ If you have any issues, please either file a ticket or contact us via
+
+
+
About This Demo
+
+ Private and Secure: All processing is done locally on your device (CPU) within your browser with a single thread. No server is involved, ensuring privacy and security. You can disconnect from the Internet once this page is loaded.
+ Efficient Resource Usage: No GPU is required, leaving system resources available for webLLM analysis.
+
+
Latest Update
+
+ First working version.
+
+
+
Acknowledgement
+
+
+
+
+
+
+
+
diff --git a/wasm/speech-enhancement/sherpa-onnx-speech-enhancement.js b/wasm/speech-enhancement/sherpa-onnx-speech-enhancement.js
new file mode 100644
index 00000000..08651b72
--- /dev/null
+++ b/wasm/speech-enhancement/sherpa-onnx-speech-enhancement.js
@@ -0,0 +1,180 @@
+function freeConfig(config, Module) {
+ if ('buffer' in config) {
+ Module._free(config.buffer);
+ }
+
+ if ('config' in config) {
+ freeConfig(config.config, Module)
+ }
+
+ if ('gtcrn' in config) {
+ freeConfig(config.gtcrn, Module)
+ }
+
+ Module._free(config.ptr);
+}
+
+function initSherpaOnnxOfflineSpeechDenoiserGtcrnModelConfig(config, Module) {
+ if (!('model' in config)) {
+ config.model = '';
+ }
+
+ const modelLen = Module.lengthBytesUTF8(config.model) + 1;
+
+ const n = modelLen;
+
+ const buffer = Module._malloc(n);
+
+ const len = 1 * 4;
+ const ptr = Module._malloc(len);
+
+ let offset = 0;
+ Module.stringToUTF8(config.model, buffer + offset, modelLen);
+ offset += modelLen;
+
+ offset = 0;
+ Module.setValue(ptr, buffer + offset, 'i8*');
+ offset += modelLen;
+
+ return {
+ buffer: buffer, ptr: ptr, len: len,
+ }
+}
+
+function initSherpaOnnxOfflineSpeechDenoiserModelConfig(config, Module) {
+ if (!('gtcrn' in config)) {
+ config.gtcrn = {model: ''};
+ }
+
+ const gtcrn =
+ initSherpaOnnxOfflineSpeechDenoiserGtcrnModelConfig(config.gtcrn, Module);
+
+ const len = gtcrn.len + 3 * 4;
+ const ptr = Module._malloc(len);
+
+ let offset = 0;
+ Module._CopyHeap(gtcrn.ptr, gtcrn.len, ptr + offset);
+ offset += gtcrn.len;
+
+ Module.setValue(ptr + offset, config.numThreads || 1, 'i32');
+ offset += 4;
+
+ Module.setValue(ptr + offset, config.debug || 0, 'i32');
+ offset += 4;
+
+ const providerLen = Module.lengthBytesUTF8(config.provider || 'cpu') + 1;
+ const buffer = Module._malloc(providerLen);
+ Module.stringToUTF8(config.provider || 'cpu', buffer, providerLen);
+ Module.setValue(ptr + offset, buffer, 'i8*');
+ offset += 4;
+
+ return {buffer: buffer, ptr: ptr, len: len, gtcrn: gtcrn};
+}
+
+function initSherpaOnnxOfflineSpeechDenoiserConfig(config, Module) {
+ if (!('model' in config)) {
+ config.model = {
+ gtcrn: {model: ''},
+ provider: 'cpu',
+ debug: 1,
+ numThreads: 1,
+ };
+ }
+
+ const modelConfig =
+ initSherpaOnnxOfflineSpeechDenoiserModelConfig(config.model, Module);
+ const len = modelConfig.len;
+ const ptr = Module._malloc(len);
+
+ let offset = 0;
+ Module._CopyHeap(modelConfig.ptr, modelConfig.len, ptr + offset);
+ offset += modelConfig.len;
+
+ return {
+ ptr: ptr, len: len, config: modelConfig,
+ }
+}
+
+class OfflineSpeechDenoiser {
+ constructor(configObj, Module) {
+ console.log(configObj)
+ const config = initSherpaOnnxOfflineSpeechDenoiserConfig(configObj, Module)
+ // Module._MyPrint(config.ptr);
+ const handle = Module._SherpaOnnxCreateOfflineSpeechDenoiser(config.ptr);
+
+ freeConfig(config, Module);
+
+ this.handle = handle;
+ this.sampleRate =
+ Module._SherpaOnnxOfflineSpeechDenoiserGetSampleRate(this.handle);
+ this.Module = Module
+ }
+
+ free() {
+ this.Module._SherpaOnnxDestroyOfflineSpeechDenoiser(this.handle);
+ this.handle = 0
+ }
+
+ /**
+ * @param samples {Float32Array} Containing samples in the range [-1, 1]
+ * @param sampleRate {Number}
+ */
+ run(samples, sampleRate) {
+ const pointer =
+ this.Module._malloc(samples.length * samples.BYTES_PER_ELEMENT);
+ this.Module.HEAPF32.set(samples, pointer / samples.BYTES_PER_ELEMENT);
+ const h = this.Module._SherpaOnnxOfflineSpeechDenoiserRun(
+ this.handle, pointer, samples.length, sampleRate);
+ this.Module._free(pointer);
+
+ const numSamples = this.Module.HEAP32[h / 4 + 1];
+ const denoisedSampleRate = this.Module.HEAP32[h / 4 + 2];
+
+ const samplesPtr = this.Module.HEAP32[h / 4] / 4;
+ const denoisedSamples = new Float32Array(numSamples);
+ for (let i = 0; i < numSamples; i++) {
+ denoisedSamples[i] = this.Module.HEAPF32[samplesPtr + i];
+ }
+
+ this.Module._SherpaOnnxDestroyDenoisedAudio(h);
+ return {samples: denoisedSamples, sampleRate: denoisedSampleRate};
+ }
+
+ save(filename, audio) {
+ const samples = audio.samples;
+ const sampleRate = audio.sampleRate;
+ const ptr = this.Module._malloc(samples.length * 4);
+ for (let i = 0; i < samples.length; i++) {
+ this.Module.HEAPF32[ptr / 4 + i] = samples[i];
+ }
+
+ const filenameLen = this.Module.lengthBytesUTF8(filename) + 1;
+ const buffer = this.Module._malloc(filenameLen);
+ this.Module.stringToUTF8(filename, buffer, filenameLen);
+ this.Module._SherpaOnnxWriteWave(ptr, samples.length, sampleRate, buffer);
+ this.Module._free(buffer);
+ this.Module._free(ptr);
+ }
+}
+
+function createOfflineSpeechDenoiser(Module, myConfig) {
+ let config = {
+ model: {
+ gtcrn: {model: './gtcrn.onnx'},
+ debug: 0,
+ },
+ };
+
+ if (myConfig) {
+ config = myConfig;
+ }
+
+ return new OfflineSpeechDenoiser(config, Module);
+}
+
+if (typeof process == 'object' && typeof process.versions == 'object' &&
+ typeof process.versions.node == 'string') {
+ module.exports = {
+ createOfflineSpeechDenoiser,
+ };
+}
diff --git a/wasm/speech-enhancement/sherpa-onnx-wasm-main-speech-enhancement.cc b/wasm/speech-enhancement/sherpa-onnx-wasm-main-speech-enhancement.cc
new file mode 100644
index 00000000..b4f5f8d7
--- /dev/null
+++ b/wasm/speech-enhancement/sherpa-onnx-wasm-main-speech-enhancement.cc
@@ -0,0 +1,39 @@
+// wasm/speech-enhancement/sherpa-onnx-wasm-main-speech-enhancement.cc
+//
+// Copyright (c) 2025 Xiaomi Corporation
+#include <stdio.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "sherpa-onnx/c-api/c-api.h"
+
+// see also
+// https://emscripten.org/docs/porting/connecting_cpp_and_javascript/Interacting-with-code.html
+
+extern "C" {
+
+static_assert(sizeof(SherpaOnnxOfflineSpeechDenoiserGtcrnModelConfig) == 1 * 4,
+ "");
+static_assert(sizeof(SherpaOnnxOfflineSpeechDenoiserModelConfig) ==
+ sizeof(SherpaOnnxOfflineSpeechDenoiserGtcrnModelConfig) +
+ 3 * 4,
+ "");
+static_assert(sizeof(SherpaOnnxOfflineSpeechDenoiserConfig) ==
+ sizeof(SherpaOnnxOfflineSpeechDenoiserModelConfig),
+ "");
+
+void MyPrint(SherpaOnnxOfflineSpeechDenoiserConfig *config) {
+ auto model = &config->model;
+ auto gtcrn = &model->gtcrn;
+ fprintf(stdout, "----------offline speech denoiser model config----------\n");
+ fprintf(stdout, "gtcrn: %s\n", gtcrn->model);
+ fprintf(stdout, "num threads: %d\n", model->num_threads);
+ fprintf(stdout, "debug: %d\n", model->debug);
+ fprintf(stdout, "provider: %s\n", model->provider);
+}
+
+void CopyHeap(const char *src, int32_t num_bytes, char *dst) {
+ std::copy(src, src + num_bytes, dst);
+}
+}
diff --git a/wasm/speech-enhancement/sherpa-onnx-wave.js b/wasm/speech-enhancement/sherpa-onnx-wave.js
new file mode 120000
index 00000000..2a4b0555
--- /dev/null
+++ b/wasm/speech-enhancement/sherpa-onnx-wave.js
@@ -0,0 +1 @@
+../nodejs/sherpa-onnx-wave.js
\ No newline at end of file
diff --git a/wasm/tts/app-tts.js b/wasm/tts/app-tts.js
index d883e511..c87e70f9 100644
--- a/wasm/tts/app-tts.js
+++ b/wasm/tts/app-tts.js
@@ -1,5 +1,4 @@
const generateBtn = document.getElementById('generateBtn');
-const hint = document.getElementById('hint');
const speakerIdLabel = document.getElementById('speakerIdLabel');
const speakerIdInput = document.getElementById('speakerId');
const speedInput = document.getElementById('speed');
@@ -11,13 +10,41 @@ speedValue.innerHTML = speedInput.value;
let index = 0;
-
let tts = null;
let audioCtx = null;
-
Module = {};
+
+// https://emscripten.org/docs/api_reference/module.html#Module.locateFile
+Module.locateFile = function(path, scriptDirectory = '') {
+ console.log(`path: ${path}, scriptDirectory: ${scriptDirectory}`);
+ return scriptDirectory + path;
+};
+
+// https://emscripten.org/docs/api_reference/module.html#Module.setStatus
+Module.setStatus = function(status) {
+ console.log(`status ${status}`);
+ const statusElement = document.getElementById('status');
+ if (status == "Running...") {
+ status = 'Model downloaded. Initializing text to speech model...'
+ }
+ statusElement.textContent = status;
+ if (status === '') {
+ statusElement.style.display = 'none';
+ // statusElement.parentNode.removeChild(statusElement);
+
+ document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
+ tabContentElement.classList.remove('loading');
+ });
+ } else {
+ statusElement.style.display = 'block';
+ document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
+ tabContentElement.classList.add('loading');
+ });
+ }
+};
+
Module.onRuntimeInitialized = function() {
console.log('Model files downloaded!');
@@ -27,17 +54,10 @@ Module.onRuntimeInitialized = function() {
speakerIdLabel.innerHTML = `Speaker ID (0 - ${tts.numSpeakers - 1}):`;
}
- hint.innerText =
- 'Initialized! Please enter text and click the Generate button.';
-
-
-
generateBtn.disabled = false;
};
-speedInput.oninput = function() {
- speedValue.innerHTML = this.value;
-};
+speedInput.oninput = function() { speedValue.innerHTML = this.value; };
generateBtn.onclick = function() {
let speakerId = speakerIdInput.value;
@@ -69,12 +89,12 @@ generateBtn.onclick = function() {
console.log('text', text);
let audio =
- tts.generate({text: text, sid: speakerId, speed: speedInput.value});
+ tts.generate({text : text, sid : speakerId, speed : speedInput.value});
console.log(audio.samples.length, audio.sampleRate);
if (!audioCtx) {
- audioCtx = new AudioContext({sampleRate: tts.sampleRate});
+ audioCtx = new AudioContext({sampleRate : tts.sampleRate});
}
const buffer = audioCtx.createBuffer(1, audio.samples.length, tts.sampleRate);
@@ -155,22 +175,22 @@ function toWav(floatSamples, sampleRate) {
// http://soundfile.sapp.org/doc/WaveFormat/
// F F I R
- view.setUint32(0, 0x46464952, true); // chunkID
- view.setUint32(4, 36 + samples.length * 2, true); // chunkSize
+ view.setUint32(0, 0x46464952, true); // chunkID
+ view.setUint32(4, 36 + samples.length * 2, true); // chunkSize
// E V A W
- view.setUint32(8, 0x45564157, true); // format
- //
+ view.setUint32(8, 0x45564157, true); // format
+ //
// t m f
- view.setUint32(12, 0x20746d66, true); // subchunk1ID
- view.setUint32(16, 16, true); // subchunk1Size, 16 for PCM
- view.setUint32(20, 1, true); // audioFormat, 1 for PCM
- view.setUint16(22, 1, true); // numChannels: 1 channel
- view.setUint32(24, sampleRate, true); // sampleRate
- view.setUint32(28, sampleRate * 2, true); // byteRate
- view.setUint16(32, 2, true); // blockAlign
- view.setUint16(34, 16, true); // bitsPerSample
- view.setUint32(36, 0x61746164, true); // Subchunk2ID
- view.setUint32(40, samples.length * 2, true); // subchunk2Size
+ view.setUint32(12, 0x20746d66, true); // subchunk1ID
+ view.setUint32(16, 16, true); // subchunk1Size, 16 for PCM
+ view.setUint32(20, 1, true); // audioFormat, 1 for PCM
+ view.setUint16(22, 1, true); // numChannels: 1 channel
+ view.setUint32(24, sampleRate, true); // sampleRate
+ view.setUint32(28, sampleRate * 2, true); // byteRate
+ view.setUint16(32, 2, true); // blockAlign
+ view.setUint16(34, 16, true); // bitsPerSample
+ view.setUint32(36, 0x61746164, true); // Subchunk2ID
+ view.setUint32(40, samples.length * 2, true); // subchunk2Size
let offset = 44;
for (let i = 0; i < samples.length; ++i) {
@@ -178,5 +198,5 @@ function toWav(floatSamples, sampleRate) {
offset += 2;
}
- return new Blob([view], {type: 'audio/wav'});
+ return new Blob([ view ], {type : 'audio/wav'});
}
diff --git a/wasm/tts/assets/README.md b/wasm/tts/assets/README.md
index 5f65a06e..83ad8cab 100644
--- a/wasm/tts/assets/README.md
+++ b/wasm/tts/assets/README.md
@@ -5,7 +5,7 @@ https://github.com/k2-fsa/sherpa-onnx/releases/tag/tts-models
to download a model.
The following is an example:
-```
+```bash
cd sherpa-onnx/wasm/tts/assets
wget -q https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/vits-piper-en_US-libritts_r-medium.tar.bz2
diff --git a/wasm/tts/index.html b/wasm/tts/index.html
index 2f9decf7..68c4fdf8 100644
--- a/wasm/tts/index.html
+++ b/wasm/tts/index.html
@@ -11,34 +11,70 @@
textarea {
width:100%;
}
+ .loading {
+ display: none !important;
+ }
-
+
Next-gen Kaldi + WebAssembly
Text-to-speech Demo with sherpa-onnx
-
-
Loading model ... ...
-
-
-
Speaker ID:
-
-
-
-
Speed:
-
-
-
-
-
-
-
-
Generate
+
+
-
+
+
+
+
Description
+
+ Everything is open-sourced. code
+ If you have any issues, please either file a ticket or contact us via
+
+
+
About This Demo
+
+ Private and Secure: All processing is done locally on your device (CPU) within your browser with a single thread. No server is involved, ensuring privacy and security. You can disconnect from the Internet once this page is loaded.
+ Efficient Resource Usage: No GPU is required, leaving system resources available for webLLM analysis.
+
+
Latest Update
+
+ Update UI.
+ First working version.
+
+
+
Acknowledgement
+
+
+
diff --git a/wasm/tts/sherpa-onnx-tts.js b/wasm/tts/sherpa-onnx-tts.js
index 4716d1fd..1532ae89 100644
--- a/wasm/tts/sherpa-onnx-tts.js
+++ b/wasm/tts/sherpa-onnx-tts.js
@@ -263,7 +263,7 @@ function initSherpaOnnxOfflineTtsModelConfig(config, Module) {
const providerLen = Module.lengthBytesUTF8(config.provider || 'cpu') + 1;
const buffer = Module._malloc(providerLen);
- Module.stringToUTF8(config.provider, buffer, providerLen);
+ Module.stringToUTF8(config.provider || 'cpu', buffer, providerLen);
Module.setValue(ptr + offset, buffer, 'i8*');
offset += 4;
diff --git a/wasm/vad-asr/app-vad-asr.js b/wasm/vad-asr/app-vad-asr.js
index 68b7b7da..d116a218 100644
--- a/wasm/vad-asr/app-vad-asr.js
+++ b/wasm/vad-asr/app-vad-asr.js
@@ -5,7 +5,6 @@
const startBtn = document.getElementById('startBtn');
const stopBtn = document.getElementById('stopBtn');
const clearBtn = document.getElementById('clearBtn');
-const hint = document.getElementById('hint');
const soundClips = document.getElementById('sound-clips');
let textArea = document.getElementById('results');
@@ -16,7 +15,7 @@ let resultList = [];
clearBtn.onclick = function() {
resultList = [];
textArea.value = getDisplayResult();
- textArea.scrollTop = textArea.scrollHeight; // auto scroll
+ textArea.scrollTop = textArea.scrollHeight; // auto scroll
};
function getDisplayResult() {
@@ -41,19 +40,17 @@ function getDisplayResult() {
return ans;
}
-
-
Module = {};
let audioCtx;
let mediaStream;
let expectedSampleRate = 16000;
-let recordSampleRate; // the sampleRate of the microphone
-let recorder = null; // the microphone
-let leftchannel = []; // TODO: Use a single channel
+let recordSampleRate; // the sampleRate of the microphone
+let recorder = null; // the microphone
+let leftchannel = []; // TODO: Use a single channel
-let recordingLength = 0; // number of samples so far
+let recordingLength = 0; // number of samples so far
let vad = null;
let buffer = null;
@@ -76,47 +73,47 @@ function createOfflineRecognizerSenseVoice() {}
function initOfflineRecognizer() {
let config = {
- modelConfig: {
- debug: 1,
- tokens: './tokens.txt',
+ modelConfig : {
+ debug : 1,
+ tokens : './tokens.txt',
},
};
if (fileExists('sense-voice.onnx') == 1) {
config.modelConfig.senseVoice = {
- model: './sense-voice.onnx',
- useInverseTextNormalization: 1,
+ model : './sense-voice.onnx',
+ useInverseTextNormalization : 1,
};
} else if (fileExists('whisper-encoder.onnx')) {
config.modelConfig.whisper = {
- encoder: './whisper-encoder.onnx',
- decoder: './whisper-decoder.onnx',
+ encoder : './whisper-encoder.onnx',
+ decoder : './whisper-decoder.onnx',
};
} else if (fileExists('transducer-encoder.onnx')) {
config.modelConfig.transducer = {
- encoder: './transducer-encoder.onnx',
- decoder: './transducer-decoder.onnx',
- joiner: './transducer-joiner.onnx',
+ encoder : './transducer-encoder.onnx',
+ decoder : './transducer-decoder.onnx',
+ joiner : './transducer-joiner.onnx',
};
config.modelConfig.modelType = 'transducer';
} else if (fileExists('nemo-transducer-encoder.onnx')) {
config.modelConfig.transducer = {
- encoder: './nemo-transducer-encoder.onnx',
- decoder: './nemo-transducer-decoder.onnx',
- joiner: './nemo-transducer-joiner.onnx',
+ encoder : './nemo-transducer-encoder.onnx',
+ decoder : './nemo-transducer-decoder.onnx',
+ joiner : './nemo-transducer-joiner.onnx',
};
config.modelConfig.modelType = 'nemo_transducer';
} else if (fileExists('paraformer.onnx')) {
config.modelConfig.paraformer = {
- model: './paraformer.onnx',
+ model : './paraformer.onnx',
};
} else if (fileExists('telespeech.onnx')) {
config.modelConfig.telespeechCtc = './telespeech.onnx';
} else if (fileExists('moonshine-preprocessor.onnx')) {
config.modelConfig.moonshine = {
- preprocessor: './moonshine-preprocessor.onnx',
- encoder: './moonshine-encoder.onnx',
- uncachedDecoder: './moonshine-uncached-decoder.onnx',
- cachedDecoder: './moonshine-cached-decoder.onnx'
+ preprocessor : './moonshine-preprocessor.onnx',
+ encoder : './moonshine-encoder.onnx',
+ uncachedDecoder : './moonshine-uncached-decoder.onnx',
+ cachedDecoder : './moonshine-cached-decoder.onnx'
};
} else {
console.log('Please specify a model.');
@@ -126,9 +123,37 @@ function initOfflineRecognizer() {
recognizer = new OfflineRecognizer(config, Module);
}
+// https://emscripten.org/docs/api_reference/module.html#Module.locateFile
+Module.locateFile = function(path, scriptDirectory = '') {
+ console.log(`path: ${path}, scriptDirectory: ${scriptDirectory}`);
+ return scriptDirectory + path;
+};
+
+// https://emscripten.org/docs/api_reference/module.html#Module.setStatus
+Module.setStatus = function(status) {
+ console.log(`status ${status}`);
+ const statusElement = document.getElementById('status');
+ if (status == "Running...") {
+ status = 'Model downloaded. Initializing recognizer...'
+ }
+ statusElement.textContent = status;
+ if (status === '') {
+ statusElement.style.display = 'none';
+ // statusElement.parentNode.removeChild(statusElement);
+
+ document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
+ tabContentElement.classList.remove('loading');
+ });
+ } else {
+ statusElement.style.display = 'block';
+ document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
+ tabContentElement.classList.add('loading');
+ });
+ }
+};
+
Module.onRuntimeInitialized = function() {
console.log('inited!');
- hint.innerText = 'Model loaded! Please click start';
startBtn.disabled = false;
@@ -141,17 +166,15 @@ Module.onRuntimeInitialized = function() {
initOfflineRecognizer();
};
-
-
if (navigator.mediaDevices.getUserMedia) {
console.log('getUserMedia supported.');
// see https://w3c.github.io/mediacapture-main/#dom-mediadevices-getusermedia
- const constraints = {audio: true};
+ const constraints = {audio : true};
let onSuccess = function(stream) {
if (!audioCtx) {
- audioCtx = new AudioContext({sampleRate: expectedSampleRate});
+ audioCtx = new AudioContext({sampleRate : expectedSampleRate});
}
console.log(audioCtx);
recordSampleRate = audioCtx.sampleRate;
@@ -219,7 +242,6 @@ if (navigator.mediaDevices.getUserMedia) {
resultList.push(durationStr);
-
// now save the segment to a wav file
let buf = new Int16Array(segment.samples.length);
for (var i = 0; i < segment.samples.length; ++i) {
@@ -277,7 +299,7 @@ if (navigator.mediaDevices.getUserMedia) {
}
textArea.value = getDisplayResult();
- textArea.scrollTop = textArea.scrollHeight; // auto scroll
+ textArea.scrollTop = textArea.scrollHeight; // auto scroll
};
startBtn.onclick = function() {
@@ -308,9 +330,8 @@ if (navigator.mediaDevices.getUserMedia) {
};
};
- let onError = function(err) {
- console.log('The following error occured: ' + err);
- };
+ let onError = function(
+ err) { console.log('The following error occurred: ' + err); };
navigator.mediaDevices.getUserMedia(constraints).then(onSuccess, onError);
} else {
@@ -318,7 +339,6 @@ if (navigator.mediaDevices.getUserMedia) {
alert('getUserMedia not supported on your browser!');
}
-
// this function is copied/modified from
// https://gist.github.com/meziantou/edb7217fddfbb70e899e
function flatten(listOfSamples) {
@@ -344,22 +364,22 @@ function toWav(samples) {
// http://soundfile.sapp.org/doc/WaveFormat/
// F F I R
- view.setUint32(0, 0x46464952, true); // chunkID
- view.setUint32(4, 36 + samples.length * 2, true); // chunkSize
+ view.setUint32(0, 0x46464952, true); // chunkID
+ view.setUint32(4, 36 + samples.length * 2, true); // chunkSize
// E V A W
- view.setUint32(8, 0x45564157, true); // format
- //
+ view.setUint32(8, 0x45564157, true); // format
+ //
// t m f
- view.setUint32(12, 0x20746d66, true); // subchunk1ID
- view.setUint32(16, 16, true); // subchunk1Size, 16 for PCM
- view.setUint32(20, 1, true); // audioFormat, 1 for PCM
- view.setUint16(22, 1, true); // numChannels: 1 channel
- view.setUint32(24, expectedSampleRate, true); // sampleRate
- view.setUint32(28, expectedSampleRate * 2, true); // byteRate
- view.setUint16(32, 2, true); // blockAlign
- view.setUint16(34, 16, true); // bitsPerSample
- view.setUint32(36, 0x61746164, true); // Subchunk2ID
- view.setUint32(40, samples.length * 2, true); // subchunk2Size
+ view.setUint32(12, 0x20746d66, true); // subchunk1ID
+ view.setUint32(16, 16, true); // subchunk1Size, 16 for PCM
+ view.setUint32(20, 1, true); // audioFormat, 1 for PCM
+ view.setUint16(22, 1, true); // numChannels: 1 channel
+ view.setUint32(24, expectedSampleRate, true); // sampleRate
+ view.setUint32(28, expectedSampleRate * 2, true); // byteRate
+ view.setUint16(32, 2, true); // blockAlign
+ view.setUint16(34, 16, true); // bitsPerSample
+ view.setUint32(36, 0x61746164, true); // Subchunk2ID
+ view.setUint32(40, samples.length * 2, true); // subchunk2Size
let offset = 44;
for (let i = 0; i < samples.length; ++i) {
@@ -367,7 +387,7 @@ function toWav(samples) {
offset += 2;
}
- return new Blob([view], {type: 'audio/wav'});
+ return new Blob([ view ], {type : 'audio/wav'});
}
// this function is copied from
diff --git a/wasm/vad-asr/index.html b/wasm/vad-asr/index.html
index 3000b6e3..a5d69908 100644
--- a/wasm/vad-asr/index.html
+++ b/wasm/vad-asr/index.html
@@ -11,30 +11,68 @@
textarea {
width:100%;
}
+ .loading {
+ display: none !important;
+ }
-
+
Next-gen Kaldi + WebAssembly
VAD+ASR Demo with sherpa-onnx
(with Zipformer)
-
-
Loading model ... ...
-
-
-
Start
-
Stop
-
Clear
-
-
-
+
+
Loading...
+
+
+
+
+ Start
+ Stop
+ Clear
+
+
+
+
+
+
-
+
+
+
Description
+
+ Everything is open-sourced. code
+ If you have any issues, please either file a ticket or contact us via
+
+
+
About This Demo
+
+ Private and Secure: All processing is done locally on your device (CPU) within your browser with a single thread. No server is involved, ensuring privacy and security. You can disconnect from the Internet once this page is loaded.
+ Efficient Resource Usage: No GPU is required, leaving system resources available for webLLM analysis.
+
+
Latest Update
+
+ Update UI.
+ First working version.
+
+
+
Acknowledgement
+
+