Add WebAssembly (WASM) for speech enhancement GTCRN models (#2002)

This commit is contained in:
Fangjun Kuang
2025-03-13 18:35:03 +08:00
committed by GitHub
parent 6a97f8adcf
commit d320fdf65e
29 changed files with 1175 additions and 161 deletions

View File

@@ -0,0 +1,61 @@
# This file must be driven by ./build-wasm-simd-speech-enhancement.sh,
# which sets up the Emscripten toolchain and exports this env var.
# Quote the env expansion so an unset variable is an empty (falsy) string
# instead of a parse error inside if().
if(NOT "$ENV{SHERPA_ONNX_IS_USING_BUILD_WASM_SH}")
  message(FATAL_ERROR "Please use ./build-wasm-simd-speech-enhancement.sh to build for wasm speech enhancement")
endif()

if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/assets/gtcrn.onnx")
  message(FATAL_ERROR "Please read ${CMAKE_CURRENT_SOURCE_DIR}/assets/README.md before you continue")
endif()

# C-API symbols the JavaScript side calls directly. Emscripten exports each
# C symbol with a leading underscore; the underscore is added below.
set(exported_functions
  MyPrint
  SherpaOnnxCreateOfflineSpeechDenoiser
  SherpaOnnxDestroyOfflineSpeechDenoiser
  SherpaOnnxOfflineSpeechDenoiserGetSampleRate
  SherpaOnnxOfflineSpeechDenoiserRun
  SherpaOnnxDestroyDenoisedAudio
  SherpaOnnxWriteWave
  SherpaOnnxReadWave
  SherpaOnnxReadWaveFromBinaryData
  SherpaOnnxFreeWave
)

set(mangled_exported_functions)
foreach(x IN LISTS exported_functions)
  list(APPEND mangled_exported_functions "_${x}")
endforeach()

list(JOIN mangled_exported_functions "," all_exported_functions)

# Emscripten-specific options; see
# https://emscripten.org/docs/tools_reference/settings_reference.html
set(MY_FLAGS " -s FORCE_FILESYSTEM=1 -s INITIAL_MEMORY=128MB -s ALLOW_MEMORY_GROWTH=1")
string(APPEND MY_FLAGS " -sSTACK_SIZE=10485760 ") # 10MB
string(APPEND MY_FLAGS " -sEXPORTED_FUNCTIONS=[_CopyHeap,_malloc,_free,${all_exported_functions}] ")
# Bundle everything under assets/ (the gtcrn.onnx model) into the
# generated .data file, mounted at the wasm FS root.
string(APPEND MY_FLAGS "--preload-file ${CMAKE_CURRENT_SOURCE_DIR}/assets@. ")
string(APPEND MY_FLAGS " -sEXPORTED_RUNTIME_METHODS=['ccall','stringToUTF8','setValue','getValue','lengthBytesUTF8','UTF8ToString'] ")

message(STATUS "MY_FLAGS: ${MY_FLAGS}")

set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${MY_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_FLAGS}")
# FIX: the original appended to the misspelled (and non-existent) variable
# CMAKE_EXECUTBLE_LINKER_FLAGS, which CMake never reads. The real variable
# is CMAKE_EXE_LINKER_FLAGS; -sEXPORTED_* and --preload-file are link-stage
# options and must reach the linker through it.
string(APPEND CMAKE_EXE_LINKER_FLAGS " ${MY_FLAGS}")

# With the Emscripten toolchain file, executables are emitted as .js glue
# plus a .wasm binary; anything else means the toolchain is not active.
if(NOT "${CMAKE_EXECUTABLE_SUFFIX}" STREQUAL ".js")
  message(FATAL_ERROR "The default suffix for building executables should be .js!")
endif()

add_executable(sherpa-onnx-wasm-main-speech-enhancement sherpa-onnx-wasm-main-speech-enhancement.cc)
# Target-scoped include path for "sherpa-onnx/c-api/c-api.h" instead of the
# directory-scoped include_directories().
target_include_directories(sherpa-onnx-wasm-main-speech-enhancement
  PRIVATE
    ${CMAKE_SOURCE_DIR}
)
target_link_libraries(sherpa-onnx-wasm-main-speech-enhancement PRIVATE sherpa-onnx-c-api)

install(TARGETS sherpa-onnx-wasm-main-speech-enhancement DESTINATION bin/wasm/speech-enhancement)

install(
  FILES
    "$<TARGET_FILE_DIR:sherpa-onnx-wasm-main-speech-enhancement>/sherpa-onnx-wasm-main-speech-enhancement.js"
    "index.html"
    "sherpa-onnx-speech-enhancement.js"
    "../nodejs/sherpa-onnx-wave.js"
    "app-speech-enhancement.js"
    "$<TARGET_FILE_DIR:sherpa-onnx-wasm-main-speech-enhancement>/sherpa-onnx-wasm-main-speech-enhancement.wasm"
    "$<TARGET_FILE_DIR:sherpa-onnx-wasm-main-speech-enhancement>/sherpa-onnx-wasm-main-speech-enhancement.data"
  DESTINATION
    bin/wasm/speech-enhancement
)

View File

@@ -0,0 +1,135 @@
// DOM elements used by this demo page.
const fileInput = document.getElementById('fileInput');
const inAudioPlayback = document.getElementById('inAudioPlayback');
const outAudioPlayback = document.getElementById('outAudioPlayback');

// Created in Module.onRuntimeInitialized once the wasm runtime is ready.
let speech_denoiser = null;

Module = {};

// https://emscripten.org/docs/api_reference/module.html#Module.locateFile
// Resolve the .wasm/.data files relative to the script's own directory.
Module.locateFile = function(path, scriptDirectory = '') {
  console.log(`path: ${path}, scriptDirectory: ${scriptDirectory}`);
  return scriptDirectory + path;
};
// https://emscripten.org/docs/api_reference/module.html#Module.locateFile
// https://emscripten.org/docs/api_reference/module.html#Module.locateFile
// Show the status text and grey out the tab content while the wasm module
// (and the bundled model) is being downloaded; clear both once done.
Module.setStatus = function(status) {
  console.log(`status ${status}`);
  const statusElement = document.getElementById('status');
  statusElement.textContent = status;

  const isLoading = status !== '';
  statusElement.style.display = isLoading ? 'block' : 'none';
  document.querySelectorAll('.tab-content').forEach((tabContentElement) => {
    // toggle(cls, force) adds the class when force is true, removes otherwise.
    tabContentElement.classList.toggle('loading', isLoading);
  });
};
// Called by Emscripten once the wasm runtime and preloaded files are ready;
// only then can the denoiser be constructed.
Module.onRuntimeInitialized = function() {
  console.log('Model files downloaded!');
  console.log('Initializing speech denoiser ......');
  speech_denoiser = createOfflineSpeechDenoiser(Module);
};
// Run the speech denoiser on a decoded wave, convert the enhanced samples
// to 16-bit PCM, and present the result in the output audio player.
async function process(wave) {
  const denoised = speech_denoiser.run(wave.samples, wave.sampleRate);
  console.log(denoised);

  // Convert float samples in [-1, 1] to int16, clamping out-of-range values.
  const int16Samples = new Int16Array(denoised.samples.length);
  for (let i = 0; i < denoised.samples.length; ++i) {
    const s = Math.max(-1, Math.min(1, denoised.samples[i]));
    int16Samples[i] = s * 32767;
  }

  const blob = toWav(int16Samples, denoised.sampleRate);
  const objectUrl = URL.createObjectURL(blob);
  console.log(objectUrl);

  outAudioPlayback.src = objectUrl;
  outAudioPlayback.controls = true;
  outAudioPlayback.style.display = 'block';
}
// Decode the selected .wav file, play back the original, and kick off
// speech enhancement on its samples.
fileInput.addEventListener('change', function(event) {
  const files = event.target.files;
  if (!files || !files[0]) {
    console.log('No file selected.');
    return;
  }

  const file = files[0];
  console.log('Selected file:', file.name, file.type, file.size, 'bytes');

  const reader = new FileReader();

  reader.onload = function(ev) {
    console.log('FileReader onload called.');
    const arrayBuffer = ev.target.result;
    console.log('ArrayBuffer length:', arrayBuffer.byteLength);

    // readWaveFromBinaryData() is provided by sherpa-onnx-wave.js.
    const wave = readWaveFromBinaryData(new Uint8Array(arrayBuffer));
    if (wave == null) {
      alert(
          `${file.name} is not a valid .wav file. Please select a *.wav file`);
      return;
    }

    // Show the original audio in the input player.
    const url = URL.createObjectURL(file);
    console.log(`url: ${url}`);
    inAudioPlayback.src = url;
    inAudioPlayback.style.display = 'block';

    process(wave)
    console.log('process done')
  };

  reader.onerror = function(err) {
    console.error('FileReader error:', err);
  };

  console.log('Starting FileReader.readAsArrayBuffer...');
  reader.readAsArrayBuffer(file);
});
// this function is copied/modified from
// https://gist.github.com/meziantou/edb7217fddfbb70e899e
//
// Build a mono, 16-bit PCM WAV file from int16 samples.
// Header layout: http://soundfile.sapp.org/doc/WaveFormat/
//
// @param samples {Int16Array} PCM samples already clamped/scaled to int16.
// @param sampleRate {Number} e.g. 16000
// @return {Blob} with MIME type audio/wav
function toWav(samples, sampleRate) {
  const numChannels = 1;
  const bytesPerSample = 2;  // 16-bit PCM
  const dataSize = samples.length * bytesPerSample;

  const buf = new ArrayBuffer(44 + dataSize);
  const view = new DataView(buf);

  // Write a 4-character ASCII tag at the given byte offset (clearer than
  // the original little-endian integer constants like 0x46464952).
  const writeTag = (offset, tag) => {
    for (let i = 0; i < 4; ++i) {
      view.setUint8(offset + i, tag.charCodeAt(i));
    }
  };

  writeTag(0, 'RIFF');                     // chunkID
  view.setUint32(4, 36 + dataSize, true);  // chunkSize
  writeTag(8, 'WAVE');                     // format
  writeTag(12, 'fmt ');                    // subchunk1ID
  view.setUint32(16, 16, true);            // subchunk1Size, 16 for PCM
  // FIX: audioFormat is a 16-bit field at offset 20; the original wrote it
  // with setUint32, clobbering numChannels at offset 22 and relying on the
  // next write to repair it.
  view.setUint16(20, 1, true);               // audioFormat, 1 for PCM
  view.setUint16(22, numChannels, true);     // numChannels
  view.setUint32(24, sampleRate, true);      // sampleRate
  view.setUint32(28, sampleRate * numChannels * bytesPerSample, true);  // byteRate
  view.setUint16(32, numChannels * bytesPerSample, true);               // blockAlign
  view.setUint16(34, 16, true);              // bitsPerSample
  writeTag(36, 'data');                      // subchunk2ID
  view.setUint32(40, dataSize, true);        // subchunk2Size

  let offset = 44;
  for (let i = 0; i < samples.length; ++i) {
    view.setInt16(offset, samples[i], true);
    offset += 2;
  }

  return new Blob([view], {type: 'audio/wav'});
}

View File

@@ -0,0 +1,40 @@
# Introduction
## Huggingface space
You can visit https://huggingface.co/spaces/k2-fsa/wasm-speech-enhancement-gtcrn
to try it in your browser without building or installing anything.
If Huggingface is not accessible to you, you can also visit
https://modelscope.cn/studios/csukuangfj/wasm-speech-enhancement-gtcrn
to try it in your browser.
## Usage
Please refer to
https://github.com/k2-fsa/sherpa-onnx/releases/tag/speech-enhancement-models
to download a model.
The following is an example:
```bash
cd sherpa-onnx/wasm/speech-enhancement/assets
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/speech-enhancement-models/gtcrn_simple.onnx
mv gtcrn_simple.onnx gtcrn.onnx
```
You should have the following files in `assets` before you can run
`build-wasm-simd-speech-enhancement.sh`
```
(py38) fangjuns-MacBook-Pro:assets fangjun$ tree .
.
├── README.md
└── gtcrn.onnx
0 directories, 2 files
(py38) fangjuns-MacBook-Pro:assets fangjun$ ls -lh
total 1056
-rw-r--r-- 1 fangjun staff 466B Mar 12 16:13 README.md
-rw-r--r-- 1 fangjun staff 523K Mar 12 16:14 gtcrn.onnx
```

View File

@@ -0,0 +1,98 @@
<!DOCTYPE html>
<html lang="en">
<!--
The UI code is modified from
https://huggingface.co/spaces/Banafo/Kroko-Streaming-ASR-Wasm
-->
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width" />
<title>Next-gen Kaldi WebAssembly with sherpa-onnx for speech enhancement</title>
<style>
h1,div {
text-align: center;
}
textarea {
width:100%;
}
.loading {
display: none !important;
}
</style>
</head>
<body>
<h1>
Next-gen Kaldi + WebAssembly<br/>
Speech Enhancement with <a href="https://github.com/k2-fsa/sherpa-onnx">sherpa-onnx</a><br/>
using <a href="https://github.com/Xiaobin-Rong/gtcrn">GTCRN</a>
</h1>
<div id="status">Loading...</div>
<div id="singleAudioContent" class="tab-content loading">
<div style="display: flex; gap: 1.5rem;">
<!-- Input Section -->
<div style="flex: 1; display: flex; flex-direction: column; gap: 1rem;">
<div style="font-size: 1rem; font-weight: bold; padding: 0.5rem 1rem; background-color: #f8f9fa; border-radius: 8px; display: flex; align-items: center; gap: 0.5rem; color: #6c757d;">
<span style="line-height: 1;">🎵</span> Input
</div>
<!-- Drag and Drop / File Upload -->
<div id="dropzone" style="border: 2px dashed #ced4da; border-radius: 8px; padding: 2rem; text-align: center; color: #6c757d; cursor: pointer; background-color: #f8f9fa; transition: background-color 0.3s, border-color 0.3s; position: relative;">
<input type="file" id="fileInput" accept=".wav" style="position: absolute; top: 0; left: 0; opacity: 0; width: 100%; height: 100%; cursor: pointer;" />
<p style="margin: 0;">Drop Audio Here (*.wav)<br>- or -<br>Click to Upload</p>
</div>
<audio id="inAudioPlayback" controls style="display: none; margin-top: 1rem; width: 100%;"></audio>
</div>
</div>
<div style="display: flex; gap: 1.5rem;">
<!-- Output Section -->
<div style="flex: 1; display: flex; flex-direction: column; gap: 1rem;">
<div style="font-size: 1rem; font-weight: bold; padding: 0.5rem 1rem; background-color: #f8f9fa; border-radius: 8px; display: flex; align-items: center; gap: 0.5rem; color: #6c757d;">
<span style="line-height: 1;">🎵</span> Output
</div>
<audio id="outAudioPlayback" controls style="display: none; margin-top: 1rem; width: 100%;"></audio>
</div>
</div>
<!-- Footer Section -->
<div style="width: 100%; max-width: 900px; margin-top: 1.5rem; background: #fff; padding: 1.5rem; border-radius: 8px; box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); text-align: left; font-size: 0.9rem; color: #6c757d;">
<h3>Description</h3>
<ul>
<li>Everything is <strong>open-sourced.</strong> <a href="https://github.com/k2-fsa/sherpa-onnx">code</a></li>
<li>The model is from <a href="https://github.com/Xiaobin-Rong/gtcrn">GTCRN</a></li>
<li>Please upload .wav files</li>
<ul>
<li>You can download noisy test wave files from <a href="https://htmlpreview.github.io/?https://github.com/Xiaobin-Rong/gtcrn_demo/blob/main/index.html">https://htmlpreview.github.io/?https://github.com/Xiaobin-Rong/gtcrn_demo/blob/main/index.html</a></li>
</ul>
<li>If you have any issues, please either <a href="https://github.com/k2-fsa/sherpa-onnx/issues">file a ticket</a> or contact us via</li>
<ul>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#wechat">WeChat group</a></li>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#qq">QQ group</a></li>
<li><a href="https://k2-fsa.github.io/sherpa/social-groups.html#bilibili-b">Bilibili</a></li>
</ul>
</ul>
<h3>About This Demo</h3>
<ul>
<li><strong>Private and Secure:</strong> All processing is done locally on your device (CPU) within your browser with a single thread. No server is involved, ensuring privacy and security. You can disconnect from the Internet once this page is loaded.</li>
<li><strong>Efficient Resource Usage:</strong> No GPU is required, leaving system resources available for webLLM analysis.</li>
</ul>
<h3>Latest Update</h3>
<ul>
<li>First working version.</li>
</ul>
<h3>Acknowledgement</h3>
<ul>
<li>We refer to <a href="https://huggingface.co/spaces/Banafo/Kroko-Streaming-ASR-Wasm">https://huggingface.co/spaces/Banafo/Kroko-Streaming-ASR-Wasm</a> for the UI part.</li>
</ul>
</div>
<script src="app-speech-enhancement.js"></script>
<script src="sherpa-onnx-wave.js"></script>
<script src="sherpa-onnx-speech-enhancement.js"></script>
<script src="sherpa-onnx-wasm-main-speech-enhancement.js"></script>
</body>
</html>

View File

@@ -0,0 +1,180 @@
// Recursively release every wasm-heap allocation referenced by a config
// object produced by the init*Config() helpers: the string buffer first,
// then any nested config objects, and finally the struct pointer itself.
function freeConfig(config, Module) {
  if ('buffer' in config) {
    Module._free(config.buffer);
  }

  for (const key of ['config', 'gtcrn']) {
    if (key in config) {
      freeConfig(config[key], Module);
    }
  }

  Module._free(config.ptr);
}
// Serialize SherpaOnnxOfflineSpeechDenoiserGtcrnModelConfig into the wasm
// heap. On wasm32 the C struct is a single `const char *model` pointer
// (4 bytes); see the static_assert in
// sherpa-onnx-wasm-main-speech-enhancement.cc.
//
// Returns {buffer, ptr, len}: `buffer` holds the NUL-terminated UTF-8 copy
// of the model path, `ptr` points to the struct, `len` is the struct size
// in bytes. Caller releases everything via freeConfig().
//
// (The original version carried dead `offset` bookkeeping — incremented,
// reset to 0, incremented again without ever changing an address — and a
// pointless `n` alias; both removed.)
function initSherpaOnnxOfflineSpeechDenoiserGtcrnModelConfig(config, Module) {
  if (!('model' in config)) {
    config.model = '';
  }

  // +1 for the trailing '\0'.
  const modelLen = Module.lengthBytesUTF8(config.model) + 1;
  const buffer = Module._malloc(modelLen);

  const len = 1 * 4;  // one 32-bit pointer field
  const ptr = Module._malloc(len);

  Module.stringToUTF8(config.model, buffer, modelLen);
  Module.setValue(ptr, buffer, 'i8*');

  return {
    buffer: buffer, ptr: ptr, len: len,
  }
}
// Serialize SherpaOnnxOfflineSpeechDenoiserModelConfig into the wasm heap.
// Byte layout (wasm32, mirrors the C struct):
//   [gtcrn struct][num_threads: i32][debug: i32][provider: char*]
// Returns {buffer, ptr, len, gtcrn}; freeConfig() releases all of it.
function initSherpaOnnxOfflineSpeechDenoiserModelConfig(config, Module) {
  if (!('gtcrn' in config)) {
    config.gtcrn = {model: ''};
  }

  const gtcrn =
      initSherpaOnnxOfflineSpeechDenoiserGtcrnModelConfig(config.gtcrn, Module);

  const len = gtcrn.len + 3 * 4;  // + num_threads + debug + provider pointer
  const ptr = Module._malloc(len);

  // Embed the already-serialized gtcrn struct at the front.
  let pos = 0;
  Module._CopyHeap(gtcrn.ptr, gtcrn.len, ptr + pos);
  pos += gtcrn.len;

  Module.setValue(ptr + pos, config.numThreads || 1, 'i32');
  pos += 4;
  Module.setValue(ptr + pos, config.debug || 0, 'i32');
  pos += 4;

  // Copy the provider string into the heap and store its pointer.
  const provider = config.provider || 'cpu';
  const providerLen = Module.lengthBytesUTF8(provider) + 1;
  const providerBuffer = Module._malloc(providerLen);
  Module.stringToUTF8(provider, providerBuffer, providerLen);
  Module.setValue(ptr + pos, providerBuffer, 'i8*');
  pos += 4;

  return {buffer: providerBuffer, ptr: ptr, len: len, gtcrn: gtcrn};
}
// Serialize SherpaOnnxOfflineSpeechDenoiserConfig into the wasm heap. The
// outer config currently wraps only the model config (see the static_assert
// in the C++ glue), so it is a byte-for-byte copy of it.
function initSherpaOnnxOfflineSpeechDenoiserConfig(config, Module) {
  if (!('model' in config)) {
    config.model = {
      gtcrn: {model: ''},
      provider: 'cpu',
      debug: 1,
      numThreads: 1,
    };
  }

  const modelConfig =
      initSherpaOnnxOfflineSpeechDenoiserModelConfig(config.model, Module);

  const ptr = Module._malloc(modelConfig.len);
  Module._CopyHeap(modelConfig.ptr, modelConfig.len, ptr);

  return {
    ptr: ptr, len: modelConfig.len, config: modelConfig,
  }
}
/**
 * A thin JS wrapper around the sherpa-onnx C-API offline speech denoiser
 * living inside the wasm module.
 */
class OfflineSpeechDenoiser {
  constructor(configObj, Module) {
    console.log(configObj)

    // Marshal the JS config into the wasm heap, create the denoiser,
    // then release the temporary config memory.
    const cfg = initSherpaOnnxOfflineSpeechDenoiserConfig(configObj, Module);
    // Module._MyPrint(cfg.ptr);
    this.handle = Module._SherpaOnnxCreateOfflineSpeechDenoiser(cfg.ptr);
    freeConfig(cfg, Module);

    this.sampleRate =
        Module._SherpaOnnxOfflineSpeechDenoiserGetSampleRate(this.handle);
    this.Module = Module;
  }

  // Destroy the wasm-side denoiser; the instance must not be used afterward.
  free() {
    this.Module._SherpaOnnxDestroyOfflineSpeechDenoiser(this.handle);
    this.handle = 0;
  }

  /**
   * @param samples {Float32Array} Containing samples in the range [-1, 1]
   * @param sampleRate {Number}
   */
  run(samples, sampleRate) {
    const Module = this.Module;

    // Copy the input samples into the wasm heap.
    const numBytes = samples.length * samples.BYTES_PER_ELEMENT;
    const inPtr = Module._malloc(numBytes);
    Module.HEAPF32.set(samples, inPtr / samples.BYTES_PER_ELEMENT);

    const audio = Module._SherpaOnnxOfflineSpeechDenoiserRun(
        this.handle, inPtr, samples.length, sampleRate);
    Module._free(inPtr);

    // The returned struct holds three 32-bit fields:
    // {samples pointer, sample count, sample rate}.
    const outIndex = Module.HEAP32[audio / 4] / 4;
    const numSamples = Module.HEAP32[audio / 4 + 1];
    const outSampleRate = Module.HEAP32[audio / 4 + 2];

    // Copy the result out of the heap before destroying the wasm object.
    const denoisedSamples = Module.HEAPF32.slice(outIndex, outIndex + numSamples);

    Module._SherpaOnnxDestroyDenoisedAudio(audio);
    return {samples: denoisedSamples, sampleRate: outSampleRate};
  }

  // Write {samples, sampleRate} to `filename` inside the wasm filesystem.
  save(filename, audio) {
    const Module = this.Module;
    const samples = audio.samples;

    const ptr = Module._malloc(samples.length * 4);
    Module.HEAPF32.set(samples, ptr / 4);

    const filenameLen = Module.lengthBytesUTF8(filename) + 1;
    const buffer = Module._malloc(filenameLen);
    Module.stringToUTF8(filename, buffer, filenameLen);

    Module._SherpaOnnxWriteWave(ptr, samples.length, audio.sampleRate, buffer);

    Module._free(buffer);
    Module._free(ptr);
  }
}
// Factory for OfflineSpeechDenoiser. Without `myConfig`, defaults to the
// ./gtcrn.onnx model bundled into the wasm filesystem via --preload-file.
function createOfflineSpeechDenoiser(Module, myConfig) {
  const defaultConfig = {
    model: {
      gtcrn: {model: './gtcrn.onnx'},
      debug: 0,
    },
  };
  return new OfflineSpeechDenoiser(myConfig || defaultConfig, Module);
}
if (typeof process == 'object' && typeof process.versions == 'object' &&
typeof process.versions.node == 'string') {
module.exports = {
createOfflineSpeechDenoiser,
};
}

View File

@@ -0,0 +1,39 @@
// wasm/sherpa-onnx-wasm-main-speech-enhancement.cc
//
// Copyright (c) 2025 Xiaomi Corporation
#include <stdio.h>
#include <algorithm>
#include <memory>
#include "sherpa-onnx/c-api/c-api.h"
// see also
// https://emscripten.org/docs/porting/connecting_cpp_and_javascript/Interacting-with-code.html
extern "C" {

// The JavaScript side (sherpa-onnx-speech-enhancement.js) serializes these
// C-API config structs by hand, writing raw bytes into the wasm heap at
// fixed offsets. The static_asserts below pin the struct sizes this build
// expects (each field is 4 bytes on wasm32 per the `1 * 4` / `3 * 4`
// arithmetic), so a layout change fails at compile time instead of
// corrupting memory at run time.
static_assert(sizeof(SherpaOnnxOfflineSpeechDenoiserGtcrnModelConfig) == 1 * 4,
              "");
static_assert(sizeof(SherpaOnnxOfflineSpeechDenoiserModelConfig) ==
                  sizeof(SherpaOnnxOfflineSpeechDenoiserGtcrnModelConfig) +
                      3 * 4,
              "");
static_assert(sizeof(SherpaOnnxOfflineSpeechDenoiserConfig) ==
                  sizeof(SherpaOnnxOfflineSpeechDenoiserModelConfig),
              "");

// Debug helper exported to JS as _MyPrint: dumps the config that was
// marshalled from JavaScript so field offsets can be verified by eye.
void MyPrint(SherpaOnnxOfflineSpeechDenoiserConfig *config) {
  auto model = &config->model;
  auto gtcrn = &model->gtcrn;
  fprintf(stdout, "----------offline speech denoiser model config----------\n");
  fprintf(stdout, "gtcrn: %s\n", gtcrn->model);
  fprintf(stdout, "num threads: %d\n", model->num_threads);
  fprintf(stdout, "debug: %d\n", model->debug);
  fprintf(stdout, "provider: %s\n", model->provider);
}

// Exported to JS as _CopyHeap: copies num_bytes from src to dst inside the
// wasm heap; used when embedding one serialized struct inside another.
void CopyHeap(const char *src, int32_t num_bytes, char *dst) {
  std::copy(src, src + num_bytes, dst);
}

}  // extern "C"

View File

@@ -0,0 +1 @@
../nodejs/sherpa-onnx-wave.js