Add Java and Kotlin API for sense voice (#1164)
This commit is contained in:
14
.github/workflows/run-java-test.yaml
vendored
14
.github/workflows/run-java-test.yaml
vendored
@@ -114,6 +114,16 @@ jobs:
|
||||
./run-kws-from-file.sh
|
||||
rm -rf sherpa-onnx-*
|
||||
|
||||
- name: Run java test (VAD + Non-streaming SenseVoice)
|
||||
shell: bash
|
||||
run: |
|
||||
cd ./java-api-examples
|
||||
./run-vad-non-streaming-sense-voice.sh
|
||||
rm *.onnx
|
||||
ls -lh *.wav
|
||||
rm *.wav
|
||||
rm -rf sherpa-onnx-*
|
||||
|
||||
- name: Run java test (VAD + Non-streaming Paraformer)
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -193,6 +203,10 @@ jobs:
|
||||
shell: bash
|
||||
run: |
|
||||
cd ./java-api-examples
|
||||
|
||||
./run-non-streaming-decode-file-sense-voice.sh
|
||||
rm -rf sherpa-onnx-sense-voice-*
|
||||
|
||||
./run-inverse-text-normalization-paraformer.sh
|
||||
|
||||
./run-non-streaming-decode-file-paraformer.sh
|
||||
|
||||
50
java-api-examples/NonStreamingDecodeFileSenseVoice.java
Normal file
50
java-api-examples/NonStreamingDecodeFileSenseVoice.java
Normal file
@@ -0,0 +1,50 @@
|
||||
// Copyright 2024 Xiaomi Corporation
|
||||
|
||||
// This file shows how to use an offline SenseVoice model,
|
||||
// i.e., non-streaming SenseVoice model,
|
||||
// to decode files.
|
||||
import com.k2fsa.sherpa.onnx.*;
|
||||
|
||||
public class NonStreamingDecodeFileSenseVoice {
|
||||
public static void main(String[] args) {
|
||||
// please refer to
|
||||
// https://k2-fsa.github.io/sherpa/onnx/sense-voice/index.html
|
||||
// to download model files
|
||||
String model = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.int8.onnx";
|
||||
String tokens = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt";
|
||||
|
||||
String waveFilename = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/test_wavs/zh.wav";
|
||||
|
||||
WaveReader reader = new WaveReader(waveFilename);
|
||||
|
||||
OfflineSenseVoiceModelConfig senseVoice =
|
||||
OfflineSenseVoiceModelConfig.builder().setModel(model).build();
|
||||
|
||||
OfflineModelConfig modelConfig =
|
||||
OfflineModelConfig.builder()
|
||||
.setSenseVoice(senseVoice)
|
||||
.setTokens(tokens)
|
||||
.setNumThreads(1)
|
||||
.setDebug(true)
|
||||
.build();
|
||||
|
||||
OfflineRecognizerConfig config =
|
||||
OfflineRecognizerConfig.builder()
|
||||
.setOfflineModelConfig(modelConfig)
|
||||
.setDecodingMethod("greedy_search")
|
||||
.build();
|
||||
|
||||
OfflineRecognizer recognizer = new OfflineRecognizer(config);
|
||||
OfflineStream stream = recognizer.createStream();
|
||||
stream.acceptWaveform(reader.getSamples(), reader.getSampleRate());
|
||||
|
||||
recognizer.decode(stream);
|
||||
|
||||
String text = recognizer.getResult(stream).getText();
|
||||
|
||||
System.out.printf("filename:%s\nresult:%s\n", waveFilename, text);
|
||||
|
||||
stream.release();
|
||||
recognizer.release();
|
||||
}
|
||||
}
|
||||
@@ -18,6 +18,7 @@ This directory contains examples for the JAVA API of sherpa-onnx.
|
||||
|
||||
```bash
|
||||
./run-non-streaming-decode-file-paraformer.sh
|
||||
./run-non-streaming-decode-file-sense-voice.sh
|
||||
./run-non-streaming-decode-file-transducer.sh
|
||||
./run-non-streaming-decode-file-whisper.sh
|
||||
./run-non-streaming-decode-file-nemo.sh
|
||||
@@ -64,6 +65,12 @@ The punctuation model supports both English and Chinese.
|
||||
./run-vad-from-mic.sh
|
||||
```
|
||||
|
||||
## VAD with a microphone + Non-streaming SenseVoice for speech recognition
|
||||
|
||||
```bash
|
||||
./run-vad-from-mic-non-streaming-sense-voice.sh
|
||||
```
|
||||
|
||||
## VAD with a microphone + Non-streaming Paraformer for speech recognition
|
||||
|
||||
```bash
|
||||
@@ -82,6 +89,12 @@ The punctuation model supports both English and Chinese.
|
||||
./run-vad-remove-slience.sh
|
||||
```
|
||||
|
||||
## VAD + Non-streaming SenseVoice for speech recognition
|
||||
|
||||
```bash
|
||||
./run-vad-non-streaming-sense-voice.sh
|
||||
```
|
||||
|
||||
## VAD + Non-streaming Paraformer for speech recognition
|
||||
|
||||
```bash
|
||||
|
||||
142
java-api-examples/VadFromMicWithNonStreamingSenseVoice.java
Normal file
142
java-api-examples/VadFromMicWithNonStreamingSenseVoice.java
Normal file
@@ -0,0 +1,142 @@
|
||||
// Copyright 2024 Xiaomi Corporation
|
||||
|
||||
// This file shows how to use a silero_vad model with a non-streaming
|
||||
// SenseVoice model for speech recognition.
|
||||
|
||||
import com.k2fsa.sherpa.onnx.*;
|
||||
import javax.sound.sampled.*;
|
||||
|
||||
public class VadFromMicWithNonStreamingSenseVoice {
|
||||
private static final int sampleRate = 16000;
|
||||
private static final int windowSize = 512;
|
||||
|
||||
public static Vad createVad() {
|
||||
// please download ./silero_vad.onnx from
|
||||
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
|
||||
String model = "./silero_vad.onnx";
|
||||
SileroVadModelConfig sileroVad =
|
||||
SileroVadModelConfig.builder()
|
||||
.setModel(model)
|
||||
.setThreshold(0.5f)
|
||||
.setMinSilenceDuration(0.25f)
|
||||
.setMinSpeechDuration(0.5f)
|
||||
.setWindowSize(windowSize)
|
||||
.build();
|
||||
|
||||
VadModelConfig config =
|
||||
VadModelConfig.builder()
|
||||
.setSileroVadModelConfig(sileroVad)
|
||||
.setSampleRate(sampleRate)
|
||||
.setNumThreads(1)
|
||||
.setDebug(true)
|
||||
.setProvider("cpu")
|
||||
.build();
|
||||
|
||||
return new Vad(config);
|
||||
}
|
||||
|
||||
public static OfflineRecognizer createOfflineRecognizer() {
|
||||
// please refer to
|
||||
// https://k2-fsa.github.io/sherpa/onnx/sense-voice/index.html
|
||||
// to download model files
|
||||
String model = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.int8.onnx";
|
||||
String tokens = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt";
|
||||
|
||||
OfflineSenseVoiceModelConfig senseVoice =
|
||||
OfflineSenseVoiceModelConfig.builder().setModel(model).build();
|
||||
|
||||
OfflineModelConfig modelConfig =
|
||||
OfflineModelConfig.builder()
|
||||
.setSenseVoice(senseVoice)
|
||||
.setTokens(tokens)
|
||||
.setNumThreads(1)
|
||||
.setDebug(true)
|
||||
.build();
|
||||
|
||||
OfflineRecognizerConfig config =
|
||||
OfflineRecognizerConfig.builder()
|
||||
.setOfflineModelConfig(modelConfig)
|
||||
.setDecodingMethod("greedy_search")
|
||||
.build();
|
||||
|
||||
return new OfflineRecognizer(config);
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
Vad vad = createVad();
|
||||
OfflineRecognizer recognizer = createOfflineRecognizer();
|
||||
|
||||
// https://docs.oracle.com/javase/8/docs/api/javax/sound/sampled/AudioFormat.html
|
||||
// Linear PCM, 16000Hz, 16-bit, 1 channel, signed, little endian
|
||||
AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, false);
|
||||
|
||||
// https://docs.oracle.com/javase/8/docs/api/javax/sound/sampled/DataLine.Info.html#Info-java.lang.Class-javax.sound.sampled.AudioFormat-int-
|
||||
DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
|
||||
TargetDataLine targetDataLine;
|
||||
try {
|
||||
targetDataLine = (TargetDataLine) AudioSystem.getLine(info);
|
||||
targetDataLine.open(format);
|
||||
targetDataLine.start();
|
||||
} catch (LineUnavailableException e) {
|
||||
System.out.println("Failed to open target data line: " + e.getMessage());
|
||||
vad.release();
|
||||
recognizer.release();
|
||||
return;
|
||||
}
|
||||
|
||||
boolean printed = false;
|
||||
byte[] buffer = new byte[windowSize * 2];
|
||||
float[] samples = new float[windowSize];
|
||||
|
||||
System.out.println("Started. Please speak");
|
||||
boolean running = true;
|
||||
while (targetDataLine.isOpen() && running) {
|
||||
int n = targetDataLine.read(buffer, 0, buffer.length);
|
||||
if (n <= 0) {
|
||||
System.out.printf("Got %d bytes. Expected %d bytes.\n", n, buffer.length);
|
||||
continue;
|
||||
}
|
||||
for (int i = 0; i != windowSize; ++i) {
|
||||
short low = buffer[2 * i];
|
||||
short high = buffer[2 * i + 1];
|
||||
int s = (high << 8) + low;
|
||||
samples[i] = (float) s / 32768;
|
||||
}
|
||||
|
||||
vad.acceptWaveform(samples);
|
||||
if (vad.isSpeechDetected() && !printed) {
|
||||
System.out.println("Detected speech");
|
||||
printed = true;
|
||||
}
|
||||
|
||||
if (!vad.isSpeechDetected()) {
|
||||
printed = false;
|
||||
}
|
||||
|
||||
while (!vad.empty()) {
|
||||
SpeechSegment segment = vad.front();
|
||||
float startTime = segment.getStart() / (float) sampleRate;
|
||||
float duration = segment.getSamples().length / (float) sampleRate;
|
||||
|
||||
OfflineStream stream = recognizer.createStream();
|
||||
stream.acceptWaveform(segment.getSamples(), sampleRate);
|
||||
recognizer.decode(stream);
|
||||
String text = recognizer.getResult(stream).getText();
|
||||
stream.release();
|
||||
|
||||
if (!text.isEmpty()) {
|
||||
System.out.printf("%.3f--%.3f: %s\n", startTime, startTime + duration, text);
|
||||
}
|
||||
|
||||
if (text.contains("退出程序")) {
|
||||
running = false;
|
||||
}
|
||||
|
||||
vad.pop();
|
||||
}
|
||||
}
|
||||
|
||||
vad.release();
|
||||
recognizer.release();
|
||||
}
|
||||
}
|
||||
123
java-api-examples/VadNonStreamingSenseVoice.java
Normal file
123
java-api-examples/VadNonStreamingSenseVoice.java
Normal file
@@ -0,0 +1,123 @@
|
||||
// Copyright 2024 Xiaomi Corporation
|
||||
|
||||
// This file shows how to use a silero_vad model with a non-streaming SenseVoiceModel
|
||||
// for speech recognition.
|
||||
|
||||
import com.k2fsa.sherpa.onnx.*;
|
||||
import java.util.Arrays;
|
||||
|
||||
public class VadNonStreamingSenseVoice {
|
||||
public static Vad createVad() {
|
||||
// please download ./silero_vad.onnx from
|
||||
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
|
||||
String model = "./silero_vad.onnx";
|
||||
SileroVadModelConfig sileroVad =
|
||||
SileroVadModelConfig.builder()
|
||||
.setModel(model)
|
||||
.setThreshold(0.5f)
|
||||
.setMinSilenceDuration(0.25f)
|
||||
.setMinSpeechDuration(0.5f)
|
||||
.setWindowSize(512)
|
||||
.build();
|
||||
|
||||
VadModelConfig config =
|
||||
VadModelConfig.builder()
|
||||
.setSileroVadModelConfig(sileroVad)
|
||||
.setSampleRate(16000)
|
||||
.setNumThreads(1)
|
||||
.setDebug(true)
|
||||
.setProvider("cpu")
|
||||
.build();
|
||||
|
||||
return new Vad(config);
|
||||
}
|
||||
|
||||
public static OfflineRecognizer createOfflineRecognizer() {
|
||||
// please refer to
|
||||
// https://k2-fsa.github.io/sherpa/onnx/sense-voice/index.html
|
||||
// to download model files
|
||||
String model = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.int8.onnx";
|
||||
String tokens = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt";
|
||||
|
||||
OfflineSenseVoiceModelConfig senseVoice =
|
||||
OfflineSenseVoiceModelConfig.builder().setModel(model).build();
|
||||
|
||||
OfflineModelConfig modelConfig =
|
||||
OfflineModelConfig.builder()
|
||||
.setSenseVoice(senseVoice)
|
||||
.setTokens(tokens)
|
||||
.setNumThreads(1)
|
||||
.setDebug(true)
|
||||
.build();
|
||||
|
||||
OfflineRecognizerConfig config =
|
||||
OfflineRecognizerConfig.builder()
|
||||
.setOfflineModelConfig(modelConfig)
|
||||
.setDecodingMethod("greedy_search")
|
||||
.build();
|
||||
|
||||
return new OfflineRecognizer(config);
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
|
||||
Vad vad = createVad();
|
||||
OfflineRecognizer recognizer = createOfflineRecognizer();
|
||||
|
||||
// You can download the test file from
|
||||
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
|
||||
String testWaveFilename = "./lei-jun-test.wav";
|
||||
WaveReader reader = new WaveReader(testWaveFilename);
|
||||
|
||||
int numSamples = reader.getSamples().length;
|
||||
int numIter = numSamples / 512;
|
||||
|
||||
for (int i = 0; i != numIter; ++i) {
|
||||
int start = i * 512;
|
||||
int end = start + 512;
|
||||
float[] samples = Arrays.copyOfRange(reader.getSamples(), start, end);
|
||||
vad.acceptWaveform(samples);
|
||||
if (vad.isSpeechDetected()) {
|
||||
while (!vad.empty()) {
|
||||
SpeechSegment segment = vad.front();
|
||||
float startTime = segment.getStart() / 16000.0f;
|
||||
float duration = segment.getSamples().length / 16000.0f;
|
||||
|
||||
OfflineStream stream = recognizer.createStream();
|
||||
stream.acceptWaveform(segment.getSamples(), 16000);
|
||||
recognizer.decode(stream);
|
||||
String text = recognizer.getResult(stream).getText();
|
||||
stream.release();
|
||||
|
||||
if (!text.isEmpty()) {
|
||||
System.out.printf("%.3f--%.3f: %s\n", startTime, startTime + duration, text);
|
||||
}
|
||||
|
||||
vad.pop();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
vad.flush();
|
||||
while (!vad.empty()) {
|
||||
SpeechSegment segment = vad.front();
|
||||
float startTime = segment.getStart() / 16000.0f;
|
||||
float duration = segment.getSamples().length / 16000.0f;
|
||||
|
||||
OfflineStream stream = recognizer.createStream();
|
||||
stream.acceptWaveform(segment.getSamples(), 16000);
|
||||
recognizer.decode(stream);
|
||||
String text = recognizer.getResult(stream).getText();
|
||||
stream.release();
|
||||
|
||||
if (!text.isEmpty()) {
|
||||
System.out.printf("%.3f--%.3f: %s\n", startTime, startTime + duration, text);
|
||||
}
|
||||
|
||||
vad.pop();
|
||||
}
|
||||
|
||||
vad.release();
|
||||
recognizer.release();
|
||||
}
|
||||
}
|
||||
37
java-api-examples/run-non-streaming-decode-file-sense-voice.sh
Executable file
37
java-api-examples/run-non-streaming-decode-file-sense-voice.sh
Executable file
@@ -0,0 +1,37 @@
|
||||
#!/usr/bin/env bash
# Runs the NonStreamingDecodeFileSenseVoice.java example: builds the
# sherpa-onnx JNI library and Java API jar if missing, downloads the
# SenseVoice model if missing, then decodes a bundled test wave file.

set -ex

# Build the JNI shared library (.dylib on macOS, .so on Linux) if it is absent.
if [[ ! -f ../build/lib/libsherpa-onnx-jni.dylib && ! -f ../build/lib/libsherpa-onnx-jni.so ]]; then
  mkdir -p ../build
  pushd ../build
  cmake \
    -DSHERPA_ONNX_ENABLE_PYTHON=OFF \
    -DSHERPA_ONNX_ENABLE_TESTS=OFF \
    -DSHERPA_ONNX_ENABLE_CHECK=OFF \
    -DBUILD_SHARED_LIBS=ON \
    -DSHERPA_ONNX_ENABLE_PORTAUDIO=OFF \
    -DSHERPA_ONNX_ENABLE_JNI=ON \
    ..

  make -j4
  ls -lh lib
  popd
fi

# Build the Java API jar if it is absent.
if [ ! -f ../sherpa-onnx/java-api/build/sherpa-onnx.jar ]; then
  pushd ../sherpa-onnx/java-api
  make
  popd
fi

# Download and unpack the SenseVoice model if it is absent
# (tokens.txt is used as the presence marker).
if [ ! -f ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt ]; then
  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
  tar xvf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
  rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
fi

# Run the example directly from source (single-file source launch).
java \
  -Djava.library.path=$PWD/../build/lib \
  -cp ../sherpa-onnx/java-api/build/sherpa-onnx.jar \
  NonStreamingDecodeFileSenseVoice.java
|
||||
41
java-api-examples/run-vad-from-mic-non-streaming-sense-voice.sh
Executable file
41
java-api-examples/run-vad-from-mic-non-streaming-sense-voice.sh
Executable file
@@ -0,0 +1,41 @@
|
||||
#!/usr/bin/env bash
# Runs the VadFromMicWithNonStreamingSenseVoice.java example: builds the
# sherpa-onnx JNI library and Java API jar if missing, downloads the silero
# VAD model and the SenseVoice model if missing, then starts microphone
# capture with VAD + SenseVoice recognition.

set -ex

# Build the JNI shared library (.dylib on macOS, .so on Linux) if it is absent.
if [[ ! -f ../build/lib/libsherpa-onnx-jni.dylib && ! -f ../build/lib/libsherpa-onnx-jni.so ]]; then
  mkdir -p ../build
  pushd ../build
  cmake \
    -DSHERPA_ONNX_ENABLE_PYTHON=OFF \
    -DSHERPA_ONNX_ENABLE_TESTS=OFF \
    -DSHERPA_ONNX_ENABLE_CHECK=OFF \
    -DBUILD_SHARED_LIBS=ON \
    -DSHERPA_ONNX_ENABLE_PORTAUDIO=OFF \
    -DSHERPA_ONNX_ENABLE_JNI=ON \
    ..

  make -j4
  ls -lh lib
  popd
fi

# Build the Java API jar if it is absent.
if [ ! -f ../sherpa-onnx/java-api/build/sherpa-onnx.jar ]; then
  pushd ../sherpa-onnx/java-api
  make
  popd
fi

# Download the silero VAD model if it is absent.
if [ ! -f ./silero_vad.onnx ]; then
  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
fi

# Download and unpack the SenseVoice model if it is absent
# (tokens.txt is used as the presence marker).
if [ ! -f ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt ]; then
  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
  tar xvf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
  rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
fi

# Run the example directly from source (single-file source launch).
java \
  -Djava.library.path=$PWD/../build/lib \
  -cp ../sherpa-onnx/java-api/build/sherpa-onnx.jar \
  ./VadFromMicWithNonStreamingSenseVoice.java
|
||||
45
java-api-examples/run-vad-non-streaming-sense-voice.sh
Executable file
45
java-api-examples/run-vad-non-streaming-sense-voice.sh
Executable file
@@ -0,0 +1,45 @@
|
||||
#!/usr/bin/env bash
# Runs the VadNonStreamingSenseVoice.java example: builds the sherpa-onnx JNI
# library and Java API jar if missing, downloads the silero VAD model, a test
# wave file, and the SenseVoice model if missing, then decodes the wave file
# segment-by-segment with VAD + SenseVoice.

set -ex

# Build the JNI shared library (.dylib on macOS, .so on Linux) if it is absent.
if [[ ! -f ../build/lib/libsherpa-onnx-jni.dylib && ! -f ../build/lib/libsherpa-onnx-jni.so ]]; then
  mkdir -p ../build
  pushd ../build
  cmake \
    -DSHERPA_ONNX_ENABLE_PYTHON=OFF \
    -DSHERPA_ONNX_ENABLE_TESTS=OFF \
    -DSHERPA_ONNX_ENABLE_CHECK=OFF \
    -DBUILD_SHARED_LIBS=ON \
    -DSHERPA_ONNX_ENABLE_PORTAUDIO=OFF \
    -DSHERPA_ONNX_ENABLE_JNI=ON \
    ..

  make -j4
  ls -lh lib
  popd
fi

# Build the Java API jar if it is absent.
if [ ! -f ../sherpa-onnx/java-api/build/sherpa-onnx.jar ]; then
  pushd ../sherpa-onnx/java-api
  make
  popd
fi

# Download the silero VAD model if it is absent.
if [ ! -f ./silero_vad.onnx ]; then
  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/silero_vad.onnx
fi

# Download the test wave file if it is absent.
if [ ! -f ./lei-jun-test.wav ]; then
  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/lei-jun-test.wav
fi

# Download and unpack the SenseVoice model if it is absent
# (tokens.txt is used as the presence marker).
if [ ! -f ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt ]; then
  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
  tar xvf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
  rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
fi

# Run the example directly from source (single-file source launch).
java \
  -Djava.library.path=$PWD/../build/lib \
  -cp ../sherpa-onnx/java-api/build/sherpa-onnx.jar \
  ./VadNonStreamingSenseVoice.java
|
||||
@@ -167,6 +167,12 @@ function testSpokenLanguageIdentification() {
|
||||
}
|
||||
|
||||
function testOfflineAsr() {
|
||||
if [ ! -f ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt ]; then
|
||||
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
|
||||
tar xvf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
|
||||
rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
|
||||
fi
|
||||
|
||||
if [ ! -f ./sherpa-onnx-whisper-tiny.en/tiny.en-encoder.int8.onnx ]; then
|
||||
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-whisper-tiny.en.tar.bz2
|
||||
tar xvf sherpa-onnx-whisper-tiny.en.tar.bz2
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package com.k2fsa.sherpa.onnx
|
||||
|
||||
fun main() {
|
||||
val types = arrayOf(0, 2, 5, 6)
|
||||
val types = arrayOf(0, 2, 5, 6, 15)
|
||||
for (type in types) {
|
||||
test(type)
|
||||
}
|
||||
@@ -15,6 +15,7 @@ fun test(type: Int) {
|
||||
2 -> "./sherpa-onnx-whisper-tiny.en/test_wavs/0.wav"
|
||||
5 -> "./sherpa-onnx-zipformer-multi-zh-hans-2023-9-2/test_wavs/1.wav"
|
||||
6 -> "./sherpa-onnx-nemo-ctc-en-citrinet-512/test_wavs/8k.wav"
|
||||
15 -> "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/test_wavs/zh.wav"
|
||||
else -> null
|
||||
}
|
||||
|
||||
|
||||
@@ -89,6 +89,23 @@ def get_models():
|
||||
popd
|
||||
""",
|
||||
),
|
||||
Model(
|
||||
model_name="sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17",
|
||||
idx=15,
|
||||
lang="zh_en_ko_ja_yue",
|
||||
short_name="sense_voice",
|
||||
cmd="""
|
||||
pushd $model_name
|
||||
|
||||
rm -rfv test_wavs
|
||||
rm -fv model.onnx
|
||||
rm -fv *.py
|
||||
|
||||
ls -lh
|
||||
|
||||
popd
|
||||
""",
|
||||
),
|
||||
Model(
|
||||
model_name="sherpa-onnx-paraformer-zh-small-2024-03-09",
|
||||
idx=14,
|
||||
|
||||
@@ -27,6 +27,7 @@ java_files += OfflineTransducerModelConfig.java
|
||||
java_files += OfflineParaformerModelConfig.java
|
||||
java_files += OfflineWhisperModelConfig.java
|
||||
java_files += OfflineNemoEncDecCtcModelConfig.java
|
||||
java_files += OfflineSenseVoiceModelConfig.java
|
||||
java_files += OfflineModelConfig.java
|
||||
java_files += OfflineRecognizerConfig.java
|
||||
java_files += OfflineRecognizerResult.java
|
||||
|
||||
@@ -7,6 +7,7 @@ public class OfflineModelConfig {
|
||||
private final OfflineParaformerModelConfig paraformer;
|
||||
private final OfflineWhisperModelConfig whisper;
|
||||
private final OfflineNemoEncDecCtcModelConfig nemo;
|
||||
private final OfflineSenseVoiceModelConfig senseVoice;
|
||||
private final String teleSpeech;
|
||||
private final String tokens;
|
||||
private final int numThreads;
|
||||
@@ -22,6 +23,7 @@ public class OfflineModelConfig {
|
||||
this.paraformer = builder.paraformer;
|
||||
this.whisper = builder.whisper;
|
||||
this.nemo = builder.nemo;
|
||||
this.senseVoice = builder.senseVoice;
|
||||
this.teleSpeech = builder.teleSpeech;
|
||||
this.tokens = builder.tokens;
|
||||
this.numThreads = builder.numThreads;
|
||||
@@ -48,6 +50,10 @@ public class OfflineModelConfig {
|
||||
return whisper;
|
||||
}
|
||||
|
||||
public OfflineSenseVoiceModelConfig getSenseVoice() {
|
||||
return senseVoice;
|
||||
}
|
||||
|
||||
public String getTokens() {
|
||||
return tokens;
|
||||
}
|
||||
@@ -85,6 +91,7 @@ public class OfflineModelConfig {
|
||||
private OfflineTransducerModelConfig transducer = OfflineTransducerModelConfig.builder().build();
|
||||
private OfflineWhisperModelConfig whisper = OfflineWhisperModelConfig.builder().build();
|
||||
private OfflineNemoEncDecCtcModelConfig nemo = OfflineNemoEncDecCtcModelConfig.builder().build();
|
||||
private OfflineSenseVoiceModelConfig senseVoice = OfflineSenseVoiceModelConfig.builder().build();
|
||||
private String teleSpeech = "";
|
||||
private String tokens = "";
|
||||
private int numThreads = 1;
|
||||
@@ -113,7 +120,6 @@ public class OfflineModelConfig {
|
||||
return this;
|
||||
}
|
||||
|
||||
|
||||
public Builder setTeleSpeech(String teleSpeech) {
|
||||
this.teleSpeech = teleSpeech;
|
||||
return this;
|
||||
@@ -124,6 +130,11 @@ public class OfflineModelConfig {
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setSenseVoice(OfflineSenseVoiceModelConfig senseVoice) {
|
||||
this.senseVoice = senseVoice;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setTokens(String tokens) {
|
||||
this.tokens = tokens;
|
||||
return this;
|
||||
|
||||
@@ -0,0 +1,56 @@
|
||||
// Copyright 2024 Xiaomi Corporation
|
||||
|
||||
package com.k2fsa.sherpa.onnx;
|
||||
|
||||
/**
 * Immutable configuration for the offline (non-streaming) SenseVoice model.
 *
 * <p>Instances are created through {@link #builder()}.
 *
 * <p>NOTE: the field names below are looked up reflectively from the JNI layer
 * (GetFieldID on {@code model}, {@code language} and
 * {@code useInverseTextNormalization}); do not rename them.
 */
public class OfflineSenseVoiceModelConfig {
  private final String model;
  private final String language;
  private final boolean useInverseTextNormalization;

  private OfflineSenseVoiceModelConfig(Builder builder) {
    model = builder.model;
    language = builder.language;
    useInverseTextNormalization = builder.useInverseTextNormalization;
  }

  /** Returns a builder with defaults: empty model and language, ITN enabled. */
  public static Builder builder() {
    return new Builder();
  }

  /** Path to the SenseVoice onnx model file. */
  public String getModel() {
    return model;
  }

  /** Language hint passed to the model; may be empty. */
  public String getLanguage() {
    return language;
  }

  /** Whether inverse text normalization is applied to the result. */
  public boolean getUseInverseTextNormalization() {
    return useInverseTextNormalization;
  }

  /** Fluent builder for {@link OfflineSenseVoiceModelConfig}. */
  public static class Builder {
    private String model = "";
    private String language = "";
    private boolean useInverseTextNormalization = true;

    public Builder setModel(String model) {
      this.model = model;
      return this;
    }

    public Builder setLanguage(String language) {
      this.language = language;
      return this;
    }

    public Builder setInverseTextNormalization(boolean useInverseTextNormalization) {
      this.useInverseTextNormalization = useInverseTextNormalization;
      return this;
    }

    public OfflineSenseVoiceModelConfig build() {
      return new OfflineSenseVoiceModelConfig(this);
    }
  }
}
|
||||
@@ -171,6 +171,31 @@ static OfflineRecognizerConfig GetOfflineConfig(JNIEnv *env, jobject config) {
|
||||
ans.model_config.whisper.tail_paddings =
|
||||
env->GetIntField(whisper_config, fid);
|
||||
|
||||
// sense voice
|
||||
fid = env->GetFieldID(model_config_cls, "senseVoice",
|
||||
"Lcom/k2fsa/sherpa/onnx/OfflineSenseVoiceModelConfig;");
|
||||
jobject sense_voice_config = env->GetObjectField(model_config, fid);
|
||||
jclass sense_voice_config_cls = env->GetObjectClass(sense_voice_config);
|
||||
|
||||
fid = env->GetFieldID(sense_voice_config_cls, "model", "Ljava/lang/String;");
|
||||
s = (jstring)env->GetObjectField(sense_voice_config, fid);
|
||||
p = env->GetStringUTFChars(s, nullptr);
|
||||
ans.model_config.sense_voice.model = p;
|
||||
env->ReleaseStringUTFChars(s, p);
|
||||
|
||||
fid =
|
||||
env->GetFieldID(sense_voice_config_cls, "language", "Ljava/lang/String;");
|
||||
s = (jstring)env->GetObjectField(sense_voice_config, fid);
|
||||
p = env->GetStringUTFChars(s, nullptr);
|
||||
ans.model_config.sense_voice.language = p;
|
||||
env->ReleaseStringUTFChars(s, p);
|
||||
|
||||
fid = env->GetFieldID(sense_voice_config_cls, "useInverseTextNormalization",
|
||||
"Z");
|
||||
ans.model_config.sense_voice.use_itn =
|
||||
env->GetBooleanField(sense_voice_config, fid);
|
||||
|
||||
// nemo
|
||||
fid = env->GetFieldID(
|
||||
model_config_cls, "nemo",
|
||||
"Lcom/k2fsa/sherpa/onnx/OfflineNemoEncDecCtcModelConfig;");
|
||||
|
||||
@@ -30,11 +30,18 @@ data class OfflineWhisperModelConfig(
|
||||
var tailPaddings: Int = 1000, // Padding added at the end of the samples
|
||||
)
|
||||
|
||||
// Kotlin-side configuration for the offline (non-streaming) SenseVoice model.
// Mirrors the Java OfflineSenseVoiceModelConfig; field names are read from
// native code, so they must stay in sync across the API layers — do not rename.
data class OfflineSenseVoiceModelConfig(
    var model: String = "", // path to the SenseVoice onnx model file
    var language: String = "", // language hint; presumably empty means auto — TODO confirm
    var useInverseTextNormalization: Boolean = true, // apply ITN to the recognition result
)
|
||||
|
||||
data class OfflineModelConfig(
|
||||
var transducer: OfflineTransducerModelConfig = OfflineTransducerModelConfig(),
|
||||
var paraformer: OfflineParaformerModelConfig = OfflineParaformerModelConfig(),
|
||||
var whisper: OfflineWhisperModelConfig = OfflineWhisperModelConfig(),
|
||||
var nemo: OfflineNemoEncDecCtcModelConfig = OfflineNemoEncDecCtcModelConfig(),
|
||||
var senseVoice: OfflineSenseVoiceModelConfig = OfflineSenseVoiceModelConfig(),
|
||||
var teleSpeech: String = "",
|
||||
var numThreads: Int = 1,
|
||||
var debug: Boolean = false,
|
||||
@@ -321,6 +328,16 @@ fun getOfflineModelConfig(type: Int): OfflineModelConfig? {
|
||||
modelType = "paraformer",
|
||||
)
|
||||
}
|
||||
|
||||
15 -> {
|
||||
val modelDir = "sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17"
|
||||
return OfflineModelConfig(
|
||||
senseVoice = OfflineSenseVoiceModelConfig(
|
||||
model = "$modelDir/model.int8.onnx",
|
||||
),
|
||||
tokens = "$modelDir/tokens.txt",
|
||||
)
|
||||
}
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user