add java wrapper support (#117)

zhaomingwork
2023-04-15 22:17:28 +08:00
committed by GitHub
parent 916e5adc1e
commit 4b5d2887cb
14 changed files with 1196 additions and 3 deletions

EndpointConfig.java

@@ -0,0 +1,29 @@
/*
* // Copyright 2022-2023 by zhaoming
*/
package com.k2fsa.sherpa.onnx;
public class EndpointConfig {
private final EndpointRule rule1;
private final EndpointRule rule2;
private final EndpointRule rule3;
public EndpointConfig(EndpointRule rule1, EndpointRule rule2, EndpointRule rule3) {
this.rule1 = rule1;
this.rule2 = rule2;
this.rule3 = rule3;
}
public EndpointRule getRule1() {
return rule1;
}
public EndpointRule getRule2() {
return rule2;
}
public EndpointRule getRule3() {
return rule3;
}
}

EndpointRule.java

@@ -0,0 +1,30 @@
/*
* // Copyright 2022-2023 by zhaoming
*/
package com.k2fsa.sherpa.onnx;
public class EndpointRule {
private final boolean mustContainNonSilence;
private final float minTrailingSilence;
private final float minUtteranceLength;
public EndpointRule(
boolean mustContainNonSilence, float minTrailingSilence, float minUtteranceLength) {
this.mustContainNonSilence = mustContainNonSilence;
this.minTrailingSilence = minTrailingSilence;
this.minUtteranceLength = minUtteranceLength;
}
public float getMinTrailingSilence() {
return minTrailingSilence;
}
public float getMinUtteranceLength() {
return minUtteranceLength;
}
public boolean getMustContainNonSilence() {
return mustContainNonSilence;
}
}
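
For orientation, here is a minimal sketch of how these two classes fit together, following the same pattern the OnlineRecognizer constructors below use; the numeric thresholds are illustrative values, not library defaults:

// Illustrative endpoint setup; the threshold values below are examples only.
EndpointRule rule1 = new EndpointRule(false, 2.4f, 0.0f);  // endpoint after long trailing silence
EndpointRule rule2 = new EndpointRule(true, 1.2f, 0.0f);   // shorter silence, but only after speech was seen
EndpointRule rule3 = new EndpointRule(false, 0.0f, 20.0f); // force an endpoint once the utterance reaches 20 s
EndpointConfig endpointConfig = new EndpointConfig(rule1, rule2, rule3);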

FeatureConfig.java

@@ -0,0 +1,23 @@
/*
* // Copyright 2022-2023 by zhaoming
*/
package com.k2fsa.sherpa.onnx;
public class FeatureConfig {
private final int sampleRate;
private final int featureDim;
public FeatureConfig(int sampleRate, int featureDim) {
this.sampleRate = sampleRate;
this.featureDim = featureDim;
}
public int getSampleRate() {
return sampleRate;
}
public int getFeatureDim() {
return featureDim;
}
}

OnlineRecognizer.java

@@ -0,0 +1,304 @@
/*
* // Copyright 2022-2023 by zhaoming
* // The online recognizer for sherpa-onnx. It can load its configuration from a file
* // or take it from constructor arguments.
*/
/*
usage example:
String cfgPath = appDir + "/modelconfig.cfg";
OnlineRecognizer.setSoPath(soPath); // set the path of the JNI .so library
OnlineRecognizer rcgObj = new OnlineRecognizer(cfgPath); // create a recognizer from a model config file
OnlineStream streamObj = rcgObj.createStream(); // create a stream for feeding wav data
float[] buffer = rcgObj.readWavFile(wavFileName); // read samples from a wav file
streamObj.acceptWaveform(buffer); // feed the stream with data
streamObj.inputFinished(); // tell the engine all data has been fed
OnlineStream[] ssObj = new OnlineStream[1];
while (rcgObj.isReady(streamObj)) { // while the engine has unprocessed data
ssObj[0] = streamObj;
rcgObj.decodeStreams(ssObj); // decode multiple streams
// rcgObj.decodeStream(streamObj); // or decode a single stream
}
String recText = "simple:" + rcgObj.getResult(streamObj) + "\n";
byte[] utf8Data = recText.getBytes(StandardCharsets.UTF_8);
System.out.println(new String(utf8Data, StandardCharsets.UTF_8));
rcgObj.reSet(streamObj);
rcgObj.releaseStream(streamObj); // release the stream
rcgObj.release(); // release the recognizer
*/
package com.k2fsa.sherpa.onnx;
import java.io.*;
import java.util.*;
public class OnlineRecognizer {
private long ptr = 0; // pointer to the native asr engine
private int sampleRate = 16000;
// load config file for OnlineRecognizer
public OnlineRecognizer(String modelCfgPath) {
Map<String, String> proMap = this.readProperties(modelCfgPath);
try {
int sampleRate = Integer.parseInt(proMap.get("sample_rate").trim());
this.sampleRate = sampleRate;
EndpointRule rule1 =
new EndpointRule(
false, Float.parseFloat(proMap.get("rule1_min_trailing_silence").trim()), 0.0F);
EndpointRule rule2 =
new EndpointRule(
true, Float.parseFloat(proMap.get("rule2_min_trailing_silence").trim()), 0.0F);
EndpointRule rule3 =
new EndpointRule(
false, 0.0F, Float.parseFloat(proMap.get("rule3_min_utterance_length").trim()));
EndpointConfig endCfg = new EndpointConfig(rule1, rule2, rule3);
OnlineTransducerModelConfig modelCfg =
new OnlineTransducerModelConfig(
proMap.get("encoder").trim(),
proMap.get("decoder").trim(),
proMap.get("joiner").trim(),
proMap.get("tokens").trim(),
Integer.parseInt(proMap.get("num_threads").trim()),
false);
FeatureConfig featConfig =
new FeatureConfig(sampleRate, Integer.parseInt(proMap.get("feature_dim").trim()));
OnlineRecognizerConfig rcgCfg =
new OnlineRecognizerConfig(
featConfig,
modelCfg,
endCfg,
Boolean.parseBoolean(proMap.get("enable_endpoint_detection").trim()),
proMap.get("decoding_method").trim(),
Integer.parseInt(proMap.get("max_active_paths").trim()));
// create a new recognizer; the first parameter is reserved for the Android AssetManager (__ANDROID_API__ >= 9)
this.ptr = createOnlineRecognizer(new Object(), rcgCfg);
} catch (Exception e) {
System.err.println(e);
}
}
// constructor for use with the Android AssetManager (__ANDROID_API__ >= 9)
public OnlineRecognizer(Object assetManager, String modelCfgPath) {
Map<String, String> proMap = this.readProperties(modelCfgPath);
try {
int sampleRate = Integer.parseInt(proMap.get("sample_rate").trim());
this.sampleRate = sampleRate;
EndpointRule rule1 =
new EndpointRule(
false, Float.parseFloat(proMap.get("rule1_min_trailing_silence").trim()), 0.0F);
EndpointRule rule2 =
new EndpointRule(
true, Float.parseFloat(proMap.get("rule2_min_trailing_silence").trim()), 0.0F);
EndpointRule rule3 =
new EndpointRule(
false, 0.0F, Float.parseFloat(proMap.get("rule3_min_utterance_length").trim()));
EndpointConfig endCfg = new EndpointConfig(rule1, rule2, rule3);
OnlineTransducerModelConfig modelCfg =
new OnlineTransducerModelConfig(
proMap.get("encoder").trim(),
proMap.get("decoder").trim(),
proMap.get("joiner").trim(),
proMap.get("tokens").trim(),
Integer.parseInt(proMap.get("num_threads").trim()),
false);
FeatureConfig featConfig =
new FeatureConfig(sampleRate, Integer.parseInt(proMap.get("feature_dim").trim()));
OnlineRecognizerConfig rcgCfg =
new OnlineRecognizerConfig(
featConfig,
modelCfg,
endCfg,
Boolean.parseBoolean(proMap.get("enable_endpoint_detection").trim()),
proMap.get("decoding_method").trim(),
Integer.parseInt(proMap.get("max_active_paths").trim()));
// create a new recognizer; the first parameter is reserved for the Android AssetManager (__ANDROID_API__ >= 9)
this.ptr = createOnlineRecognizer(assetManager, rcgCfg);
} catch (Exception e) {
System.err.println(e);
}
}
// construct an OnlineRecognizer from explicit parameters
public OnlineRecognizer(
String tokens,
String encoder,
String decoder,
String joiner,
int numThreads,
int sampleRate,
int featureDim,
boolean enableEndpointDetection,
float rule1MinTrailingSilence,
float rule2MinTrailingSilence,
float rule3MinUtteranceLength,
String decodingMethod,
int maxActivePaths) {
this.sampleRate = sampleRate;
EndpointRule rule1 = new EndpointRule(false, rule1MinTrailingSilence, 0.0F);
EndpointRule rule2 = new EndpointRule(true, rule2MinTrailingSilence, 0.0F);
EndpointRule rule3 = new EndpointRule(false, 0.0F, rule3MinUtteranceLength);
EndpointConfig endCfg = new EndpointConfig(rule1, rule2, rule3);
OnlineTransducerModelConfig modelCfg =
new OnlineTransducerModelConfig(encoder, decoder, joiner, tokens, numThreads, false);
FeatureConfig featConfig = new FeatureConfig(sampleRate, featureDim);
OnlineRecognizerConfig rcgCfg =
new OnlineRecognizerConfig(
featConfig,
modelCfg,
endCfg,
enableEndpointDetection,
decodingMethod,
maxActivePaths);
// create a new recognizer; the first parameter is reserved for the Android AssetManager (__ANDROID_API__ >= 9)
this.ptr = createOnlineRecognizer(new Object(), rcgCfg);
}
private Map<String, String> readProperties(String modelCfgPath) {
// read and parse config file
Properties props = new Properties();
Map<String, String> proMap = new HashMap<>();
try {
File file = new File(modelCfgPath);
if (!file.exists()) {
System.out.println("model config file does not exist: " + modelCfgPath);
System.exit(1);
}
// use try-with-resources so the input stream is always closed
try (InputStream in = new BufferedInputStream(new FileInputStream(modelCfgPath))) {
props.load(in);
}
Enumeration<?> en = props.propertyNames();
while (en.hasMoreElements()) {
String key = (String) en.nextElement();
String value = props.getProperty(key);
proMap.put(key, value);
// System.out.println(key + "=" + value);
}
} catch (Exception e) {
e.printStackTrace();
}
return proMap;
}
public void decodeStream(OnlineStream s) throws Exception {
if (this.ptr == 0) throw new Exception("null exception for recognizer ptr");
long streamPtr = s.getPtr();
if (streamPtr == 0) throw new Exception("null exception for stream ptr");
// after samples have been fed to the engine, call decodeStream to let it process them
decodeStream(this.ptr, streamPtr);
}
public void decodeStreams(OnlineStream[] ssOjb) throws Exception {
if (this.ptr == 0) throw new Exception("null exception for recognizer ptr");
// decode for multiple streams
long[] ss = new long[ssOjb.length];
for (int i = 0; i < ssOjb.length; i++) {
ss[i] = ssOjb[i].getPtr();
if (ss[i] == 0) throw new Exception("null exception for stream ptr");
}
decodeStreams(this.ptr, ss);
}
public boolean isReady(OnlineStream s) throws Exception {
// whether the engine is ready for decode
if (this.ptr == 0) throw new Exception("null exception for recognizer ptr");
long streamPtr = s.getPtr();
if (streamPtr == 0) throw new Exception("null exception for stream ptr");
return isReady(this.ptr, streamPtr);
}
public String getResult(OnlineStream s) throws Exception {
// get text from the engine
if (this.ptr == 0) throw new Exception("null exception for recognizer ptr");
long streamPtr = s.getPtr();
if (streamPtr == 0) throw new Exception("null exception for stream ptr");
return getResult(this.ptr, streamPtr);
}
public boolean isEndpoint(OnlineStream s) throws Exception {
if (this.ptr == 0) throw new Exception("null exception for recognizer ptr");
long streamPtr = s.getPtr();
if (streamPtr == 0) throw new Exception("null exception for stream ptr");
return isEndpoint(this.ptr, streamPtr);
}
public void reSet(OnlineStream s) throws Exception {
if (this.ptr == 0) throw new Exception("null exception for recognizer ptr");
long streamPtr = s.getPtr();
if (streamPtr == 0) throw new Exception("null exception for stream ptr");
reSet(this.ptr, streamPtr);
}
public OnlineStream createStream() throws Exception {
// create one stream for data to feed in
if (this.ptr == 0) throw new Exception("null exception for recognizer ptr");
long streamPtr = createStream(this.ptr);
OnlineStream stream = new OnlineStream(streamPtr, this.sampleRate);
return stream;
}
public float[] readWavFile(String fileName) {
// read samples from the given wav file
Object[] wavdata = readWave(fileName);
Object data = wavdata[0]; // wavdata[0] holds the float samples, wavdata[1] the sample rate
float[] floatData = (float[]) data;
return floatData;
}
// load the libsherpa-onnx-jni.so lib
public static void loadSoLib(String soPath) {
// load libsherpa-onnx-jni.so lib from the path
System.out.println("so lib path=" + soPath + "\n");
System.load(soPath.trim());
}
public static void setSoPath(String soPath) {
OnlineRecognizer.loadSoLib(soPath);
OnlineStream.loadSoLib(soPath);
}
protected void finalize() throws Throwable {
release();
}
// release the recognizer; call this explicitly once it is no longer needed
public void release() {
if (this.ptr == 0) return;
deleteOnlineRecognizer(this.ptr);
this.ptr = 0;
}
// release a stream; call this explicitly once it is no longer needed
public void releaseStream(OnlineStream s) {
s.release();
}
// JNI interface libsherpa-onnx-jni.so
private native Object[] readWave(String fileName);
private native String getResult(long ptr, long streamPtr);
private native void decodeStream(long ptr, long streamPtr);
private native void decodeStreams(long ptr, long[] ssPtr);
private native boolean isReady(long ptr, long streamPtr);
// the first parameter is kept for the Android AssetManager (__ANDROID_API__ >= 9)
private native long createOnlineRecognizer(Object asset, OnlineRecognizerConfig config);
private native long createStream(long ptr);
private native void deleteOnlineRecognizer(long ptr);
private native boolean isEndpoint(long ptr, long streamPtr);
private native void reSet(long ptr, long streamPtr);
}
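
The file-based constructors above read a java.util.Properties file whose keys match those parsed in readProperties(). A hypothetical modelconfig.cfg could look like the following; every path and value is a placeholder to adapt to your own model:

# hypothetical modelconfig.cfg; all paths and values are placeholders
sample_rate=16000
feature_dim=80
encoder=/path/to/encoder.onnx
decoder=/path/to/decoder.onnx
joiner=/path/to/joiner.onnx
tokens=/path/to/tokens.txt
num_threads=4
enable_endpoint_detection=true
rule1_min_trailing_silence=2.4
rule2_min_trailing_silence=1.2
rule3_min_utterance_length=20
decoding_method=greedy_search
max_active_paths=4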

OnlineRecognizerConfig.java

@@ -0,0 +1,53 @@
/*
* // Copyright 2022-2023 by zhaoming
*/
package com.k2fsa.sherpa.onnx;
public class OnlineRecognizerConfig {
private final FeatureConfig featConfig;
private final OnlineTransducerModelConfig modelConfig;
private final EndpointConfig endpointConfig;
private final boolean enableEndpoint;
private final String decodingMethod;
private final int maxActivePaths;
public OnlineRecognizerConfig(
FeatureConfig featConfig,
OnlineTransducerModelConfig modelConfig,
EndpointConfig endpointConfig,
boolean enableEndpoint,
String decodingMethod,
int maxActivePaths) {
this.featConfig = featConfig;
this.modelConfig = modelConfig;
this.endpointConfig = endpointConfig;
this.enableEndpoint = enableEndpoint;
this.decodingMethod = decodingMethod;
this.maxActivePaths = maxActivePaths;
}
public FeatureConfig getFeatConfig() {
return featConfig;
}
public OnlineTransducerModelConfig getModelConfig() {
return modelConfig;
}
public EndpointConfig getEndpointConfig() {
return endpointConfig;
}
public boolean isEnableEndpoint() {
return enableEndpoint;
}
public String getDecodingMethod() {
return decodingMethod;
}
public int getMaxActivePaths() {
return maxActivePaths;
}
}

OnlineStream.java

@@ -0,0 +1,86 @@
/*
* // Copyright 2022-2023 by zhaoming
*/
// Stream is used for feeding data to the asr engine
package com.k2fsa.sherpa.onnx;
import java.io.*;
import java.util.*;
public class OnlineStream {
private long ptr = 0; // this is the stream ptr
private int sampleRate = 16000;
// the native stream pointer is assigned at construction
public OnlineStream(long ptr, int sampleRate) {
this.ptr = ptr;
this.sampleRate = sampleRate;
}
public long getPtr() {
return ptr;
}
public void acceptWaveform(float[] samples) throws Exception {
if (this.ptr == 0) throw new Exception("null exception for stream ptr");
// feed wave data to asr engine
acceptWaveform(this.ptr, this.sampleRate, samples);
}
public void inputFinished() {
// add some tail padding before finishing
int padLen = (int) (this.sampleRate * 0.3); // 0.3 seconds at the stream's sample rate
float[] tailPaddings = new float[padLen]; // filled with zeros by default
acceptWaveform(this.ptr, this.sampleRate, tailPaddings);
// tell the engine that all data has been fed
inputFinished(this.ptr);
}
public static void loadSoLib(String soPath) {
// load the .so lib from the given path
System.load(soPath.trim());
}
public void release() {
// the stream object must be released after use
if (this.ptr == 0) return;
deleteStream(this.ptr);
this.ptr = 0;
}
protected void finalize() throws Throwable {
release();
}
public boolean isLastFrame() throws Exception {
if (this.ptr == 0) throw new Exception("null exception for stream ptr");
return isLastFrame(this.ptr);
}
public void reSet() throws Exception {
if (this.ptr == 0) throw new Exception("null exception for stream ptr");
reSet(this.ptr);
}
public int featureDim() throws Exception {
if (this.ptr == 0) throw new Exception("null exception for stream ptr");
return featureDim(this.ptr);
}
// JNI interface libsherpa-onnx-jni.so
private native void acceptWaveform(long ptr, int sampleRate, float[] samples);
private native void inputFinished(long ptr);
private native void deleteStream(long ptr);
private native int numFramesReady(long ptr);
private native boolean isLastFrame(long ptr);
private native void reSet(long ptr);
private native int featureDim(long ptr);
}
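
A minimal sketch of feeding a stream in small chunks rather than all at once, combining OnlineStream with the OnlineRecognizer methods above; the helper name, chunk size, and printing policy are arbitrary illustrative choices, and rcg is assumed to be an already-constructed recognizer:

// Hypothetical chunked decoding helper; "samples" is mono PCM in [-1, 1] at the stream's sample rate.
static void decodeInChunks(OnlineRecognizer rcg, float[] samples) throws Exception {
  OnlineStream stream = rcg.createStream();
  int chunk = 1600; // e.g. 0.1 s at 16 kHz; an arbitrary example size
  for (int start = 0; start < samples.length; start += chunk) {
    int end = Math.min(start + chunk, samples.length);
    stream.acceptWaveform(java.util.Arrays.copyOfRange(samples, start, end));
    while (rcg.isReady(stream)) {
      rcg.decodeStream(stream); // process whatever frames are ready
    }
    if (rcg.isEndpoint(stream)) {
      System.out.println(rcg.getResult(stream)); // print the finished utterance
      rcg.reSet(stream); // start a new utterance after an endpoint
    }
  }
  stream.inputFinished(); // add tail padding and flush the remaining frames
  while (rcg.isReady(stream)) {
    rcg.decodeStream(stream);
  }
  System.out.println(rcg.getResult(stream));
  rcg.releaseStream(stream);
}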

OnlineTransducerModelConfig.java

@@ -0,0 +1,48 @@
/*
* // Copyright 2022-2023 by zhaoming
*/
package com.k2fsa.sherpa.onnx;
public class OnlineTransducerModelConfig {
private final String encoder;
private final String decoder;
private final String joiner;
private final String tokens;
private final int numThreads;
private final boolean debug;
public OnlineTransducerModelConfig(
String encoder, String decoder, String joiner, String tokens, int numThreads, boolean debug) {
this.encoder = encoder;
this.decoder = decoder;
this.joiner = joiner;
this.tokens = tokens;
this.numThreads = numThreads;
this.debug = debug;
}
public String getEncoder() {
return encoder;
}
public String getDecoder() {
return decoder;
}
public String getJoiner() {
return joiner;
}
public String getTokens() {
return tokens;
}
public int getNumThreads() {
return numThreads;
}
public boolean getDebug() {
return debug;
}
}
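
OnlineTransducerModelConfig and the other config classes above are normally assembled internally by the parameter-based OnlineRecognizer constructor, which can be called directly instead of using a config file. A hedged sketch, where every path and numeric value is a placeholder:

// Hypothetical direct construction; adapt all paths and values to your own model.
OnlineRecognizer.setSoPath("/path/to/libsherpa-onnx-jni.so"); // load the JNI library first
OnlineRecognizer recognizer =
    new OnlineRecognizer(
        "/path/to/tokens.txt",
        "/path/to/encoder.onnx",
        "/path/to/decoder.onnx",
        "/path/to/joiner.onnx",
        4,       // numThreads
        16000,   // sampleRate
        80,      // featureDim
        true,    // enableEndpointDetection
        2.4f,    // rule1MinTrailingSilence
        1.2f,    // rule2MinTrailingSilence
        20.0f,   // rule3MinUtteranceLength
        "greedy_search", // decodingMethod
        4);      // maxActivePaths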