Refactor the JNI interface to make it more modular and maintainable (#802)

This commit is contained in:
Fangjun Kuang
2024-04-24 09:48:42 +08:00
committed by GitHub
parent dc5af04830
commit 9b67a476e6
116 changed files with 3502 additions and 3316 deletions

View File

@@ -16,6 +16,7 @@
tools:targetApi="31">
<activity
android:name=".MainActivity"
android:label="2pass ASR: Next-gen Kaldi"
android:exported="true">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
@@ -29,4 +30,4 @@
</activity>
</application>
</manifest>
</manifest>

View File

@@ -0,0 +1 @@
../../../../../../../../../../sherpa-onnx/kotlin-api/FeatureConfig.kt

View File

@@ -17,11 +17,13 @@ import kotlin.concurrent.thread
private const val TAG = "sherpa-onnx"
private const val REQUEST_RECORD_AUDIO_PERMISSION = 200
// adb emu avd hostmicon
// to enable microphone inside the emulator
class MainActivity : AppCompatActivity() {
private val permissions: Array<String> = arrayOf(Manifest.permission.RECORD_AUDIO)
private lateinit var onlineRecognizer: SherpaOnnx
private lateinit var offlineRecognizer: SherpaOnnxOffline
private lateinit var onlineRecognizer: OnlineRecognizer
private lateinit var offlineRecognizer: OfflineRecognizer
private var audioRecord: AudioRecord? = null
private lateinit var recordButton: Button
private lateinit var textView: TextView
@@ -93,7 +95,6 @@ class MainActivity : AppCompatActivity() {
audioRecord!!.startRecording()
recordButton.setText(R.string.stop)
isRecording = true
onlineRecognizer.reset(true)
samplesBuffer.clear()
textView.text = ""
lastText = ""
@@ -115,6 +116,7 @@ class MainActivity : AppCompatActivity() {
private fun processSamples() {
Log.i(TAG, "processing samples")
val stream = onlineRecognizer.createStream()
val interval = 0.1 // i.e., 100 ms
val bufferSize = (interval * sampleRateInHz).toInt() // in samples
@@ -126,29 +128,29 @@ class MainActivity : AppCompatActivity() {
val samples = FloatArray(ret) { buffer[it] / 32768.0f }
samplesBuffer.add(samples)
onlineRecognizer.acceptWaveform(samples, sampleRate = sampleRateInHz)
while (onlineRecognizer.isReady()) {
onlineRecognizer.decode()
stream.acceptWaveform(samples, sampleRate = sampleRateInHz)
while (onlineRecognizer.isReady(stream)) {
onlineRecognizer.decode(stream)
}
val isEndpoint = onlineRecognizer.isEndpoint()
val isEndpoint = onlineRecognizer.isEndpoint(stream)
var textToDisplay = lastText
var text = onlineRecognizer.text
var text = onlineRecognizer.getResult(stream).text
if (text.isNotBlank()) {
if (lastText.isBlank()) {
textToDisplay = if (lastText.isBlank()) {
// textView.text = "${idx}: ${text}"
textToDisplay = "${idx}: ${text}"
"${idx}: $text"
} else {
textToDisplay = "${lastText}\n${idx}: ${text}"
"${lastText}\n${idx}: $text"
}
}
if (isEndpoint) {
onlineRecognizer.reset()
onlineRecognizer.reset(stream)
if (text.isNotBlank()) {
text = runSecondPass()
lastText = "${lastText}\n${idx}: ${text}"
lastText = "${lastText}\n${idx}: $text"
idx += 1
} else {
samplesBuffer.clear()
@@ -160,6 +162,7 @@ class MainActivity : AppCompatActivity() {
}
}
}
stream.release()
}
private fun initMicrophone(): Boolean {
@@ -190,8 +193,8 @@ class MainActivity : AppCompatActivity() {
// Please change getModelConfig() to add new models
// See https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html
// for a list of available models
val firstType = 1
println("Select model type ${firstType} for the first pass")
val firstType = 9
Log.i(TAG, "Select model type $firstType for the first pass")
val config = OnlineRecognizerConfig(
featConfig = getFeatureConfig(sampleRate = sampleRateInHz, featureDim = 80),
modelConfig = getModelConfig(type = firstType)!!,
@@ -199,7 +202,7 @@ class MainActivity : AppCompatActivity() {
enableEndpoint = true,
)
onlineRecognizer = SherpaOnnx(
onlineRecognizer = OnlineRecognizer(
assetManager = application.assets,
config = config,
)
@@ -209,15 +212,15 @@ class MainActivity : AppCompatActivity() {
// Please change getOfflineModelConfig() to add new models
// See https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html
// for a list of available models
val secondType = 1
println("Select model type ${secondType} for the second pass")
val secondType = 0
Log.i(TAG, "Select model type $secondType for the second pass")
val config = OfflineRecognizerConfig(
featConfig = getFeatureConfig(sampleRate = sampleRateInHz, featureDim = 80),
modelConfig = getOfflineModelConfig(type = secondType)!!,
)
offlineRecognizer = SherpaOnnxOffline(
offlineRecognizer = OfflineRecognizer(
assetManager = application.assets,
config = config,
)
@@ -244,8 +247,15 @@ class MainActivity : AppCompatActivity() {
val n = maxOf(0, samples.size - 8000)
samplesBuffer.clear()
samplesBuffer.add(samples.sliceArray(n..samples.size-1))
samplesBuffer.add(samples.sliceArray(n until samples.size))
return offlineRecognizer.decode(samples.sliceArray(0..n), sampleRateInHz)
val stream = offlineRecognizer.createStream()
stream.acceptWaveform(samples.sliceArray(0..n), sampleRateInHz)
offlineRecognizer.decode(stream)
val result = offlineRecognizer.getResult(stream)
stream.release()
return result.text
}
}

View File

@@ -0,0 +1 @@
../../../../../../../../../../sherpa-onnx/kotlin-api/OfflineRecognizer.kt

View File

@@ -0,0 +1 @@
../../../../../../../../../../sherpa-onnx/kotlin-api/OfflineStream.kt

View File

@@ -0,0 +1 @@
../../../../../../../../../../sherpa-onnx/kotlin-api/OnlineRecognizer.kt

View File

@@ -0,0 +1 @@
../../../../../../../../../../sherpa-onnx/kotlin-api/OnlineStream.kt

View File

@@ -1,404 +0,0 @@
package com.k2fsa.sherpa.onnx
import android.content.res.AssetManager
/**
 * One rule for endpoint (end-of-utterance) detection.
 * Durations appear to be in seconds — NOTE(review): confirm against the native implementation.
 */
data class EndpointRule(
    var mustContainNonSilence: Boolean,
    var minTrailingSilence: Float,
    var minUtteranceLength: Float,
)
/**
 * Endpoint-detection configuration holding three independent rules.
 * NOTE(review): presumably an endpoint fires when any rule is satisfied — verify in native code.
 */
data class EndpointConfig(
    var rule1: EndpointRule = EndpointRule(false, 2.0f, 0.0f),
    var rule2: EndpointRule = EndpointRule(true, 1.2f, 0.0f),
    var rule3: EndpointRule = EndpointRule(false, 0.0f, 20.0f)
)
/** Paths to the encoder/decoder/joiner ONNX files of a streaming transducer model. */
data class OnlineTransducerModelConfig(
    var encoder: String = "",
    var decoder: String = "",
    var joiner: String = "",
)
/** Paths to the encoder/decoder ONNX files of a streaming paraformer model. */
data class OnlineParaformerModelConfig(
    var encoder: String = "",
    var decoder: String = "",
)
/** Path to the single ONNX file of a streaming zipformer2 CTC model. */
data class OnlineZipformer2CtcModelConfig(
    var model: String = "",
)
/**
 * Model configuration for the online (streaming) recognizer.
 * Exactly one of the nested model configs is expected to be filled in,
 * selected via [modelType] — NOTE(review): inferred; confirm with the native side.
 */
data class OnlineModelConfig(
    var transducer: OnlineTransducerModelConfig = OnlineTransducerModelConfig(),
    var paraformer: OnlineParaformerModelConfig = OnlineParaformerModelConfig(),
    var zipformer2Ctc: OnlineZipformer2CtcModelConfig = OnlineZipformer2CtcModelConfig(),
    // Required: path to tokens.txt (no default on purpose).
    var tokens: String,
    var numThreads: Int = 1,
    var debug: Boolean = false,
    var provider: String = "cpu",
    var modelType: String = "",
)
/** Optional neural language model used for rescoring; empty [model] disables it. */
data class OnlineLMConfig(
    var model: String = "",
    var scale: Float = 0.5f,
)
/** Acoustic feature extraction settings: expected input sample rate and feature dimension. */
data class FeatureConfig(
    var sampleRate: Int = 16000,
    var featureDim: Int = 80,
)
/**
 * Top-level configuration for the online (streaming) recognizer.
 * [modelConfig] is required; everything else has sensible defaults.
 */
data class OnlineRecognizerConfig(
    var featConfig: FeatureConfig = FeatureConfig(),
    var modelConfig: OnlineModelConfig,
    var lmConfig: OnlineLMConfig = OnlineLMConfig(),
    var endpointConfig: EndpointConfig = EndpointConfig(),
    var enableEndpoint: Boolean = true,
    var decodingMethod: String = "greedy_search",
    var maxActivePaths: Int = 4,
    var hotwordsFile: String = "",
    var hotwordsScore: Float = 1.5f,
)
/** Paths to the encoder/decoder/joiner ONNX files of a non-streaming transducer model. */
data class OfflineTransducerModelConfig(
    var encoder: String = "",
    var decoder: String = "",
    var joiner: String = "",
)
/** Path to the single ONNX file of a non-streaming paraformer model. */
data class OfflineParaformerModelConfig(
    var model: String = "",
)
/** Configuration for a (non-streaming) Whisper model. */
data class OfflineWhisperModelConfig(
    var encoder: String = "",
    var decoder: String = "",
    var language: String = "en", // Used with multilingual model
    var task: String = "transcribe", // transcribe or translate
    var tailPaddings: Int = 1000, // Padding added at the end of the samples
)
/**
 * Model configuration for the offline (non-streaming) recognizer.
 * Exactly one of the nested model configs is expected to be filled in,
 * selected via [modelType] — NOTE(review): inferred; confirm with the native side.
 */
data class OfflineModelConfig(
    var transducer: OfflineTransducerModelConfig = OfflineTransducerModelConfig(),
    var paraformer: OfflineParaformerModelConfig = OfflineParaformerModelConfig(),
    var whisper: OfflineWhisperModelConfig = OfflineWhisperModelConfig(),
    var numThreads: Int = 1,
    var debug: Boolean = false,
    var provider: String = "cpu",
    var modelType: String = "",
    // Required: path to tokens.txt (no default on purpose).
    var tokens: String,
)
/**
 * Top-level configuration for the offline (non-streaming) recognizer.
 * [modelConfig] is required; everything else has sensible defaults.
 */
data class OfflineRecognizerConfig(
    var featConfig: FeatureConfig = FeatureConfig(),
    var modelConfig: OfflineModelConfig,
    // var lmConfig: OfflineLMConfig(), // TODO(fangjun): enable it
    var decodingMethod: String = "greedy_search",
    var maxActivePaths: Int = 4,
    var hotwordsFile: String = "",
    var hotwordsScore: Float = 1.5f,
)
/**
 * Kotlin wrapper around the native online (streaming) recognizer.
 *
 * All real work happens in native code (libsherpa-onnx-jni); this class only
 * holds the opaque native pointer and forwards calls to it. The `external`
 * function names and signatures are bound by JNI registration — do not rename.
 *
 * Models are loaded from Android assets when [assetManager] is given,
 * otherwise from the file system paths in [config].
 */
class SherpaOnnx(
    assetManager: AssetManager? = null,
    var config: OnlineRecognizerConfig,
) {
    // Opaque handle to the native recognizer; owned by this instance.
    private val ptr: Long

    init {
        if (assetManager != null) {
            ptr = new(assetManager, config)
        } else {
            ptr = newFromFile(config)
        }
    }

    // Frees the native object. NOTE(review): finalize() is not guaranteed to
    // run promptly (or at all); consider an explicit release() if leaks matter.
    protected fun finalize() {
        delete(ptr)
    }

    // Feeds audio samples (floats, typically in [-1, 1]) at the given rate.
    fun acceptWaveform(samples: FloatArray, sampleRate: Int) =
        acceptWaveform(ptr, samples, sampleRate)

    // Signals that no more audio will arrive for the current utterance.
    fun inputFinished() = inputFinished(ptr)

    // Resets decoding state; [recreate]/[hotwords] semantics are defined natively.
    fun reset(recreate: Boolean = false, hotwords: String = "") = reset(ptr, recreate, hotwords)

    // Runs one decoding step; call while [isReady] returns true.
    fun decode() = decode(ptr)

    // True if an endpoint (end of utterance) has been detected.
    fun isEndpoint(): Boolean = isEndpoint(ptr)

    // True if enough audio is buffered for another decode() call.
    fun isReady(): Boolean = isReady(ptr)

    // Current partial/final recognition result.
    val text: String
        get() = getText(ptr)

    // Tokens of the current result.
    val tokens: Array<String>
        get() = getTokens(ptr)

    private external fun delete(ptr: Long)

    private external fun new(
        assetManager: AssetManager,
        config: OnlineRecognizerConfig,
    ): Long

    private external fun newFromFile(
        config: OnlineRecognizerConfig,
    ): Long

    private external fun acceptWaveform(ptr: Long, samples: FloatArray, sampleRate: Int)
    private external fun inputFinished(ptr: Long)
    private external fun getText(ptr: Long): String
    private external fun reset(ptr: Long, recreate: Boolean, hotwords: String)
    private external fun decode(ptr: Long)
    private external fun isEndpoint(ptr: Long): Boolean
    private external fun isReady(ptr: Long): Boolean
    private external fun getTokens(ptr: Long): Array<String>

    companion object {
        init {
            System.loadLibrary("sherpa-onnx-jni")
        }
    }
}
/**
 * Kotlin wrapper around the native offline (non-streaming) recognizer.
 *
 * Holds the opaque native pointer; the `external` function names and
 * signatures are bound by JNI registration — do not rename.
 *
 * Models are loaded from Android assets when [assetManager] is given,
 * otherwise from the file system paths in [config].
 */
class SherpaOnnxOffline(
    assetManager: AssetManager? = null,
    var config: OfflineRecognizerConfig,
) {
    // Opaque handle to the native recognizer; owned by this instance.
    private val ptr: Long

    init {
        if (assetManager != null) {
            ptr = new(assetManager, config)
        } else {
            ptr = newFromFile(config)
        }
    }

    // Frees the native object. NOTE(review): finalize() is not guaranteed to
    // run promptly (or at all); consider an explicit release() if leaks matter.
    protected fun finalize() {
        delete(ptr)
    }

    // Decodes a complete utterance in one shot and returns the recognized text.
    fun decode(samples: FloatArray, sampleRate: Int) = decode(ptr, samples, sampleRate)

    private external fun delete(ptr: Long)

    private external fun new(
        assetManager: AssetManager,
        config: OfflineRecognizerConfig,
    ): Long

    private external fun newFromFile(
        config: OfflineRecognizerConfig,
    ): Long

    private external fun decode(ptr: Long, samples: FloatArray, sampleRate: Int): String

    companion object {
        init {
            System.loadLibrary("sherpa-onnx-jni")
        }
    }
}
/** Builds a [FeatureConfig] for the given sampling rate and feature dimension. */
fun getFeatureConfig(sampleRate: Int, featureDim: Int): FeatureConfig =
    FeatureConfig(sampleRate = sampleRate, featureDim = featureDim)
/*
Please see
https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html
for a list of pre-trained models.
We only add a few here. Please change the following code
to add your own. (It should be straightforward to add a new model
by following the code)
@param type
0 - csukuangfj/sherpa-onnx-streaming-zipformer-zh-14M-2023-02-23 (Chinese)
https://k2-fsa.github.io/sherpa/onnx/pretrained_models/online-transducer/zipformer-transducer-models.html#sherpa-onnx-streaming-zipformer-zh-14m-2023-02-23
encoder/joiner int8, decoder float32
1 - csukuangfj/sherpa-onnx-streaming-zipformer-en-20M-2023-02-17 (English)
https://k2-fsa.github.io/sherpa/onnx/pretrained_models/online-transducer/zipformer-transducer-models.html#csukuangfj-sherpa-onnx-streaming-zipformer-en-20m-2023-02-17-english
encoder/joiner int8, decoder fp32
*/
/**
 * Returns the streaming model configuration for the given [type],
 * or null when [type] is not one of the known models listed above.
 */
fun getModelConfig(type: Int): OnlineModelConfig? = when (type) {
    0 -> {
        val dir = "sherpa-onnx-streaming-zipformer-zh-14M-2023-02-23"
        OnlineModelConfig(
            transducer = OnlineTransducerModelConfig(
                encoder = "$dir/encoder-epoch-99-avg-1.int8.onnx",
                decoder = "$dir/decoder-epoch-99-avg-1.onnx",
                joiner = "$dir/joiner-epoch-99-avg-1.int8.onnx",
            ),
            tokens = "$dir/tokens.txt",
            modelType = "zipformer",
        )
    }

    1 -> {
        val dir = "sherpa-onnx-streaming-zipformer-en-20M-2023-02-17"
        OnlineModelConfig(
            transducer = OnlineTransducerModelConfig(
                encoder = "$dir/encoder-epoch-99-avg-1.int8.onnx",
                decoder = "$dir/decoder-epoch-99-avg-1.onnx",
                joiner = "$dir/joiner-epoch-99-avg-1.int8.onnx",
            ),
            tokens = "$dir/tokens.txt",
            modelType = "zipformer",
        )
    }

    else -> null
}
/*
Please see
https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html
for a list of pre-trained models.
We only add a few here. Please change the following code
to add your own LM model. (It should be straightforward to train a new NN LM model
by following the code, https://github.com/k2-fsa/icefall/blob/master/icefall/rnn_lm/train.py)
@param type
0 - sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20 (Bilingual, Chinese + English)
https://k2-fsa.github.io/sherpa/onnx/pretrained_models/zipformer-transducer-models.html#sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20-bilingual-chinese-english
*/
/**
 * Returns the online LM configuration for the given [type];
 * falls back to a default (disabled) [OnlineLMConfig] for unknown types.
 */
fun getOnlineLMConfig(type: Int): OnlineLMConfig = when (type) {
    0 -> {
        val dir = "sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20"
        OnlineLMConfig(
            model = "$dir/with-state-epoch-99-avg-1.int8.onnx",
            scale = 0.5f,
        )
    }

    else -> OnlineLMConfig()
}
// for English models, use a small value for rule2.minTrailingSilence, e.g., 0.8
// Endpoint rules used by the demo. For English models, use a small value
// for rule2.minTrailingSilence, e.g., 0.8
fun getEndpointConfig(): EndpointConfig = EndpointConfig(
    rule1 = EndpointRule(false, 2.4f, 0.0f),
    rule2 = EndpointRule(true, 0.8f, 0.0f),
    rule3 = EndpointRule(false, 0.0f, 20.0f),
)
/*
Please see
https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html
for a list of pre-trained models.
We only add a few here. Please change the following code
to add your own. (It should be straightforward to add a new model
by following the code)
@param type
0 - csukuangfj/sherpa-onnx-paraformer-zh-2023-03-28 (Chinese)
https://k2-fsa.github.io/sherpa/onnx/pretrained_models/offline-paraformer/paraformer-models.html#csukuangfj-sherpa-onnx-paraformer-zh-2023-03-28-chinese
int8
1 - icefall-asr-multidataset-pruned_transducer_stateless7-2023-05-04 (English)
https://k2-fsa.github.io/sherpa/onnx/pretrained_models/offline-transducer/zipformer-transducer-models.html#icefall-asr-multidataset-pruned-transducer-stateless7-2023-05-04-english
encoder int8, decoder/joiner float32
2 - sherpa-onnx-whisper-tiny.en
https://k2-fsa.github.io/sherpa/onnx/pretrained_models/whisper/tiny.en.html#tiny-en
encoder int8, decoder int8
3 - sherpa-onnx-whisper-base.en
https://k2-fsa.github.io/sherpa/onnx/pretrained_models/whisper/base.en.html#base-en
encoder int8, decoder int8
4 - pkufool/icefall-asr-zipformer-wenetspeech-20230615 (Chinese)
https://k2-fsa.github.io/sherpa/onnx/pretrained_models/offline-transducer/zipformer-transducer-models.html#pkufool-icefall-asr-zipformer-wenetspeech-20230615-chinese
encoder/joiner int8, decoder fp32
5 - sherpa-onnx-zipformer-multi-zh-hans-2023-9-2 (Chinese, zipformer2)
encoder/joiner int8, decoder fp32
*/
/**
 * Returns the offline (non-streaming) model configuration for the given [type],
 * or null when [type] is not one of the known models listed above.
 */
fun getOfflineModelConfig(type: Int): OfflineModelConfig? = when (type) {
    0 -> {
        val dir = "sherpa-onnx-paraformer-zh-2023-03-28"
        OfflineModelConfig(
            paraformer = OfflineParaformerModelConfig(
                model = "$dir/model.int8.onnx",
            ),
            tokens = "$dir/tokens.txt",
            modelType = "paraformer",
        )
    }

    1 -> {
        val dir = "icefall-asr-multidataset-pruned_transducer_stateless7-2023-05-04"
        OfflineModelConfig(
            transducer = OfflineTransducerModelConfig(
                encoder = "$dir/encoder-epoch-30-avg-4.int8.onnx",
                decoder = "$dir/decoder-epoch-30-avg-4.onnx",
                joiner = "$dir/joiner-epoch-30-avg-4.onnx",
            ),
            tokens = "$dir/tokens.txt",
            modelType = "zipformer",
        )
    }

    2 -> {
        val dir = "sherpa-onnx-whisper-tiny.en"
        OfflineModelConfig(
            whisper = OfflineWhisperModelConfig(
                encoder = "$dir/tiny.en-encoder.int8.onnx",
                decoder = "$dir/tiny.en-decoder.int8.onnx",
            ),
            tokens = "$dir/tiny.en-tokens.txt",
            modelType = "whisper",
        )
    }

    3 -> {
        val dir = "sherpa-onnx-whisper-base.en"
        OfflineModelConfig(
            whisper = OfflineWhisperModelConfig(
                encoder = "$dir/base.en-encoder.int8.onnx",
                decoder = "$dir/base.en-decoder.int8.onnx",
            ),
            tokens = "$dir/base.en-tokens.txt",
            modelType = "whisper",
        )
    }

    4 -> {
        val dir = "icefall-asr-zipformer-wenetspeech-20230615"
        OfflineModelConfig(
            transducer = OfflineTransducerModelConfig(
                encoder = "$dir/encoder-epoch-12-avg-4.int8.onnx",
                decoder = "$dir/decoder-epoch-12-avg-4.onnx",
                joiner = "$dir/joiner-epoch-12-avg-4.int8.onnx",
            ),
            tokens = "$dir/tokens.txt",
            modelType = "zipformer",
        )
    }

    5 -> {
        val dir = "sherpa-onnx-zipformer-multi-zh-hans-2023-9-2"
        OfflineModelConfig(
            transducer = OfflineTransducerModelConfig(
                encoder = "$dir/encoder-epoch-20-avg-1.int8.onnx",
                decoder = "$dir/decoder-epoch-20-avg-1.onnx",
                joiner = "$dir/joiner-epoch-20-avg-1.int8.onnx",
            ),
            tokens = "$dir/tokens.txt",
            modelType = "zipformer2",
        )
    }

    else -> null
}

View File

@@ -1,28 +0,0 @@
package com.k2fsa.sherpa.onnx

import android.content.res.AssetManager

/**
 * JNI wrapper for reading mono wave files via libsherpa-onnx-jni.
 * The `external` function names and signatures are bound by JNI
 * registration — do not rename.
 */
class WaveReader {
    companion object {
        // Read a mono wave file asset
        // The returned array has two entries:
        //  - the first entry contains an 1-D float array
        //  - the second entry is the sample rate
        external fun readWaveFromAsset(
            assetManager: AssetManager,
            filename: String,
        ): Array<Any>

        // Read a mono wave file from disk
        // The returned array has two entries:
        //  - the first entry contains an 1-D float array
        //  - the second entry is the sample rate
        external fun readWaveFromFile(
            filename: String,
        ): Array<Any>

        init {
            System.loadLibrary("sherpa-onnx-jni")
        }
    }
}