Add Dart API for SenseVoice (#1159)

This commit is contained in:
Fangjun Kuang
2024-07-21 21:48:12 +08:00
committed by GitHub
parent 70d14353bb
commit ffdb23a8ec
17 changed files with 169 additions and 49 deletions

View File

@@ -6,6 +6,10 @@ cd dart-api-examples
 pushd non-streaming-asr
 
+echo '----------SenseVoice----------'
+./run-sense-voice.sh
+rm -rf sherpa-onnx-*
+
 echo '----------NeMo transducer----------'
 ./run-nemo-transducer.sh
 rm -rf sherpa-onnx-*

View File

@@ -11,4 +11,5 @@ This folder contains examples for non-streaming ASR with Dart API.
 |[./bin/whisper.dart](./bin/whisper.dart)| Use whisper for speech recognition. See [./run-whisper.sh](./run-whisper.sh)|
 |[./bin/zipformer-transducer.dart](./bin/zipformer-transducer.dart)| Use a zipformer transducer for speech recognition. See [./run-zipformer-transducer.sh](./run-zipformer-transducer.sh)|
 |[./bin/vad-with-paraformer.dart](./bin/vad-with-paraformer.dart)| Use a [silero-vad](https://github.com/snakers4/silero-vad) with paraformer for speech recognition. See [./run-vad-with-paraformer.sh](./run-vad-with-paraformer.sh)|
+|[./bin/sense-voice.dart](./bin/sense-voice.dart)| Use a SenseVoice CTC model for speech recognition. See [./run-sense-voice.sh](./run-sense-voice.sh)|
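
Condensed, the flow the new example implements looks like this (a sketch distilled from ./bin/sense-voice.dart as added in this commit; the file paths are placeholders, and initSherpaOnnx() is the examples' own helper from ./init.dart that loads the native library):

// A condensed sketch of ./bin/sense-voice.dart; paths are placeholders.
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
import './init.dart';

void main() async {
  await initSherpaOnnx(); // loads the native sherpa-onnx library

  final config = sherpa_onnx.OfflineRecognizerConfig(
    model: sherpa_onnx.OfflineModelConfig(
      senseVoice: sherpa_onnx.OfflineSenseVoiceModelConfig(
        model: './model.int8.onnx',
        language: '', // empty string means auto-detect
        useInverseTextNormalization: true,
      ),
      tokens: './tokens.txt',
    ),
  );

  final recognizer = sherpa_onnx.OfflineRecognizer(config);
  final waveData = sherpa_onnx.readWave('./test.wav');
  final stream = recognizer.createStream();

  stream.acceptWaveform(
      samples: waveData.samples, sampleRate: waveData.sampleRate);
  recognizer.decode(stream);
  print(recognizer.getResult(stream).text);

  stream.free();
  recognizer.free();
}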

View File

@@ -0,0 +1,61 @@
+// Copyright (c) 2024 Xiaomi Corporation
+import 'dart:io';
+import 'dart:typed_data';
+
+import 'package:args/args.dart';
+import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
+
+import './init.dart';
+
+void main(List<String> arguments) async {
+  await initSherpaOnnx();
+
+  final parser = ArgParser()
+    ..addOption('model', help: 'Path to the SenseVoice model')
+    ..addOption('tokens', help: 'Path to tokens.txt')
+    ..addOption('language',
+        help: 'auto, zh, en, ja, ko, yue, or leave it empty to use auto',
+        defaultsTo: '')
+    ..addOption('use-itn',
+        help: 'true to use inverse text normalization', defaultsTo: 'false')
+    ..addOption('input-wav', help: 'Path to input.wav to transcribe');
+
+  final res = parser.parse(arguments);
+  if (res['model'] == null ||
+      res['tokens'] == null ||
+      res['input-wav'] == null) {
+    print(parser.usage);
+    exit(1);
+  }
+
+  final model = res['model'] as String;
+  final tokens = res['tokens'] as String;
+  final inputWav = res['input-wav'] as String;
+  final language = res['language'] as String;
+  final useItn = (res['use-itn'] as String).toLowerCase() == 'true';
+
+  final senseVoice = sherpa_onnx.OfflineSenseVoiceModelConfig(
+      model: model, language: language, useInverseTextNormalization: useItn);
+
+  final modelConfig = sherpa_onnx.OfflineModelConfig(
+    senseVoice: senseVoice,
+    tokens: tokens,
+    debug: true,
+    numThreads: 1,
+  );
+  final config = sherpa_onnx.OfflineRecognizerConfig(model: modelConfig);
+  final recognizer = sherpa_onnx.OfflineRecognizer(config);
+
+  final waveData = sherpa_onnx.readWave(inputWav);
+  final stream = recognizer.createStream();
+
+  stream.acceptWaveform(
+      samples: waveData.samples, sampleRate: waveData.sampleRate);
+  recognizer.decode(stream);
+
+  final result = recognizer.getResult(stream);
+  print(result.text);
+
+  stream.free();
+  recognizer.free();
+}

View File

@@ -10,7 +10,7 @@ environment:
 
 # Add regular dependencies here.
 dependencies:
-  sherpa_onnx: ^1.10.16
+  sherpa_onnx: ^1.10.17
   path: ^1.9.0
   args: ^2.5.0

View File

@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -ex
+
+dart pub get
+
+if [ ! -f ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt ]; then
+  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
+  tar xvf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
+  rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
+fi
+
+dart run \
+  ./bin/sense-voice.dart \
+  --model ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.int8.onnx \
+  --tokens ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt \
+  --use-itn true \
+  --input-wav ./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/test_wavs/zh.wav

View File

@@ -11,7 +11,7 @@ environment:
 
 # Add regular dependencies here.
 dependencies:
-  sherpa_onnx: ^1.10.16
+  sherpa_onnx: ^1.10.17
   path: ^1.9.0
   args: ^2.5.0

View File

@@ -8,7 +8,7 @@ environment:
 
 # Add regular dependencies here.
 dependencies:
-  sherpa_onnx: ^1.10.16
+  sherpa_onnx: ^1.10.17
   path: ^1.9.0
   args: ^2.5.0

View File

@@ -9,7 +9,7 @@ environment:
   sdk: ^3.4.0
 
 dependencies:
-  sherpa_onnx: ^1.10.16
+  sherpa_onnx: ^1.10.17
   path: ^1.9.0
   args: ^2.5.0

View File

@@ -5,7 +5,7 @@ description: >
 
 publish_to: 'none'
 
-version: 1.10.16
+version: 1.10.17
 
 topics:
   - speech-recognition
@@ -30,7 +30,7 @@ dependencies:
   record: ^5.1.0
   url_launcher: ^6.2.6
 
-  sherpa_onnx: ^1.10.16
+  sherpa_onnx: ^1.10.17
   # sherpa_onnx:
   #   path: ../../flutter/sherpa_onnx

View File

@@ -5,7 +5,7 @@ description: >
 
 publish_to: 'none' # Remove this line if you wish to publish to pub.dev
 
-version: 1.10.16
+version: 1.10.17
 
 environment:
   sdk: '>=3.4.0 <4.0.0'
@@ -17,7 +17,7 @@ dependencies:
   cupertino_icons: ^1.0.6
   path_provider: ^2.1.3
   path: ^1.9.0
-  sherpa_onnx: ^1.10.16
+  sherpa_onnx: ^1.10.17
   url_launcher: ^6.2.6
   audioplayers: ^5.0.0

View File

@@ -79,6 +79,23 @@ class OfflineTdnnModelConfig {
   final String model;
 }
 
+class OfflineSenseVoiceModelConfig {
+  const OfflineSenseVoiceModelConfig({
+    this.model = '',
+    this.language = '',
+    this.useInverseTextNormalization = false,
+  });
+
+  @override
+  String toString() {
+    return 'OfflineSenseVoiceModelConfig(model: $model, language: $language, useInverseTextNormalization: $useInverseTextNormalization)';
+  }
+
+  final String model;
+  final String language;
+  final bool useInverseTextNormalization;
+}
+
 class OfflineLMConfig {
   const OfflineLMConfig({this.model = '', this.scale = 1.0});
 
@@ -98,6 +115,7 @@ class OfflineModelConfig {
     this.nemoCtc = const OfflineNemoEncDecCtcModelConfig(),
     this.whisper = const OfflineWhisperModelConfig(),
     this.tdnn = const OfflineTdnnModelConfig(),
+    this.senseVoice = const OfflineSenseVoiceModelConfig(),
     required this.tokens,
     this.numThreads = 1,
     this.debug = true,
@@ -110,7 +128,7 @@ class OfflineModelConfig {
   @override
   String toString() {
-    return 'OfflineModelConfig(transducer: $transducer, paraformer: $paraformer, nemoCtc: $nemoCtc, whisper: $whisper, tdnn: $tdnn, tokens: $tokens, numThreads: $numThreads, debug: $debug, provider: $provider, modelType: $modelType, modelingUnit: $modelingUnit, bpeVocab: $bpeVocab, telespeechCtc: $telespeechCtc)';
+    return 'OfflineModelConfig(transducer: $transducer, paraformer: $paraformer, nemoCtc: $nemoCtc, whisper: $whisper, tdnn: $tdnn, senseVoice: $senseVoice, tokens: $tokens, numThreads: $numThreads, debug: $debug, provider: $provider, modelType: $modelType, modelingUnit: $modelingUnit, bpeVocab: $bpeVocab, telespeechCtc: $telespeechCtc)';
   }
 
   final OfflineTransducerModelConfig transducer;
@@ -118,6 +136,7 @@ class OfflineModelConfig {
   final OfflineNemoEncDecCtcModelConfig nemoCtc;
   final OfflineWhisperModelConfig whisper;
   final OfflineTdnnModelConfig tdnn;
+  final OfflineSenseVoiceModelConfig senseVoice;
   final String tokens;
   final int numThreads;
@@ -219,6 +238,14 @@ class OfflineRecognizer {
     c.ref.model.tdnn.model = config.model.tdnn.model.toNativeUtf8();
 
+    c.ref.model.senseVoice.model =
+        config.model.senseVoice.model.toNativeUtf8();
+    c.ref.model.senseVoice.language =
+        config.model.senseVoice.language.toNativeUtf8();
+    c.ref.model.senseVoice.useInverseTextNormalization =
+        config.model.senseVoice.useInverseTextNormalization ? 1 : 0;
+
     c.ref.model.tokens = config.model.tokens.toNativeUtf8();
     c.ref.model.numThreads = config.model.numThreads;
@@ -254,6 +281,8 @@ class OfflineRecognizer {
     calloc.free(c.ref.model.modelType);
     calloc.free(c.ref.model.provider);
     calloc.free(c.ref.model.tokens);
+    calloc.free(c.ref.model.senseVoice.language);
+    calloc.free(c.ref.model.senseVoice.model);
     calloc.free(c.ref.model.tdnn.model);
     calloc.free(c.ref.model.whisper.task);
     calloc.free(c.ref.model.whisper.language);
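
A note on the free() additions: every toNativeUtf8() call in the config-filling code above copies a Dart string into native memory that the Dart GC never reclaims, so the teardown path needs exactly one calloc.free() per allocation, which is why two new senseVoice frees appear. A minimal sketch of the pairing (the string is a stand-in, not from this diff):

import 'package:ffi/ffi.dart';

void main() {
  // toNativeUtf8() copies the Dart string into freshly allocated
  // native memory and returns a Pointer<Utf8>.
  final p = 'hello'.toNativeUtf8();
  // ... pass p to a C function here ...
  // One allocation, one matching free, as in the senseVoice.model and
  // senseVoice.language pairs added in this commit.
  calloc.free(p);
}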

View File

@@ -87,6 +87,14 @@ final class SherpaOnnxOfflineTdnnModelConfig extends Struct {
   external Pointer<Utf8> model;
 }
 
+final class SherpaOnnxOfflineSenseVoiceModelConfig extends Struct {
+  external Pointer<Utf8> model;
+  external Pointer<Utf8> language;
+
+  @Int32()
+  external int useInverseTextNormalization;
+}
+
 final class SherpaOnnxOfflineLMConfig extends Struct {
   external Pointer<Utf8> model;
 
@@ -115,6 +123,8 @@ final class SherpaOnnxOfflineModelConfig extends Struct {
   external Pointer<Utf8> modelingUnit;
   external Pointer<Utf8> bpeVocab;
   external Pointer<Utf8> telespeechCtc;
+
+  external SherpaOnnxOfflineSenseVoiceModelConfig senseVoice;
 }
 
 final class SherpaOnnxOfflineRecognizerConfig extends Struct {
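
Because senseVoice is embedded by value at the end of SherpaOnnxOfflineModelConfig, the Dart field order and types must mirror the C declaration exactly, and the nested struct needs no allocation of its own; only the strings it points to do. A sketch of how the recognizer code in this commit writes into it (a hypothetical helper; struct names come from the bindings above, and the paths are placeholders):

import 'dart:ffi';
import 'package:ffi/ffi.dart';
// Assumes the struct definitions above (sherpa_onnx_bindings.dart) are in scope.

void fillSenseVoice(Pointer<SherpaOnnxOfflineRecognizerConfig> c) {
  // The embedded struct lives inside c's own allocation, so fields are
  // assigned in place; only the two strings allocate additional memory.
  c.ref.model.senseVoice.model = 'model.onnx'.toNativeUtf8(); // placeholder
  c.ref.model.senseVoice.language = ''.toNativeUtf8(); // '' selects auto
  c.ref.model.senseVoice.useInverseTextNormalization = 1; // C int, not bool
}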

View File

@@ -17,7 +17,7 @@ topics:
   - voice-activity-detection
 
 # remember to change the version in ../sherpa_onnx_macos/macos/sherpa_onnx_macos.podspec
-version: 1.10.16
+version: 1.10.17
 
 homepage: https://github.com/k2-fsa/sherpa-onnx
@@ -30,19 +30,19 @@ dependencies:
   flutter:
     sdk: flutter
 
-  sherpa_onnx_android: ^1.10.16
+  sherpa_onnx_android: ^1.10.17
   # path: ../sherpa_onnx_android
 
-  sherpa_onnx_macos: ^1.10.16
+  sherpa_onnx_macos: ^1.10.17
   # path: ../sherpa_onnx_macos
 
-  sherpa_onnx_linux: ^1.10.16
+  sherpa_onnx_linux: ^1.10.17
   # path: ../sherpa_onnx_linux
 #
-  sherpa_onnx_windows: ^1.10.16
+  sherpa_onnx_windows: ^1.10.17
   # path: ../sherpa_onnx_windows
 
-  sherpa_onnx_ios: ^1.10.16
+  sherpa_onnx_ios: ^1.10.17
   # sherpa_onnx_ios:
   #   path: ../sherpa_onnx_ios

View File

@@ -7,7 +7,7 @@
 # https://groups.google.com/g/dart-ffi/c/nUATMBy7r0c
 Pod::Spec.new do |s|
   s.name = 'sherpa_onnx_ios'
-  s.version = '1.10.16'
+  s.version = '1.10.17'
   s.summary = 'A new Flutter FFI plugin project.'
   s.description = <<-DESC
 A new Flutter FFI plugin project.

View File

@@ -4,7 +4,7 @@
 #
 Pod::Spec.new do |s|
   s.name = 'sherpa_onnx_macos'
-  s.version = '1.10.16'
+  s.version = '1.10.17'
   s.summary = 'sherpa-onnx Flutter FFI plugin project.'
   s.description = <<-DESC
 sherpa-onnx Flutter FFI plugin project.

View File

@@ -17,7 +17,7 @@ topics:
   - voice-activity-detection
 
 # remember to change the version in ../sherpa_onnx_macos/macos/sherpa_onnx.podspec
-version: 1.10.16
+version: 1.10.17
 
 homepage: https://github.com/k2-fsa/sherpa-onnx

View File

@@ -13,14 +13,15 @@ namespace sherpa_onnx {
 void CudaConfig::Register(ParseOptions *po) {
   po->Register("cuda-cudnn-conv-algo-search", &cudnn_conv_algo_search,
                "CuDNN convolution algrorithm search");
 }
 
 bool CudaConfig::Validate() const {
   if (cudnn_conv_algo_search < 1 || cudnn_conv_algo_search > 3) {
-    SHERPA_ONNX_LOGE("cudnn_conv_algo_search: '%d' is not a valid option."
-                     "Options : [1,3]. Check OnnxRT docs",
-                     cudnn_conv_algo_search);
+    SHERPA_ONNX_LOGE(
+        "cudnn_conv_algo_search: '%d' is not a valid option."
+        "Options : [1,3]. Check OnnxRT docs",
+        cudnn_conv_algo_search);
     return false;
   }
   return true;
@@ -37,41 +38,41 @@ std::string CudaConfig::ToString() const {
 void TensorrtConfig::Register(ParseOptions *po) {
   po->Register("trt-max-workspace-size", &trt_max_workspace_size,
                "Set TensorRT EP GPU memory usage limit.");
   po->Register("trt-max-partition-iterations", &trt_max_partition_iterations,
                "Limit partitioning iterations for model conversion.");
   po->Register("trt-min-subgraph-size", &trt_min_subgraph_size,
                "Set minimum size for subgraphs in partitioning.");
   po->Register("trt-fp16-enable", &trt_fp16_enable,
                "Enable FP16 precision for faster performance.");
   po->Register("trt-detailed-build-log", &trt_detailed_build_log,
                "Enable detailed logging of build steps.");
   po->Register("trt-engine-cache-enable", &trt_engine_cache_enable,
                "Enable caching of TensorRT engines.");
   po->Register("trt-timing-cache-enable", &trt_timing_cache_enable,
                "Enable use of timing cache to speed up builds.");
   po->Register("trt-engine-cache-path", &trt_engine_cache_path,
                "Set path to store cached TensorRT engines.");
   po->Register("trt-timing-cache-path", &trt_timing_cache_path,
                "Set path for storing timing cache.");
   po->Register("trt-dump-subgraphs", &trt_dump_subgraphs,
                "Dump optimized subgraphs for debugging.");
 }
 
 bool TensorrtConfig::Validate() const {
   if (trt_max_workspace_size < 0) {
-    SHERPA_ONNX_LOGE("trt_max_workspace_size: %lld is not valid.",
+    SHERPA_ONNX_LOGE("trt_max_workspace_size: %ld is not valid.",
                      trt_max_workspace_size);
     return false;
   }
   if (trt_max_partition_iterations < 0) {
     SHERPA_ONNX_LOGE("trt_max_partition_iterations: %d is not valid.",
                      trt_max_partition_iterations);
     return false;
   }
   if (trt_min_subgraph_size < 0) {
     SHERPA_ONNX_LOGE("trt_min_subgraph_size: %d is not valid.",
                      trt_min_subgraph_size);
     return false;
   }
@@ -83,23 +84,19 @@ std::string TensorrtConfig::ToString() const {
   os << "TensorrtConfig(";
   os << "trt_max_workspace_size=" << trt_max_workspace_size << ", ";
-  os << "trt_max_partition_iterations="
-     << trt_max_partition_iterations << ", ";
+  os << "trt_max_partition_iterations=" << trt_max_partition_iterations << ", ";
   os << "trt_min_subgraph_size=" << trt_min_subgraph_size << ", ";
-  os << "trt_fp16_enable=\""
-     << (trt_fp16_enable? "True" : "False") << "\", ";
+  os << "trt_fp16_enable=\"" << (trt_fp16_enable ? "True" : "False") << "\", ";
   os << "trt_detailed_build_log=\""
-     << (trt_detailed_build_log? "True" : "False") << "\", ";
+     << (trt_detailed_build_log ? "True" : "False") << "\", ";
   os << "trt_engine_cache_enable=\""
-     << (trt_engine_cache_enable? "True" : "False") << "\", ";
-  os << "trt_engine_cache_path=\""
-     << trt_engine_cache_path.c_str() << "\", ";
+     << (trt_engine_cache_enable ? "True" : "False") << "\", ";
+  os << "trt_engine_cache_path=\"" << trt_engine_cache_path.c_str() << "\", ";
   os << "trt_timing_cache_enable=\""
-     << (trt_timing_cache_enable? "True" : "False") << "\", ";
-  os << "trt_timing_cache_path=\""
-     << trt_timing_cache_path.c_str() << "\",";
-  os << "trt_dump_subgraphs=\""
-     << (trt_dump_subgraphs? "True" : "False") << "\" )";
+     << (trt_timing_cache_enable ? "True" : "False") << "\", ";
+  os << "trt_timing_cache_path=\"" << trt_timing_cache_path.c_str() << "\",";
+  os << "trt_dump_subgraphs=\"" << (trt_dump_subgraphs ? "True" : "False")
+     << "\" )";
   return os.str();
 }