Add Dart API for audio tagging (#1181)

This commit is contained in:
Fangjun Kuang
2024-07-29 11:15:14 +08:00
committed by GitHub
parent 69b6b47d91
commit cd1fedaa49
30 changed files with 504 additions and 18 deletions

View File

@@ -5,7 +5,7 @@ This directory contains examples for Dart API.
You can find the package at
https://pub.dev/packages/sherpa_onnx
## Descirption
## Description
| Directory | Description |
|-----------|-------------|
@@ -15,6 +15,7 @@ https://pub.dev/packages/sherpa_onnx
| [./tts](./tts)| Example for text to speech|
| [./vad](./vad)| Example for voice activity detection|
| [./vad-with-non-streaming-asr](./vad-with-non-streaming-asr)| Example for voice activity detection with non-streaming speech recognition. You can use it to generate subtitles.|
| [./audio-tagging](./audio-tagging)| Example for audio tagging.|
## How to create an example in this folder

View File

@@ -0,0 +1,3 @@
# https://dart.dev/guides/libraries/private-files
# Created by `dart pub`
.dart_tool/

View File

@@ -0,0 +1,8 @@
# Introduction
This example shows how to use the Dart API from sherpa-onnx for audio tagging.
| File | Description|
|------|------------|
|[./bin/zipformer.dart](./bin/zipformer.dart)| Use a Zipformer model for audio tagging. See [./run-zipformer.sh](./run-zipformer.sh)|
|[./bin/ced.dart](./bin/ced.dart)| Use a [CED](https://github.com/RicherMans/CED) model for audio tagging. See [./run-ced.sh](./run-ced.sh)|

View File

@@ -0,0 +1,30 @@
# This file configures the static analysis results for your project (errors,
# warnings, and lints).
#
# This enables the 'recommended' set of lints from `package:lints`.
# This set helps identify many issues that may lead to problems when running
# or consuming Dart code, and enforces writing Dart using a single, idiomatic
# style and format.
#
# If you want a smaller set of lints you can change this to specify
# 'package:lints/core.yaml'. These are just the most critical lints
# (the recommended set includes the core lints).
# The core lints are also what is used by pub.dev for scoring packages.
include: package:lints/recommended.yaml
# Uncomment the following section to specify additional rules.
# linter:
# rules:
# - camel_case_types
# analyzer:
# exclude:
# - path/to/excluded/files/**
# For more information about the core and recommended set of lints, see
# https://dart.dev/go/core-lints
# For additional information about configuring this file, see
# https://dart.dev/guides/language/analysis-options

View File

@@ -0,0 +1,54 @@
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
import './init.dart';
/// Audio tagging with a CED model.
///
/// Reads a wave file, runs the CED audio-tagging model on it, and prints
/// the top-k detected audio events.
void main(List<String> arguments) async {
  await initSherpaOnnx();

  final parser = ArgParser()
    // Fixed: this example uses a CED model, not a zipformer model.
    ..addOption('model', help: 'Path to the CED model')
    ..addOption('labels', help: 'Path to class_labels_indices.csv')
    ..addOption('top-k', help: 'topK events to be returned', defaultsTo: '5')
    ..addOption('wav', help: 'Path to test.wav to be tagged');

  final res = parser.parse(arguments);
  if (res['model'] == null || res['labels'] == null || res['wav'] == null) {
    print(parser.usage);
    exit(1);
  }

  final model = res['model'] as String;
  final labels = res['labels'] as String;
  // Fall back to 5 if --top-k is not a valid integer.
  final topK = int.tryParse(res['top-k'] as String) ?? 5;
  final wav = res['wav'] as String;

  // `ced:` (rather than `zipformer:`) selects the CED model type.
  final modelConfig = sherpa_onnx.AudioTaggingModelConfig(
    ced: model,
    numThreads: 1,
    debug: true,
    provider: 'cpu',
  );

  final config = sherpa_onnx.AudioTaggingConfig(
    model: modelConfig,
    labels: labels,
  );

  final at = sherpa_onnx.AudioTagging(config: config);

  final waveData = sherpa_onnx.readWave(wav);
  final stream = at.createStream();
  stream.acceptWaveform(
      samples: waveData.samples, sampleRate: waveData.sampleRate);

  final events = at.compute(stream: stream, topK: topK);
  print(events);

  // Release native resources explicitly; they are not managed by Dart's GC.
  stream.free();
  at.free();
}

View File

@@ -0,0 +1 @@
../../vad/bin/init.dart

View File

@@ -0,0 +1,59 @@
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
import './init.dart';
/// Audio tagging with a zipformer model.
///
/// Reads a wave file, runs the zipformer audio-tagging model on it, and
/// prints the top-k detected audio events.
void main(List<String> arguments) async {
  await initSherpaOnnx();

  final parser = ArgParser();
  parser.addOption('model', help: 'Path to the zipformer model');
  parser.addOption('labels', help: 'Path to class_labels_indices.csv');
  parser.addOption('top-k', help: 'topK events to be returned', defaultsTo: '5');
  parser.addOption('wav', help: 'Path to test.wav to be tagged');

  final options = parser.parse(arguments);
  if (options['model'] == null ||
      options['labels'] == null ||
      options['wav'] == null) {
    print(parser.usage);
    exit(1);
  }

  final modelPath = options['model'] as String;
  final labelsPath = options['labels'] as String;
  // Invalid --top-k values silently fall back to 5.
  final topK = int.tryParse(options['top-k'] as String) ?? 5;
  final wavPath = options['wav'] as String;

  // Build the tagger from a nested configuration in one expression.
  final tagger = sherpa_onnx.AudioTagging(
    config: sherpa_onnx.AudioTaggingConfig(
      model: sherpa_onnx.AudioTaggingModelConfig(
        zipformer: sherpa_onnx.OfflineZipformerAudioTaggingModelConfig(
          model: modelPath,
        ),
        numThreads: 1,
        debug: true,
        provider: 'cpu',
      ),
      labels: labelsPath,
    ),
  );

  final wave = sherpa_onnx.readWave(wavPath);
  final stream = tagger.createStream();
  stream.acceptWaveform(samples: wave.samples, sampleRate: wave.sampleRate);

  final events = tagger.compute(stream: stream, topK: topK);
  print(events);

  // Native resources must be freed explicitly.
  stream.free();
  tagger.free();
}

View File

@@ -0,0 +1,17 @@
name: audio_tagging
description: >
This example demonstrates how to use the Dart API for audio tagging.
version: 1.0.0
environment:
sdk: ^3.4.0
dependencies:
sherpa_onnx: ^1.10.19
path: ^1.9.0
args: ^2.5.0
dev_dependencies:
lints: ^3.0.0

View File

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# Download the CED mini audio-tagging model (if not already present) and
# run the Dart audio-tagging example on each of its bundled test waves.
set -ex

dart pub get

d=sherpa-onnx-ced-mini-audio-tagging-2024-04-19

# Fetch and unpack the model archive only on the first run.
if [ ! -f ./$d/model.onnx ]; then
  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/audio-tagging-models/$d.tar.bz2
  tar xvf $d.tar.bz2
  rm $d.tar.bz2
fi

# Tag each of the six bundled test wave files.
for w in {1..6}; do
  dart run \
    ./bin/ced.dart \
    --model ./$d/model.int8.onnx \
    --labels ./$d/class_labels_indices.csv \
    --wav ./$d/test_wavs/$w.wav
done

View File

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# Download the zipformer audio-tagging model (if not already present) and
# run the Dart audio-tagging example on each of its bundled test waves.
set -ex

dart pub get

d=sherpa-onnx-zipformer-audio-tagging-2024-04-09

# Fetch and unpack the model archive only on the first run.
if [ ! -f ./$d/model.onnx ]; then
  curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/audio-tagging-models/$d.tar.bz2
  tar xvf $d.tar.bz2
  rm $d.tar.bz2
fi

# Tag each of the six bundled test wave files.
for w in {1..6}; do
  dart run \
    ./bin/zipformer.dart \
    --model ./$d/model.int8.onnx \
    --labels ./$d/class_labels_indices.csv \
    --wav ./$d/test_wavs/$w.wav
done

View File

@@ -1,6 +1,5 @@
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;

View File

@@ -1,6 +1,5 @@
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;

View File

@@ -1,6 +1,5 @@
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;

View File

@@ -1,6 +1,5 @@
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;

View File

@@ -1,6 +1,5 @@
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;

View File

@@ -1,6 +1,5 @@
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;

View File

@@ -1,6 +1,5 @@
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;

View File

@@ -1,6 +1,5 @@
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;

View File

@@ -1,6 +1,5 @@
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
@@ -65,5 +64,5 @@ void main(List<String> arguments) async {
samples: audio.samples,
sampleRate: audio.sampleRate,
);
print('Saved to ${outputWav}');
print('Saved to $outputWav');
}

View File

@@ -80,5 +80,5 @@ void main(List<String> arguments) async {
samples: audio.samples,
sampleRate: audio.sampleRate,
);
print('Saved to ${outputWav}');
print('Saved to $outputWav');
}

View File

@@ -1,6 +1,5 @@
// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';
import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;
@@ -82,5 +81,5 @@ void main(List<String> arguments) async {
samples: audio.samples,
sampleRate: audio.sampleRate,
);
print('Saved to ${outputWav}');
print('Saved to $outputWav');
}

View File

@@ -77,5 +77,5 @@ void main(List<String> arguments) async {
sherpa_onnx.writeWave(
filename: outputWav, samples: s, sampleRate: waveData.sampleRate);
print('Saved to ${outputWav}');
print('Saved to $outputWav');
}