Add Python API for keyword spotting (#576)

* Add alsa & microphone support for keyword spotting

* Add python wrapper
This commit is contained in:
Wei Kang
2024-03-01 09:31:11 +08:00
committed by GitHub
parent 8b7928e7d6
commit 734bbd91dc
15 changed files with 1191 additions and 1 deletion

View File

@@ -5,6 +5,7 @@ pybind11_add_module(_sherpa_onnx
display.cc
endpoint.cc
features.cc
keyword-spotter.cc
offline-ctc-fst-decoder-config.cc
offline-lm-config.cc
offline-model-config.cc

View File

@@ -0,0 +1,82 @@
// sherpa-onnx/python/csrc/keyword-spotter.cc
//
// Copyright (c) 2024 Xiaomi Corporation
#include "sherpa-onnx/python/csrc/keyword-spotter.h"
#include <string>
#include <vector>
#include "sherpa-onnx/csrc/keyword-spotter.h"
namespace sherpa_onnx {
// Binds KeywordResult to Python as `KeywordResult` with three read-only
// properties: keyword (str), tokens (List[str]) and timestamps (List[float]).
static void PybindKeywordResult(py::module *m) {
  using PyClass = KeywordResult;
  py::class_<PyClass>(*m, "KeywordResult")
      .def_property_readonly(
          "keyword",
          [](const PyClass &self) -> py::str {
            // PyUnicode_DecodeUTF8() returns a *new* reference, so we must
            // steal it; constructing py::str from the raw pointer would
            // incref/convert and leak one reference per access.
            // "ignore" drops undecodable bytes instead of raising
            // UnicodeDecodeError.
            return py::reinterpret_steal<py::str>(PyUnicode_DecodeUTF8(
                self.keyword.c_str(), self.keyword.size(), "ignore"));
          })
      .def_property_readonly(
          "tokens",
          // Returned by value: the Python caller gets an independent list.
          [](const PyClass &self) -> std::vector<std::string> {
            return self.tokens;
          })
      .def_property_readonly(
          "timestamps",
          [](const PyClass &self) -> std::vector<float> {
            return self.timestamps;
          });
}
// Binds KeywordSpotterConfig to Python as `KeywordSpotterConfig`.
//
// The constructor keyword arguments mirror the C++ struct fields in the
// same order as the py::init signature; everything except feat_config and
// model_config has a default, so Python users only need to pass those two.
// Each field is additionally exposed as a mutable attribute, and __str__
// delegates to the C++ ToString() for easy debugging/printing.
static void PybindKeywordSpotterConfig(py::module *m) {
  using PyClass = KeywordSpotterConfig;
  py::class_<PyClass>(*m, "KeywordSpotterConfig")
      .def(py::init<const FeatureExtractorConfig &, const OnlineModelConfig &,
                    int32_t, int32_t, float, float, const std::string &>(),
           py::arg("feat_config"), py::arg("model_config"),
           py::arg("max_active_paths") = 4, py::arg("num_trailing_blanks") = 1,
           py::arg("keywords_score") = 1.0,
           py::arg("keywords_threshold") = 0.25, py::arg("keywords_file") = "")
      .def_readwrite("feat_config", &PyClass::feat_config)
      .def_readwrite("model_config", &PyClass::model_config)
      .def_readwrite("max_active_paths", &PyClass::max_active_paths)
      .def_readwrite("num_trailing_blanks", &PyClass::num_trailing_blanks)
      .def_readwrite("keywords_score", &PyClass::keywords_score)
      .def_readwrite("keywords_threshold", &PyClass::keywords_threshold)
      .def_readwrite("keywords_file", &PyClass::keywords_file)
      .def("__str__", &PyClass::ToString);
}
// Binds the KeywordSpotter class to Python, first registering its helper
// types (KeywordResult, KeywordSpotterConfig).
//
// Every method uses py::call_guard<py::gil_scoped_release> so the GIL is
// released while the C++ code runs, letting other Python threads make
// progress during construction and decoding.
void PybindKeywordSpotter(py::module *m) {
  PybindKeywordResult(m);
  PybindKeywordSpotterConfig(m);
  using PyClass = KeywordSpotter;
  py::class_<PyClass>(*m, "KeywordSpotter")
      .def(py::init<const KeywordSpotterConfig &>(), py::arg("config"),
           py::call_guard<py::gil_scoped_release>())
      // Two create_stream overloads: one without arguments and one taking a
      // per-stream `keywords` string (forwarded to CreateStream(keywords)).
      .def(
          "create_stream",
          [](const PyClass &self) { return self.CreateStream(); },
          py::call_guard<py::gil_scoped_release>())
      .def(
          "create_stream",
          [](PyClass &self, const std::string &keywords) {
            return self.CreateStream(keywords);
          },
          py::arg("keywords"), py::call_guard<py::gil_scoped_release>())
      .def("is_ready", &PyClass::IsReady,
           py::call_guard<py::gil_scoped_release>())
      .def("decode_stream", &PyClass::DecodeStream,
           py::call_guard<py::gil_scoped_release>())
      // Batch decoding: accepts a Python list of OnlineStream; the vector is
      // taken by value and its contiguous data() is passed to DecodeStreams.
      .def(
          "decode_streams",
          [](PyClass &self, std::vector<OnlineStream *> ss) {
            self.DecodeStreams(ss.data(), ss.size());
          },
          py::call_guard<py::gil_scoped_release>())
      .def("get_result", &PyClass::GetResult,
           py::call_guard<py::gil_scoped_release>());
}
} // namespace sherpa_onnx

View File

@@ -0,0 +1,16 @@
// sherpa-onnx/python/csrc/keyword-spotter.h
//
// Copyright (c) 2024 Xiaomi Corporation
#ifndef SHERPA_ONNX_PYTHON_CSRC_KEYWORD_SPOTTER_H_
#define SHERPA_ONNX_PYTHON_CSRC_KEYWORD_SPOTTER_H_
#include "sherpa-onnx/python/csrc/sherpa-onnx.h"
namespace sherpa_onnx {
// Registers the KeywordSpotter-related bindings (KeywordResult,
// KeywordSpotterConfig, KeywordSpotter) on the given pybind11 module.
// Implemented in keyword-spotter.cc.
void PybindKeywordSpotter(py::module *m);
}
#endif // SHERPA_ONNX_PYTHON_CSRC_KEYWORD_SPOTTER_H_

View File

@@ -8,6 +8,7 @@
#include "sherpa-onnx/python/csrc/display.h"
#include "sherpa-onnx/python/csrc/endpoint.h"
#include "sherpa-onnx/python/csrc/features.h"
#include "sherpa-onnx/python/csrc/keyword-spotter.h"
#include "sherpa-onnx/python/csrc/offline-ctc-fst-decoder-config.h"
#include "sherpa-onnx/python/csrc/offline-lm-config.h"
#include "sherpa-onnx/python/csrc/offline-model-config.h"
@@ -35,6 +36,7 @@ PYBIND11_MODULE(_sherpa_onnx, m) {
PybindOnlineStream(&m);
PybindEndpoint(&m);
PybindOnlineRecognizer(&m);
PybindKeywordSpotter(&m);
PybindDisplay(&m);

View File

@@ -17,6 +17,7 @@ from _sherpa_onnx import (
VoiceActivityDetector,
)
from .keyword_spotter import KeywordSpotter
from .offline_recognizer import OfflineRecognizer
from .online_recognizer import OnlineRecognizer
from .utils import text2token

View File

@@ -0,0 +1,147 @@
# Copyright (c) 2023 Xiaomi Corporation
from pathlib import Path
from typing import List, Optional
from _sherpa_onnx import (
FeatureExtractorConfig,
KeywordSpotterConfig,
OnlineModelConfig,
OnlineTransducerModelConfig,
OnlineStream,
)
from _sherpa_onnx import KeywordSpotter as _KeywordSpotter
def _assert_file_exists(f: str):
assert Path(f).is_file(), f"{f} does not exist"
class KeywordSpotter(object):
    """Open-vocabulary keyword spotting with streaming transducer models.

    Usage examples:

      - https://github.com/k2-fsa/sherpa-onnx/blob/master/python-api-examples/keyword-spotter.py
      - https://github.com/k2-fsa/sherpa-onnx/blob/master/python-api-examples/keyword-spotter-from-microphone.py
    """

    def __init__(
        self,
        tokens: str,
        encoder: str,
        decoder: str,
        joiner: str,
        keywords_file: str,
        num_threads: int = 2,
        sample_rate: float = 16000,
        feature_dim: int = 80,
        max_active_paths: int = 4,
        keywords_score: float = 1.0,
        keywords_threshold: float = 0.25,
        num_trailing_blanks: int = 1,
        provider: str = "cpu",
    ):
        """Create a keyword spotter from a streaming transducer model.

        Pre-trained models for different languages (e.g., Chinese, English)
        can be downloaded from
        `<https://k2-fsa.github.io/sherpa/onnx/kws/pretrained_models/index.html>`_.

        Args:
          tokens:
            Path to ``tokens.txt``. Each line contains two columns::

                symbol integer_id
          encoder:
            Path to ``encoder.onnx``.
          decoder:
            Path to ``decoder.onnx``.
          joiner:
            Path to ``joiner.onnx``.
          keywords_file:
            Path to the keywords file: one word/phrase per line, with the
            bpe/cjkchar/pinyin units of each phrase separated by spaces.
          num_threads:
            Number of threads used for neural network computation.
          sample_rate:
            Sample rate of the data used to train the model.
          feature_dim:
            Dimension of the features used to train the model.
          max_active_paths:
            Maximum number of active paths kept during modified beam search.
          keywords_score:
            Boosting score added to each keyword token; the larger it is,
            the easier a keyword survives beam search.
          keywords_threshold:
            Trigger threshold (a probability) for a keyword; the larger it
            is, the harder the keyword is to trigger.
          num_trailing_blanks:
            Number of trailing blanks that must follow a keyword. Set this
            to a larger value (e.g., 8) when your keywords have tokens that
            overlap with each other.
          provider:
            onnxruntime execution provider. Valid values: cpu, cuda, coreml.
        """
        for path in (tokens, encoder, decoder, joiner):
            _assert_file_exists(path)
        assert num_threads > 0, num_threads

        config = KeywordSpotterConfig(
            feat_config=FeatureExtractorConfig(
                sampling_rate=sample_rate,
                feature_dim=feature_dim,
            ),
            model_config=OnlineModelConfig(
                transducer=OnlineTransducerModelConfig(
                    encoder=encoder,
                    decoder=decoder,
                    joiner=joiner,
                ),
                tokens=tokens,
                num_threads=num_threads,
                provider=provider,
            ),
            max_active_paths=max_active_paths,
            num_trailing_blanks=num_trailing_blanks,
            keywords_score=keywords_score,
            keywords_threshold=keywords_threshold,
            keywords_file=keywords_file,
        )
        # The underlying C++ keyword spotter; all methods delegate to it.
        self.keyword_spotter = _KeywordSpotter(config)

    def create_stream(self, keywords: Optional[str] = None):
        """Create a new stream. If ``keywords`` is given, it is used for
        this stream; otherwise the keywords from ``keywords_file`` apply."""
        if keywords is not None:
            return self.keyword_spotter.create_stream(keywords)
        return self.keyword_spotter.create_stream()

    def decode_stream(self, s: OnlineStream):
        """Decode a single stream."""
        self.keyword_spotter.decode_stream(s)

    def decode_streams(self, ss: List[OnlineStream]):
        """Decode a batch of streams in parallel."""
        self.keyword_spotter.decode_streams(ss)

    def is_ready(self, s: OnlineStream) -> bool:
        """Return True if the stream has enough data to be decoded."""
        return self.keyword_spotter.is_ready(s)

    def get_result(self, s: OnlineStream) -> str:
        """Return the detected keyword (empty string if none), stripped of
        surrounding whitespace."""
        return self.keyword_spotter.get_result(s).keyword.strip()

    def tokens(self, s: OnlineStream) -> List[str]:
        """Return the tokens of the detected keyword."""
        return self.keyword_spotter.get_result(s).tokens

    def timestamps(self, s: OnlineStream) -> List[float]:
        """Return the timestamps of the detected keyword's tokens."""
        return self.keyword_spotter.get_result(s).timestamps

View File

@@ -20,6 +20,7 @@ endfunction()
# please sort the files in alphabetic order
set(py_test_files
test_feature_extractor_config.py
test_keyword_spotter.py
test_offline_recognizer.py
test_online_recognizer.py
test_online_transducer_model_config.py

View File

@@ -0,0 +1,170 @@
# sherpa-onnx/python/tests/test_keyword_spotter.py
#
# Copyright (c) 2024 Xiaomi Corporation
#
# To run this single test, use
#
# ctest --verbose -R test_keyword_spotter_py
import unittest
import wave
from pathlib import Path
from typing import Tuple
import numpy as np
import sherpa_onnx
d = "/tmp/onnx-models"
# Please refer to
# https://k2-fsa.github.io/sherpa/onnx/kws/pretrained_models/index.html
# to download pre-trained models for testing
def read_wave(wave_filename: str) -> Tuple[np.ndarray, int]:
    """Read a single-channel, 16-bit wave file.

    Args:
      wave_filename:
        Path to a wave file. It must be single channel with 16-bit samples;
        any sample rate is accepted.
    Returns:
      A tuple ``(samples, sample_rate)`` where ``samples`` is a 1-D
      np.float32 array normalized to the range [-1, 1].
    """
    with wave.open(wave_filename) as f:
        assert f.getnchannels() == 1, f.getnchannels()
        assert f.getsampwidth() == 2, f.getsampwidth()  # sample width in bytes
        raw = f.readframes(f.getnframes())
        sample_rate = f.getframerate()
    pcm = np.frombuffer(raw, dtype=np.int16)
    # int16 range is [-32768, 32767]; dividing by 32768 maps it into [-1, 1].
    return pcm.astype(np.float32) / 32768, sample_rate
class TestKeywordSpotter(unittest.TestCase):
    """End-to-end tests for sherpa_onnx.KeywordSpotter.

    The tests are skipped silently when the pre-trained models are not
    present under ``d`` (see the download link at the top of this file).
    """

    def _run_keyword_spotting(self, model_dir: str, wave_stems, test_name: str):
        """Run keyword spotting for one pre-trained model.

        Args:
          model_dir: model directory name under ``d``.
          wave_stems: stems of the test wave files, e.g. ["0", "1"].
          test_name: name of the calling test, used in the skip message.
        """
        for use_int8 in [True, False]:
            # Bug fix: the non-int8 branch previously pointed at the same
            # .int8.onnx files, so the float32 models were never exercised.
            suffix = ".int8" if use_int8 else ""
            prefix = f"{d}/{model_dir}"
            encoder = f"{prefix}/encoder-epoch-12-avg-2-chunk-16-left-64{suffix}.onnx"
            decoder = f"{prefix}/decoder-epoch-12-avg-2-chunk-16-left-64{suffix}.onnx"
            joiner = f"{prefix}/joiner-epoch-12-avg-2-chunk-16-left-64{suffix}.onnx"
            tokens = f"{prefix}/tokens.txt"
            keywords_file = f"{prefix}/test_wavs/test_keywords.txt"
            waves = [f"{prefix}/test_wavs/{stem}.wav" for stem in wave_stems]

            if not Path(encoder).is_file():
                print(f"skipping {test_name}()")
                return

            keyword_spotter = sherpa_onnx.KeywordSpotter(
                encoder=encoder,
                decoder=decoder,
                joiner=joiner,
                tokens=tokens,
                num_threads=1,
                keywords_file=keywords_file,
                provider="cpu",
            )

            # Feed every wave (plus a short tail padding so the final frames
            # are flushed) into its own stream.
            # NOTE: the loop variable must not be named `wave`, which would
            # shadow the `wave` module imported at the top of this file.
            streams = []
            for wave_filename in waves:
                s = keyword_spotter.create_stream()
                samples, sample_rate = read_wave(wave_filename)
                s.accept_waveform(sample_rate, samples)
                tail_paddings = np.zeros(int(0.2 * sample_rate), dtype=np.float32)
                s.accept_waveform(sample_rate, tail_paddings)
                s.input_finished()
                streams.append(s)

            # Decode all streams in lock step until none has data left;
            # collect every detected keyword per stream.
            results = [""] * len(streams)
            while True:
                ready_list = []
                for i, s in enumerate(streams):
                    if keyword_spotter.is_ready(s):
                        ready_list.append(s)
                        r = keyword_spotter.get_result(s)
                        if r:
                            print(f"{r} is detected.")
                            results[i] += f"{r}/"
                if len(ready_list) == 0:
                    break
                keyword_spotter.decode_streams(ready_list)

            for wave_filename, result in zip(waves, results):
                # Drop the trailing "/" separator when printing.
                print(f"{wave_filename}\n{result[0:-1]}")
                print("-" * 10)

    def test_zipformer_transducer_en(self):
        self._run_keyword_spotting(
            "sherpa-onnx-kws-zipformer-gigaspeech-3.3M-2024-01-01",
            ["0", "1"],
            "test_zipformer_transducer_en",
        )

    def test_zipformer_transducer_cn(self):
        self._run_keyword_spotting(
            "sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01",
            ["3", "4", "5"],
            "test_zipformer_transducer_cn",
        )
if __name__ == "__main__":
unittest.main()