Add CI to build HAPs for HarmonyOS (#1578)

Author: Fangjun Kuang
Date: 2024-11-29 21:13:01 +08:00
Committed by: GitHub
Parent: be159f943e
Commit: 299f2392e2

11 changed files with 376 additions and 24 deletions

oh-package-lock.json5 (new file):

@@ -0,0 +1,29 @@
+{
+  "meta": {
+    "stableOrder": true
+  },
+  "lockfileVersion": 3,
+  "ATTENTION": "THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.",
+  "specifiers": {
+    "libsherpa_onnx.so@../oh_modules/.ohpm/sherpa_onnx@1.10.32/oh_modules/sherpa_onnx/src/main/cpp/types/libsherpa_onnx": "libsherpa_onnx.so@../oh_modules/.ohpm/sherpa_onnx@1.10.32/oh_modules/sherpa_onnx/src/main/cpp/types/libsherpa_onnx",
+    "sherpa_onnx@1.10.32": "sherpa_onnx@1.10.32"
+  },
+  "packages": {
+    "libsherpa_onnx.so@../oh_modules/.ohpm/sherpa_onnx@1.10.32/oh_modules/sherpa_onnx/src/main/cpp/types/libsherpa_onnx": {
+      "name": "libsherpa_onnx.so",
+      "version": "1.0.0",
+      "resolved": "../oh_modules/.ohpm/sherpa_onnx@1.10.32/oh_modules/sherpa_onnx/src/main/cpp/types/libsherpa_onnx",
+      "registryType": "local"
+    },
+    "sherpa_onnx@1.10.32": {
+      "name": "sherpa_onnx",
+      "version": "1.10.32",
+      "integrity": "sha512-yHYmWoeqhrunOqGr9gxPJJH/8+rdwcKFOW6onYByVObQVpbqypslg301IjGm9xpnc5bJEkO3S9sra2zQTpPA/w==",
+      "resolved": "https://ohpm.openharmony.cn/ohpm/sherpa_onnx/-/sherpa_onnx-1.10.32.har",
+      "registryType": "ohpm",
+      "dependencies": {
+        "libsherpa_onnx.so": "file:./src/main/cpp/types/libsherpa_onnx"
+      }
+    }
+  }
+}

oh-package.json5:

@@ -5,6 +5,9 @@
   "main": "",
   "author": "",
   "license": "",
-  "dependencies": {}
+  "dependencies": {
+    // please see https://ohpm.openharmony.cn/#/cn/detail/sherpa_onnx
+    "sherpa_onnx": "1.10.32",
+  }
 }
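Together with the autogenerated lock file above, this is the whole dependency change: sherpa_onnx 1.10.32 comes from the ohpm registry, and its bundled native binding libsherpa_onnx.so is resolved locally from inside the downloaded package; running `ohpm install` after editing this manifest regenerates the lock file. A minimal sketch of consuming the dependency from ArkTS, using only identifiers that appear in the files changed below:

    // Minimal consumption sketch, not code from this commit; the import path
    // and identifiers match the worker file changed later in this commit.
    import { OfflineRecognizerConfig } from 'sherpa_onnx';

    const config: OfflineRecognizerConfig = new OfflineRecognizerConfig();
    config.ruleFsts = ''; // same field the worker sets below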

ArkTS model-config source (path not shown in this view):

@@ -4,7 +4,7 @@
 import { OfflineModelConfig } from 'sherpa_onnx';

 export function getOfflineModelConfig(type: number): OfflineModelConfig {
-  const c = new OfflineModelConfig();
+  const c: OfflineModelConfig = new OfflineModelConfig();
   switch (type) {
     case 0: {
       const modelDir = 'sherpa-onnx-paraformer-zh-2023-09-14'
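The only change in this file is the explicit OfflineModelConfig annotation on `c`; the switch body is unchanged and truncated in this view. For orientation, a hedged sketch of what one branch plausibly looks like, modeled on the visible `case 0` Paraformer entry; the field names `paraformer.model` and `tokens` follow sherpa-onnx conventions in its other bindings and are assumptions here, not taken from this diff:

    // Hedged sketch of one branch of the switch; field and file names are
    // assumptions based on sherpa-onnx conventions, not code from this commit.
    function getOfflineModelConfigSketch(type: number): OfflineModelConfig {
      const c: OfflineModelConfig = new OfflineModelConfig();
      switch (type) {
        case 0: {
          const modelDir = 'sherpa-onnx-paraformer-zh-2023-09-14';
          c.paraformer.model = `${modelDir}/model.int8.onnx`; // assumed file name
          c.tokens = `${modelDir}/tokens.txt`;                // assumed file name
          break;
        }
      }
      return c;
    }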

ArkTS worker source (path not shown in this view):

@@ -2,8 +2,11 @@ import { ErrorEvent, MessageEvents, ThreadWorkerGlobalScope, worker } from '@kit
 import {
   OfflineRecognizer,
   OfflineRecognizerConfig,
+  OfflineStream,
+  OnlineRecognizerResult,
   readWaveFromBinary,
   SileroVadConfig,
+  SpeechSegment,
   Vad,
   VadConfig,
 } from 'sherpa_onnx';
@@ -18,7 +21,7 @@ let vad: Vad; // vad for decoding files
 function initVad(context: Context): Vad {
   let mgr = context.resourceManager;

-  const config = new VadConfig(
+  const config: VadConfig = new VadConfig(
     new SileroVadConfig(
       'silero_vad.onnx',
       0.5,
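The SileroVadConfig constructor call is truncated here after the model path and the 0.5 threshold. Judging from SileroVadConfig in sherpa-onnx's other language bindings, the remaining arguments are minimum silence duration, minimum speech duration, and window size; a hedged sketch of a complete call, where every value after the first two is an illustrative assumption:

    // Hedged sketch of a full construction; only 'silero_vad.onnx' and 0.5
    // appear in this diff, the remaining values are typical but assumed.
    const config: VadConfig = new VadConfig(
      new SileroVadConfig(
        'silero_vad.onnx', // VAD model bundled with the app
        0.5,               // speech probability threshold
        0.25,              // min silence duration in seconds (assumption)
        0.5,               // min speech duration in seconds (assumption)
        512,               // window size in samples (assumption; cf. windowSize below)
      ),
      // any further VadConfig arguments are omitted in this sketch
    );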
@@ -37,7 +40,7 @@ function initVad(context: Context): Vad {
 function initNonStreamingAsr(context: Context): OfflineRecognizer {
   let mgr = context.resourceManager;

-  const config = new OfflineRecognizerConfig();
+  const config: OfflineRecognizerConfig = new OfflineRecognizerConfig();

   // Note that you can switch to a new model by changing type
   //
@@ -61,7 +64,13 @@ function initNonStreamingAsr(context: Context): OfflineRecognizer {
   const type = 2;
   config.modelConfig = getOfflineModelConfig(type);
   config.modelConfig.debug = true;
-  return new OfflineRecognizer(config, mgr)
+  config.ruleFsts = '';
+  return new OfflineRecognizer(config, mgr);
 }

+interface Wave {
+  samples: Float32Array;
+  sampleRate: number;
+}
+
 function decode(filename: string): string {
@@ -71,44 +80,44 @@ function decode(filename: string): string {
   const stat = fileIo.statSync(fp.fd);
   const arrayBuffer = new ArrayBuffer(stat.size);
   fileIo.readSync(fp.fd, arrayBuffer);
-  const data = new Uint8Array(arrayBuffer);
+  const data: Uint8Array = new Uint8Array(arrayBuffer);

-  const wave = readWaveFromBinary(data);
+  const wave: Wave = readWaveFromBinary(data);
   console.log(`sample rate ${wave.sampleRate}`);
   console.log(`samples length ${wave.samples.length}`);

   const resultList: string[] = [];
-  const windowSize = vad.config.sileroVad.windowSize;
+  const windowSize: number = vad.config.sileroVad.windowSize;
   for (let i = 0; i < wave.samples.length; i += windowSize) {
-    const thisWindow = wave.samples.subarray(i, i + windowSize)
+    const thisWindow: Float32Array = wave.samples.subarray(i, i + windowSize)
     vad.acceptWaveform(thisWindow);
     if (i + windowSize >= wave.samples.length) {
       vad.flush();
     }
     while (!vad.isEmpty()) {
-      const segment = vad.front();
-      const _startTime = (segment.start / wave.sampleRate);
-      const _endTime = _startTime + segment.samples.length / wave.sampleRate;
+      const segment: SpeechSegment = vad.front();
+      const _startTime: number = (segment.start / wave.sampleRate);
+      const _endTime: number = _startTime + segment.samples.length / wave.sampleRate;
       if (_endTime - _startTime < 0.2) {
         vad.pop();
         continue;
       }

-      const startTime = _startTime.toFixed(2);
-      const endTime = _endTime.toFixed(2);
+      const startTime: string = _startTime.toFixed(2);
+      const endTime: string = _endTime.toFixed(2);

-      const progress = (segment.start + segment.samples.length) / wave.samples.length * 100;
+      const progress: number = (segment.start + segment.samples.length) / wave.samples.length * 100;
       workerPort.postMessage({ 'msgType': 'non-streaming-asr-vad-decode-progress', progress });

-      const stream = recognizer.createStream();
+      const stream: OfflineStream = recognizer.createStream();
       stream.acceptWaveform({ samples: segment.samples, sampleRate: wave.sampleRate });
       recognizer.decode(stream);
-      const result = recognizer.getResult(stream);
-      const text = `${startTime} -- ${endTime} ${result.text}`
+      const result: OnlineRecognizerResult = recognizer.getResult(stream);
+      const text: string = `${startTime} -- ${endTime} ${result.text}`
       resultList.push(text);
       console.log(`partial result ${text}`);
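The hunk is truncated here, but the pattern is complete enough to read: the loop feeds the VAD windowSize samples at a time, flushes it once the input is exhausted, drops segments shorter than 0.2 seconds, decodes each remaining segment on a fresh OfflineStream, and posts a progress message to the main thread after each segment. A hedged sketch of the receiving side on the UI thread; the worker script path and the message interface are assumptions mirroring the postMessage call above, not code from this commit:

    // Hypothetical main-thread counterpart to the worker above; the script
    // path and AsrMessage shape are assumptions, not code from this commit.
    import { worker, MessageEvents } from '@kit.ArkTS';

    interface AsrMessage {
      msgType: string;
      progress?: number;
    }

    const asrWorker = new worker.ThreadWorker('entry/ets/workers/NonStreamingAsrWorker.ets');

    asrWorker.onmessage = (e: MessageEvents) => {
      const msg = e.data as AsrMessage;
      if (msg.msgType === 'non-streaming-asr-vad-decode-progress' && msg.progress !== undefined) {
        console.log(`decode progress: ${msg.progress.toFixed(2)}%`);
      }
    };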