Add scripts for exporting Piper TTS models to sherpa-onnx (#2299)

This commit is contained in:
Fangjun Kuang
2025-06-17 14:23:39 +08:00
committed by GitHub
parent 4ae9382bae
commit 2913cce77c
8 changed files with 1914 additions and 1 deletions

5
scripts/piper/.gitignore vendored Normal file
View File

@@ -0,0 +1,5 @@
*.sh
*.onnx
*.json
MODEL_CARD
generate_samples-vits-piper*.py

117
scripts/piper/add_meta_data.py Executable file
View File

@@ -0,0 +1,117 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
import argparse
import json
from typing import Any, Dict
import onnx
from iso639 import Lang
def get_args():
    """Parse command-line flags identifying a piper model.

    For en_GB-semaine-medium:
      --name semaine
      --kind medium
      --lang en_GB
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--name",
        type=str,
        required=True,
        help="Voice name of the model, e.g., semaine",
    )

    parser.add_argument(
        "--kind",
        type=str,
        required=True,
        help="Quality kind of the model, e.g., low, medium, high",
    )

    parser.add_argument(
        "--lang",
        type=str,
        required=True,
        help="Language code of the model, e.g., en_GB",
    )

    return parser.parse_args()
def add_meta_data(filename: str, meta_data: Dict[str, Any]):
    """Replace the metadata of an ONNX model file, in place.

    Args:
      filename:
        Filename of the ONNX model to be changed.
      meta_data:
        Key-value pairs; values are stringified before being stored.
    """
    model = onnx.load(filename)

    # Drop any metadata the model already carries.
    del model.metadata_props[:]

    for key, value in meta_data.items():
        entry = model.metadata_props.add()
        entry.key = key
        entry.value = str(value)

    onnx.save(model, filename)
def load_config(filename):
    """Read a piper JSON config file and return it as a Python object."""
    with open(filename, "r") as f:
        return json.load(f)
def generate_tokens(config):
    """Write tokens.txt, mapping each phoneme symbol to its first id.

    Each line is "<symbol> <id>"; only the first id of each symbol's id
    list is used.
    """
    with open("tokens.txt", "w", encoding="utf-8") as f:
        f.writelines(
            f"{symbol} {ids[0]}\n"
            for symbol, ids in config["phoneme_id_map"].items()
        )
    print("Generated tokens.txt")
# for en_US-lessac-medium.onnx
# export LANG=en_US
# export TYPE=lessac
# export NAME=medium
def main():
    """Generate tokens.txt and embed sherpa-onnx metadata in a piper model.

    Expects {lang}-{name}-{kind}.onnx and {lang}-{name}-{kind}.onnx.json
    in the current directory.
    """
    args = get_args()
    print(args)

    lang = args.lang
    # e.g., "en_GB" -> the ISO 639 entry for "en"
    lang_iso = Lang(lang.split("_")[0])
    print(lang, lang_iso)

    kind = args.kind
    name = args.name

    # e.g., en_GB-alan-low.onnx.json
    config = load_config(f"{lang}-{name}-{kind}.onnx.json")

    print("generate tokens")
    generate_tokens(config)

    sample_rate = config["audio"]["sample_rate"]
    if sample_rate == 22500:
        # presumably a typo in some upstream piper configs — normalize it
        print("Change sample rate from 22500 to 22050")
        sample_rate = 22050

    print("add model metadata")
    meta_data = {
        "model_type": "vits",
        "comment": "piper",  # must be piper for models from piper
        "language": lang_iso.name,
        "voice": config["espeak"]["voice"],  # e.g., en-us
        "has_espeak": 1,
        "n_speakers": config["num_speakers"],
        "sample_rate": sample_rate,
    }
    print(meta_data)

    add_meta_data(f"{lang}-{name}-{kind}.onnx", meta_data)


# Guard the entry point so importing this module has no side effects,
# matching the convention used by dynamic_quantization.py in this directory.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,74 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
import argparse
import onnxmltools
from onnxmltools.utils.float16_converter import convert_float_to_float16
from onnxruntime.quantization import QuantType, quantize_dynamic
def get_args():
    """Parse paths for the fp32 input model and the fp16/int8 outputs."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input",
        type=str,
        required=True,
        help="Path to the input float32 ONNX model",
    )

    parser.add_argument(
        "--output-fp16",
        type=str,
        required=True,
        help="Path to save the float16 ONNX model",
    )

    parser.add_argument(
        "--output-int8",
        type=str,
        required=True,
        help="Path to save the int8 dynamically quantized ONNX model",
    )

    return parser.parse_args()
# for op_block_list, see also
# https://github.com/microsoft/onnxruntime/blob/089c52e4522491312e6839af146a276f2351972e/onnxruntime/python/tools/transformers/float16.py#L115
#
# libc++abi: terminating with uncaught exception of type Ort::Exception:
# Type Error: Type (tensor(float16)) of output arg (/dp/RandomNormalLike_output_0)
# of node (/dp/RandomNormalLike) does not match expected type (tensor(float)).
#
# libc++abi: terminating with uncaught exception of type Ort::Exception:
# This is an invalid model. Type Error: Type 'tensor(float16)' of input
# parameter (/enc_p/encoder/attn_layers.0/Constant_84_output_0) of
# operator (Range) in node (/Range_1) is invalid.
def export_onnx_fp16(onnx_fp32_path, onnx_fp16_path):
    """Convert a float32 ONNX model to float16 and save it.

    I/O tensors stay float32, and RandomNormalLike/Range nodes are kept in
    float32 (see the op_block_list notes above) so onnxruntime can still
    load the converted model.
    """
    fp32_model = onnxmltools.utils.load_model(onnx_fp32_path)
    fp16_model = convert_float_to_float16(
        fp32_model,
        keep_io_types=True,
        op_block_list=["RandomNormalLike", "Range"],
    )
    onnxmltools.utils.save_model(fp16_model, onnx_fp16_path)
def main():
    """Produce int8 (dynamic quantization) and fp16 variants of a model."""
    args = get_args()
    print(args)

    src = args.input

    # int8: weight-only dynamic quantization via onnxruntime.
    quantize_dynamic(
        model_input=src,
        model_output=args.output_int8,
        weight_type=QuantType.QUInt8,
    )

    # fp16: converted with onnxmltools, keeping float32 I/O.
    export_onnx_fp16(src, args.output_fp16)


if __name__ == "__main__":
    main()

1428
scripts/piper/generate.py Executable file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,86 @@
#!/usr/bin/env bash
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
#
# Auto generated! Do NOT edit!
#
# NOTE(review): This file is a Jinja2 template; the {% ... %} and {{ ... }}
# markers are expanded (one loop iteration per piper model) before the
# resulting shell script is executed.
set -ex
log() {
# This function is from espnet
local fname=${BASH_SOURCE[1]##*/}
echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}
# Fetch the espeak-ng data that piper VITS models need at run time.
wget -qq https://github.com/k2-fsa/sherpa-onnx/releases/download/tts-models/espeak-ng-data.tar.bz2
tar xf espeak-ng-data.tar.bz2
rm espeak-ng-data.tar.bz2
mkdir -p release
{% for model in model_list %}
name={{ model.name }}
kind={{ model.kind }}
lang={{ model.lang }}
model_name={{ model.model_name }}
text="{{ model.text }}"
num_speakers={{ model.ns }}
sample_rate={{ model.sr }}
# Model-specific download/export command injected by the generator.
{{ model.cmd }}
# Rewrite the ONNX metadata and produce tokens.txt for sherpa-onnx.
python3 ./add_meta_data.py \
--name $name \
--kind $kind \
--lang $lang
dst=vits-piper-$lang-$name-$kind
dst_int8=vits-piper-$lang-$name-$kind-int8
dst_fp16=vits-piper-$lang-$name-$kind-fp16
# Assemble the fp32 directory first; the int8/fp16 directories are copies
# that share everything except the .onnx file itself, which is moved into
# the fp32 directory only after the copies are made.
mkdir -p $dst
mv -v tokens.txt $dst/
mv -v MODEL_CARD $dst/ || true
mv -v README $dst/ || true
mv -v *.json $dst/
cp -a ./espeak-ng-data $dst/
cp -a $dst $dst_int8
cp -a $dst $dst_fp16
mv -v *.onnx $dst/
python3 ./dynamic_quantization.py \
--input $dst/$model_name \
--output-int8 $dst_int8/$model_name \
--output-fp16 $dst_fp16/$model_name >/dev/null 2>&1
echo "---fp32---"
ls -lh $dst
echo "---int8---"
ls -lh $dst_int8
echo "---fp16---"
ls -lh $dst_fp16
tar cjf ${dst}.tar.bz2 $dst
tar cjf ${dst_int8}.tar.bz2 $dst_int8
tar cjf ${dst_fp16}.tar.bz2 $dst_fp16
# Optionally render per-speaker audio samples when an hf/ checkout exists.
if [ -d hf ]; then
mkdir -p hf/piper/mp3/$lang/vits-piper-$lang-$name-$kind
# seq is 1-based while speaker ids are 0-based, hence the i-1 below.
for i in $(seq $num_speakers); do
i=$((i-1))
python3 ./generate_samples-$dst-$i.py
done
ls -lh hf/piper/mp3/$lang/vits-piper-$lang-$name-$kind
fi
mv $dst release
mv $dst_int8 release
mv $dst_fp16 release
ls -lh release/*
{% endfor %}

View File

@@ -0,0 +1,22 @@
# NOTE(review): Jinja2 template — the {{ ... }} placeholders are filled in
# by the generator before this script is run; it is not valid Python as-is.
import sherpa_onnx
import soundfile as sf
# Offline TTS configuration for a piper VITS model exported to sherpa-onnx.
config = sherpa_onnx.OfflineTtsConfig(
    model=sherpa_onnx.OfflineTtsModelConfig(
        vits=sherpa_onnx.OfflineTtsVitsModelConfig(
            model="{{ model }}",
            lexicon="",
            data_dir="{{ data_dir }}",
            tokens="{{ tokens }}",
        ),
        num_threads=1,
    ),
)
# Abort before constructing the engine if the config is not usable.
if not config.validate():
    raise ValueError("Please check your config")
tts = sherpa_onnx.OfflineTts(config)
# sid selects the speaker for multi-speaker models.
audio = tts.generate(text="{{text}}", sid={{sid}}, speed=1.0)
sf.write("{{ output_filename }}", audio.samples, samplerate=audio.sample_rate)