Refactor kokoro export (#2302)

- generate samples for https://k2-fsa.github.io/sherpa/onnx/tts/all/
- provide an int8 model for kokoro v0.19, kokoro-int8-en-v0_19.tar.bz2 (a usage sketch follows the commit metadata below)
Fangjun Kuang
2025-06-18 20:30:10 +08:00
committed by GitHub
parent 3878170991
commit 59d118c256
18 changed files with 494 additions and 215 deletions
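
For reference, here is a minimal usage sketch for the released archive, based on the sample-generation script included in this commit. It assumes (not verified here) that kokoro-int8-en-v0_19.tar.bz2 unpacks to model.int8.onnx, voices.bin, tokens.txt, and an espeak-ng-data directory:

import sherpa_onnx
import soundfile as sf

config = sherpa_onnx.OfflineTtsConfig(
    model=sherpa_onnx.OfflineTtsModelConfig(
        kokoro=sherpa_onnx.OfflineTtsKokoroModelConfig(
            model="./model.int8.onnx",
            voices="./voices.bin",
            tokens="./tokens.txt",
            data_dir="./espeak-ng-data",
        ),
        num_threads=2,
    ),
)
if not config.validate():
    raise ValueError("Please check your config")

tts = sherpa_onnx.OfflineTts(config)
audio = tts.generate("Hello from kokoro.", sid=0, speed=1.0)
sf.write("hello.wav", audio.samples, samplerate=audio.sample_rate)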

scripts/kokoro/v0.19/.gitignore vendored Normal file

@@ -0,0 +1 @@
kLegacy

scripts/kokoro/v0.19/add_meta_data.py Executable file

@@ -0,0 +1,77 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)

import argparse

import onnx
import torch

from generate_voices_bin import speaker2id


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model", type=str, required=True, help="input and output onnx model"
    )
    return parser.parse_args()


def main():
    args = get_args()
    print(args.model)
    model = onnx.load(args.model)

    # Any single voice file works here; we only need its shape,
    # (511, 1, 256), to record style_dim in the metadata.
    style = torch.load(
        "./kLegacy/v0.19/voices/af.pt", weights_only=True, map_location="cpu"
    )

    speaker2id_str = ""
    id2speaker_str = ""
    sep = ""
    for s, i in speaker2id.items():
        speaker2id_str += f"{sep}{s}->{i}"
        id2speaker_str += f"{sep}{i}->{s}"
        sep = ","

    meta_data = {
        "model_type": "kokoro",
        "language": "English",
        "has_espeak": 1,
        "sample_rate": 24000,
        "version": 1,
        "voice": "en-us",
        "style_dim": ",".join(map(str, style.shape)),
        "n_speakers": len(speaker2id),
        "speaker2id": speaker2id_str,
        "id2speaker": id2speaker_str,
        "speaker_names": ",".join(map(str, speaker2id.keys())),
        "model_url": "https://huggingface.co/hexgrad/kLegacy/",
        "see_also": "https://huggingface.co/spaces/hexgrad/Kokoro-TTS",
        "maintainer": "k2-fsa",
        "comment": "This is kokoro v0.19 and supports only English",
    }

    print(model.metadata_props)

    # Drop any existing metadata before adding ours.
    while len(model.metadata_props):
        model.metadata_props.pop()

    for key, value in meta_data.items():
        meta = model.metadata_props.add()
        meta.key = key
        meta.value = str(value)

    print("--------------------")
    print(model.metadata_props)

    onnx.save(model, args.model)
    print(f"Please see {args.model}, ./voices.bin, and ./tokens.txt")


if __name__ == "__main__":
    main()
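
A quick way to confirm the metadata landed, mirroring how test.py reads it back at inference time:

import onnxruntime as ort

sess = ort.InferenceSession("./model.onnx")
meta = sess.get_modelmeta().custom_metadata_map
print(meta["style_dim"], meta["sample_rate"], meta["speaker_names"])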

scripts/kokoro/v0.19/dynamic_quantization.py Executable file

@@ -0,0 +1,47 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)

from pathlib import Path

import onnxruntime
from onnxruntime.quantization import QuantType, quantize_dynamic


def show(filename):
    session_opts = onnxruntime.SessionOptions()
    session_opts.log_severity_level = 3
    sess = onnxruntime.InferenceSession(filename, session_opts)
    for i in sess.get_inputs():
        print(i)

    print("-----")

    for i in sess.get_outputs():
        print(i)

    """
    NodeArg(name='tokens', type='tensor(int64)', shape=[1, 'tokens1'])
    NodeArg(name='style', type='tensor(float)', shape=[1, 256])
    NodeArg(name='speed', type='tensor(float)', shape=[1])
    -----
    NodeArg(name='audio', type='tensor(float)', shape=['audio0'])
    """


def main():
    show("./model.onnx")
    if not Path("./model.int8.onnx").is_file():
        quantize_dynamic(
            model_input="model.onnx",
            model_output="model.int8.onnx",
            # op_types_to_quantize=["MatMul"],
            weight_type=QuantType.QUInt8,
        )
    else:
        print("./model.int8.onnx exists - skip")


if __name__ == "__main__":
    main()
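
Once both files exist, a quick size comparison shows what dynamic quantization buys (a sketch; the actual numbers depend on the export):

from pathlib import Path

for f in ("model.onnx", "model.int8.onnx"):
    size_mb = Path(f).stat().st_size / 1024 / 1024
    print(f, f"{size_mb:.1f} MB")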


@@ -0,0 +1,40 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
"""
Generate samples for
https://k2-fsa.github.io/sherpa/onnx/tts/all/
"""

import sherpa_onnx
import soundfile as sf

from generate_voices_bin import speaker2id

config = sherpa_onnx.OfflineTtsConfig(
    model=sherpa_onnx.OfflineTtsModelConfig(
        kokoro=sherpa_onnx.OfflineTtsKokoroModelConfig(
            model="./model.onnx",
            voices="./voices.bin",
            tokens="./tokens.txt",
            data_dir="./espeak-ng-data",
        ),
        num_threads=2,
    ),
    max_num_sentences=1,
)

if not config.validate():
    raise ValueError("Please check your config")

tts = sherpa_onnx.OfflineTts(config)

text = (
    "Friends fell out often because life was changing so fast. "
    "The easiest thing in the world was to lose touch with someone."
)

for s, i in speaker2id.items():
    print(s, i, len(speaker2id))
    audio = tts.generate(text, sid=i, speed=1.0)
    sf.write(
        f"./hf/kokoro/v0.19/mp3/{i}-{s}.mp3",
        audio.samples,
        samplerate=audio.sample_rate,
    )
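
Note that soundfile does not create missing directories, so if the output folder is absent, create it before running the script (a two-line helper, not part of the original):

from pathlib import Path

Path("./hf/kokoro/v0.19/mp3").mkdir(parents=True, exist_ok=True)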

scripts/kokoro/v0.19/generate_tokens.py Executable file

@@ -0,0 +1,26 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)


def get_vocab():
    # https://huggingface.co/hexgrad/kLegacy/blob/main/v0.19/kokoro.py#L75
    _pad = "$"
    _punctuation = ';:,.!?¡¿—…"«»“” '
    _letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'"
    symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)

    dicts = {}
    for i in range(len(symbols)):
        dicts[symbols[i]] = i
    return dicts


def main():
    token2id = get_vocab()
    with open("tokens.txt", "w", encoding="utf-8") as f:
        for s, i in token2id.items():
            f.write(f"{s} {i}\n")


if __name__ == "__main__":
    main()
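
For reference, the resulting tokens.txt starts like this (token, a space, then its id; the entry for the space token itself leaves only the id after the separator, which is why load_tokens in test.py special-cases one-field lines):

$ 0
; 1
: 2
, 3
. 4
! 5
? 6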

scripts/kokoro/v0.19/generate_voices_bin.py Executable file

@@ -0,0 +1,42 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)

from pathlib import Path

import torch

id2speaker = {
    0: "af",
    1: "af_bella",
    2: "af_nicole",
    3: "af_sarah",
    4: "af_sky",
    5: "am_adam",
    6: "am_michael",
    7: "bf_emma",
    8: "bf_isabella",
    9: "bm_george",
    10: "bm_lewis",
}
speaker2id = {speaker: idx for idx, speaker in id2speaker.items()}


def main():
    if Path("./voices.bin").is_file():
        print("./voices.bin exists - skip")
        return

    with open("voices.bin", "wb") as f:
        for _, speaker in id2speaker.items():
            m = torch.load(
                f"kLegacy/v0.19/voices/{speaker}.pt",
                weights_only=True,
                map_location="cpu",
            ).numpy()
            # m.shape (511, 1, 256)
            f.write(m.tobytes())


if __name__ == "__main__":
    main()
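
A round-trip check that voices.bin matches the original .pt files; it assumes the shape noted above, 11 speakers of (511, 1, 256) each, written in id order with native (little-endian) byte order:

import numpy as np
import torch

from generate_voices_bin import id2speaker

embedding = np.fromfile("voices.bin", dtype=np.float32).reshape(
    len(id2speaker), 511, 1, 256
)
af = torch.load("kLegacy/v0.19/voices/af.pt", weights_only=True, map_location="cpu")
assert np.allclose(embedding[0], af.numpy())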

scripts/kokoro/v0.19/run.sh Executable file

@@ -0,0 +1,36 @@
#!/usr/bin/env bash
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)

set -ex

cat > README-new.md <<EOF
# Introduction
Files in this folder are from
git clone https://huggingface.co/hexgrad/kLegacy
EOF

if [ ! -d kLegacy ]; then
  git clone https://huggingface.co/hexgrad/kLegacy
  pushd kLegacy/v0.19
  git lfs pull
  popd
fi

if [ ! -f ./voices.bin ]; then
  ./generate_voices_bin.py
fi

if [ ! -f ./tokens.txt ]; then
  ./generate_tokens.py
fi

if [ ! -f ./model.onnx ]; then
  mv kLegacy/v0.19/kokoro-v0_19.onnx ./model.onnx
fi

./add_meta_data.py --model ./model.onnx

if [ ! -f model.int8.onnx ]; then
  ./dynamic_quantization.py
fi

scripts/kokoro/v0.19/test.py Executable file

@@ -0,0 +1,221 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
"""
female (7)
'af', 'af_bella', 'af_nicole', 'af_sarah', 'af_sky',
'bf_emma', 'bf_isabella',

male (4)
'am_adam', 'am_michael', 'bm_george', 'bm_lewis'
"""

import argparse
import time
from pathlib import Path
from typing import Dict, List

import numpy as np

try:
    from piper_phonemize import phonemize_espeak
except Exception as ex:
    raise RuntimeError(
        f"{ex}\nPlease run\n"
        "pip install piper_phonemize -f https://k2-fsa.github.io/icefall/piper_phonemize.html"
    )

import onnxruntime as ort
import soundfile as sf


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="Path to the model",
    )
    parser.add_argument(
        "--voices-bin",
        type=str,
        required=True,
        help="Path to the voices.bin",
    )
    parser.add_argument(
        "--tokens",
        type=str,
        required=True,
        help="Path to tokens.txt",
    )
    return parser.parse_args()
def show(filename):
    session_opts = ort.SessionOptions()
    session_opts.log_severity_level = 3
    sess = ort.InferenceSession(filename, session_opts)
    for i in sess.get_inputs():
        print(i)

    print("-----")

    for i in sess.get_outputs():
        print(i)

    """
    NodeArg(name='tokens', type='tensor(int64)', shape=[1, 'tokens1'])
    NodeArg(name='style', type='tensor(float)', shape=[1, 256])
    NodeArg(name='speed', type='tensor(float)', shape=[1])
    -----
    NodeArg(name='audio', type='tensor(float)', shape=['audio0'])
    """


def load_tokens(filename: str) -> Dict[str, int]:
    ans = dict()
    with open(filename, encoding="utf-8") as f:
        for line in f:
            fields = line.strip().split()
            if len(fields) == 2:
                token, idx = fields
                ans[token] = int(idx)
            else:
                # The line for the space token contains only the id.
                assert len(fields) == 1, (len(fields), line)
                ans[" "] = int(fields[0])
    return ans


def load_voices(speaker_names: List[str], dim: List[int], voices_bin: str):
    # voices.bin is the raw float32 bytes of all speaker embeddings,
    # concatenated in speaker-id order; see generate_voices_bin.py.
    embedding = (
        np.fromfile(voices_bin, dtype="uint8")
        .view(np.float32)
        .reshape(len(speaker_names), *dim)
    )
    print("embedding.shape", embedding.shape)
    ans = dict()
    for i in range(len(speaker_names)):
        ans[speaker_names[i]] = embedding[i]
    return ans
class OnnxModel:
    def __init__(self, model_filename: str, voices_bin: str, tokens: str):
        session_opts = ort.SessionOptions()
        session_opts.inter_op_num_threads = 1
        session_opts.intra_op_num_threads = 1

        self.session_opts = session_opts
        self.model = ort.InferenceSession(
            model_filename,
            sess_options=self.session_opts,
            providers=["CPUExecutionProvider"],
        )
        self.token2id = load_tokens(tokens)

        meta = self.model.get_modelmeta().custom_metadata_map
        print(meta)

        dim = list(map(int, meta["style_dim"].split(",")))
        speaker_names = meta["speaker_names"].split(",")
        self.voices = load_voices(
            speaker_names=speaker_names, dim=dim, voices_bin=voices_bin
        )
        self.sample_rate = int(meta["sample_rate"])

        print(list(self.voices.keys()))
        # ['af', 'af_bella', 'af_nicole', 'af_sarah', 'af_sky', 'am_adam',
        #  'am_michael', 'bf_emma', 'bf_isabella', 'bm_george', 'bm_lewis']

        # af -> (511, 1, 256). The first axis is indexed by the number of
        # input tokens, so the longest supported input is 510 tokens.
        self.max_len = self.voices[next(iter(self.voices))].shape[0] - 1

    def __call__(self, text: str, voice):
        tokens = phonemize_espeak(text, "en-us")
        # tokens is List[List[str]]
        # Each sentence is a List[str]
        # len(tokens) == number of sentences
        tokens = sum(tokens, [])  # flatten
        tokens = "".join(tokens)

        # Adjust the pronunciation of the word "kokoro".
        tokens = tokens.replace("kəkˈoːɹoʊ", "kˈoʊkəɹoʊ").replace(
            "kəkˈɔːɹəʊ", "kˈəʊkəɹəʊ"
        )
        tokens = list(tokens)

        token_ids = [self.token2id[i] for i in tokens]
        token_ids = token_ids[: self.max_len]

        # The style vector is selected by the input length.
        style = self.voices[voice][len(token_ids)]

        token_ids = [0, *token_ids, 0]  # pad with 0 at both ends
        token_ids = np.array([token_ids], dtype=np.int64)
        speed = np.array([1.0], dtype=np.float32)

        audio = self.model.run(
            [
                self.model.get_outputs()[0].name,
            ],
            {
                self.model.get_inputs()[0].name: token_ids,
                self.model.get_inputs()[1].name: style,
                self.model.get_inputs()[2].name: speed,
            },
        )[0]

        return audio
def main():
    args = get_args()
    print(vars(args))
    show(args.model)

    # Example phonemization:
    # tokens = phonemize_espeak("how are you doing?", "en-us")
    # [['h', 'ˌ', 'a', 'ʊ', ' ', 'ɑ', 'ː', 'ɹ', ' ', 'j', 'u', 'ː', ' ', 'd', 'ˈ', 'u', 'ː', 'ɪ', 'ŋ', '?']]

    m = OnnxModel(
        model_filename=args.model, voices_bin=args.voices_bin, tokens=args.tokens
    )
    text = (
        "Today as always, men fall into two groups: slaves and free men."
        + " Whoever does not have two-thirds of his day for himself, "
        + "is a slave, whatever he may be: a statesman, a businessman, "
        + "an official, or a scholar."
    )

    for i, voice in enumerate(m.voices.keys(), 1):
        print(f"Testing {i}/{len(m.voices)} - {voice}/{args.model}")
        start = time.time()
        audio = m(text, voice=voice)
        end = time.time()

        elapsed_seconds = end - start
        audio_duration = len(audio) / m.sample_rate
        real_time_factor = elapsed_seconds / audio_duration

        filename = f"{Path(args.model).stem}-{voice}.wav"
        sf.write(
            filename,
            audio,
            samplerate=m.sample_rate,
            subtype="PCM_16",
        )

        print(f"  Saved to {filename}")
        print(f"  Elapsed seconds: {elapsed_seconds:.3f}")
        print(f"  Audio duration in seconds: {audio_duration:.3f}")
        print(
            f"  RTF: {elapsed_seconds:.3f}/{audio_duration:.3f} = {real_time_factor:.3f}"
        )


if __name__ == "__main__":
    main()
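
To exercise both exports, run the script once per model, e.g.

./test.py --model ./model.onnx --voices-bin ./voices.bin --tokens ./tokens.txt
./test.py --model ./model.int8.onnx --voices-bin ./voices.bin --tokens ./tokens.txt

Each run writes one PCM_16 wav per speaker (model-af.wav, model-af_bella.wav, and so on) and prints the real-time factor, which gives a quick fp32-versus-int8 comparison.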