Refactor kokoro export (#2302)

- generate samples for https://k2-fsa.github.io/sherpa/onnx/tts/all/
- provide int8 model for kokoro v0.19 kokoro-int8-en-v0_19.tar.bz2
This commit is contained in:
Fangjun Kuang
2025-06-18 20:30:10 +08:00
committed by GitHub
parent 3878170991
commit 59d118c256
18 changed files with 494 additions and 215 deletions

View File

@@ -1,3 +1,4 @@
espeak-ng-data
voices.json
voices.bin
README-new.md

View File

@@ -1,10 +1,6 @@
# Introduction
This folder contains scripts for adding metadata to models
from https://github.com/thewh1teagle/kokoro-onnx/releases/tag/model-files
See also
https://huggingface.co/hexgrad/Kokoro-82M/tree/main
Please see also
https://huggingface.co/hexgrad/Kokoro-82M
and
https://huggingface.co/spaces/hexgrad/Kokoro-TTS
https://huggingface.co/hexgrad/Kokoro-82M/discussions/14

View File

@@ -1,117 +0,0 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
import argparse
import json
from pathlib import Path
import numpy as np
import onnx
def get_args():
    """Return parsed command-line arguments (--model and --voices)."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--model", type=str, required=True, help="input and output onnx model"
    )
    arg_parser.add_argument(
        "--voices", type=str, required=True, help="Path to voices.json"
    )
    return arg_parser.parse_args()
def load_voices(filename):
    """Read a voices.json file and return {speaker_name: float32 numpy array}."""
    with open(filename) as f:
        raw = json.load(f)
    return {name: np.array(data, dtype=np.float32) for name, data in raw.items()}
def get_vocab():
    """Build the kokoro v0.19 symbol-to-id table.

    Id 0 is the pad symbol "$"; the punctuation, Latin letters, and IPA
    symbols follow in a fixed order, so each symbol's id is simply its
    position in the concatenated symbol list.
    """
    _pad = "$"
    _punctuation = ';:,.!?¡¿—…"«»“” '
    _letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'"
    symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)
    # enumerate() replaces the old range(len(...)) index loop; the list
    # position defines the token id.
    return {symbol: i for i, symbol in enumerate(symbols)}
def generate_tokens():
    """Write the symbol table to ./tokens.txt, one "<symbol> <id>" pair per line."""
    vocab = get_vocab()
    with open("tokens.txt", "w", encoding="utf-8") as f:
        f.writelines(f"{sym} {idx}\n" for sym, idx in vocab.items())
def main():
    """Embed kokoro metadata into an onnx model and emit voices.bin/tokens.txt.

    The model given via --model is modified in place: its existing
    metadata_props are cleared and replaced with kokoro-specific entries.
    """
    args = get_args()
    print(args.model, args.voices)

    model = onnx.load(args.model)
    voices = load_voices(args.voices)

    # tokens.txt is generated only once; reruns reuse the existing file.
    if Path("./tokens.txt").is_file():
        print("./tokens.txt exist, skip generating it")
    else:
        generate_tokens()

    keys = list(voices.keys())
    print(",".join(keys))

    # voices.bin holds the raw float32 bytes of every speaker's style array,
    # concatenated in the order of `keys`.
    if Path("./voices.bin").is_file():
        print("./voices.bin exists, skip generating it")
    else:
        with open("voices.bin", "wb") as f:
            for k in keys:
                f.write(voices[k].tobytes())

    # Build comma-separated "name->id" and "id->name" mapping strings.
    speaker2id_str = ""
    id2speaker_str = ""
    sep = ""
    for i, s in enumerate(keys):
        speaker2id_str += f"{sep}{s}->{i}"
        id2speaker_str += f"{sep}{i}->{s}"
        sep = ","

    meta_data = {
        "model_type": "kokoro",
        "language": "English",
        "has_espeak": 1,
        "sample_rate": 24000,
        "version": 1,
        "voice": "en-us",
        # Comma-joined shape of the first speaker's style array; all
        # speakers presumably share the same shape — verify upstream.
        "style_dim": ",".join(map(str, voices[keys[0]].shape)),
        "n_speakers": len(keys),
        "speaker2id": speaker2id_str,
        "id2speaker": id2speaker_str,
        "speaker_names": ",".join(keys),
        "model_url": "https://github.com/thewh1teagle/kokoro-onnx/releases/tag/model-files",
        "see_also": "https://huggingface.co/spaces/hexgrad/Kokoro-TTS",
        "see_also_2": "https://huggingface.co/hexgrad/Kokoro-82M",
        "maintainer": "k2-fsa",
    }

    print(model.metadata_props)
    # Drop any pre-existing metadata entries before adding ours.
    while len(model.metadata_props):
        model.metadata_props.pop()

    for key, value in meta_data.items():
        meta = model.metadata_props.add()
        meta.key = key
        meta.value = str(value)

    print("--------------------")
    print(model.metadata_props)

    onnx.save(model, args.model)
    print(f"Please see {args.model}, ./voices.bin, and ./tokens.txt")


if __name__ == "__main__":
    main()

View File

@@ -1,50 +0,0 @@
#!/usr/bin/env bash
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
#
# Download the kokoro v0.19 onnx export and voices.json from the
# kokoro-onnx release page, embed sherpa-onnx metadata with
# add_meta_data.py, and sanity-check the result with test.py.

set -ex

# README shipped alongside the exported model files (heredoc content is
# written verbatim to README-new.md).
cat > README-new.md <<EOF
# Introduction
Files in this folder are from
https://github.com/thewh1teagle/kokoro-onnx/releases/tag/model-files
Please see also
https://huggingface.co/hexgrad/Kokoro-82M
and
https://huggingface.co/hexgrad/Kokoro-82M/discussions/14
EOF

# Commented-out entries are alternative quantized exports that are
# deliberately not processed.
files=(
  # kokoro-v0_19_hf.onnx
  kokoro-v0_19.onnx
  # kokoro-quant.onnx
  # kokoro-quant-convinteger.onnx
  voices.json
)

# Fetch each file only if it is not already present locally.
for f in ${files[@]}; do
  if [ ! -f ./$f ]; then
    curl -SL -O https://github.com/thewh1teagle/kokoro-onnx/releases/download/model-files/$f
  fi
done

models=(
  kokoro-v0_19
  # kokoro-quant
  # kokoro-quant-convinteger
  # kokoro-v0_19_hf
)

# Rewrite each model's metadata in place; also produces voices.bin/tokens.txt.
for m in ${models[@]}; do
  ./add_meta_data.py --model $m.onnx --voices ./voices.json
done

ls -l
echo "----------"
ls -lh

# Smoke-test each exported model against the generated assets.
for m in ${models[@]}; do
  ./test.py --model $m.onnx --voices-bin ./voices.bin --tokens ./tokens.txt
done

ls -lh

1
scripts/kokoro/v0.19/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
kLegacy

View File

View File

@@ -0,0 +1,77 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
import argparse
import onnx
import torch
from generate_voices_bin import speaker2id
def get_args():
    """Return parsed command-line arguments (--model)."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--model", type=str, required=True, help="input and output onnx model"
    )
    return arg_parser.parse_args()
def main():
    """Embed kokoro v0.19 metadata into the given onnx model (in place)."""
    args = get_args()
    print(args.model)

    model = onnx.load(args.model)

    # One style tensor is loaded only to record its shape in the metadata;
    # voices.bin itself is produced by generate_voices_bin.py.
    style = torch.load(
        "./kLegacy/v0.19/voices/af.pt", weights_only=True, map_location="cpu"
    )

    # Build comma-separated "name->id" and "id->name" mapping strings.
    speaker2id_str = ""
    id2speaker_str = ""
    sep = ""
    for s, i in speaker2id.items():
        speaker2id_str += f"{sep}{s}->{i}"
        id2speaker_str += f"{sep}{i}->{s}"
        sep = ","

    meta_data = {
        "model_type": "kokoro",
        "language": "English",
        "has_espeak": 1,
        "sample_rate": 24000,
        "version": 1,
        "voice": "en-us",
        # Comma-joined shape of one speaker's style tensor.
        "style_dim": ",".join(map(str, style.shape)),
        "n_speakers": len(speaker2id),
        "speaker2id": speaker2id_str,
        "id2speaker": id2speaker_str,
        "speaker_names": ",".join(map(str, speaker2id.keys())),
        "model_url": "https://huggingface.co/hexgrad/kLegacy/",
        "see_also": "https://huggingface.co/spaces/hexgrad/Kokoro-TTS",
        "maintainer": "k2-fsa",
        "comment": "This is kokoro v0.19 and supports only English",
    }

    print(model.metadata_props)
    # Drop any pre-existing metadata entries before adding ours.
    while len(model.metadata_props):
        model.metadata_props.pop()

    for key, value in meta_data.items():
        meta = model.metadata_props.add()
        meta.key = key
        meta.value = str(value)

    print("--------------------")
    print(model.metadata_props)

    onnx.save(model, args.model)
    print(f"Please see {args.model}, ./voices.bin, and ./tokens.txt")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,47 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
from pathlib import Path
import onnxruntime
from onnxruntime.quantization import QuantType, quantize_dynamic
def show(filename):
    """Print every input and output NodeArg of the given onnx model."""
    opts = onnxruntime.SessionOptions()
    opts.log_severity_level = 3
    session = onnxruntime.InferenceSession(filename, opts)
    for node in session.get_inputs():
        print(node)
    print("-----")
    for node in session.get_outputs():
        print(node)


"""
NodeArg(name='tokens', type='tensor(int64)', shape=[1, 'tokens1'])
NodeArg(name='style', type='tensor(float)', shape=[1, 256])
NodeArg(name='speed', type='tensor(float)', shape=[1])
-----
NodeArg(name='audio', type='tensor(float)', shape=['audio0'])
"""
def main():
    """Print model.onnx I/O and create a dynamically-quantized model.int8.onnx."""
    show("./model.onnx")
    if Path("./model.int8.onnx").is_file():
        print("./model.int8.onnx exists - skip")
        return
    quantize_dynamic(
        model_input="model.onnx",
        model_output="model.int8.onnx",
        # op_types_to_quantize=["MatMul"],
        weight_type=QuantType.QUInt8,
    )


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
"""
Generate samples for
https://k2-fsa.github.io/sherpa/onnx/tts/all/
"""
import sherpa_onnx
import soundfile as sf

from generate_voices_bin import speaker2id

# Offline TTS config for the exported kokoro v0.19 model; all asset
# paths are relative to the current working directory.
config = sherpa_onnx.OfflineTtsConfig(
    model=sherpa_onnx.OfflineTtsModelConfig(
        kokoro=sherpa_onnx.OfflineTtsKokoroModelConfig(
            model="./model.onnx",
            voices="./voices.bin",
            tokens="./tokens.txt",
            data_dir="./espeak-ng-data",
        ),
        num_threads=2,
    ),
    max_num_sentences=1,
)

if not config.validate():
    raise ValueError("Please check your config")

tts = sherpa_onnx.OfflineTts(config)

text = "Friends fell out often because life was changing so fast. The easiest thing in the world was to lose touch with someone."

# Synthesize the same text once per speaker and save one mp3 per speaker.
# NOTE(review): looks like ./hf/kokoro/v0.19/mp3 must already exist — confirm.
for s, i in speaker2id.items():
    print(s, i, len(speaker2id))
    audio = tts.generate(text, sid=i, speed=1.0)
    sf.write(
        f"./hf/kokoro/v0.19/mp3/{i}-{s}.mp3",
        audio.samples,
        samplerate=audio.sample_rate,
    )

View File

@@ -0,0 +1,26 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
def get_vocab():
    """Build the kokoro v0.19 symbol-to-id table.

    Id 0 is the pad symbol "$"; the punctuation, Latin letters, and IPA
    symbols follow in a fixed order, so each symbol's id is simply its
    position in the concatenated symbol list.
    """
    # https://huggingface.co/hexgrad/kLegacy/blob/main/v0.19/kokoro.py#L75
    _pad = "$"
    _punctuation = ';:,.!?¡¿—…"«»“” '
    _letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'"
    symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)
    # enumerate() replaces the old range(len(...)) index loop; the list
    # position defines the token id.
    return {symbol: i for i, symbol in enumerate(symbols)}
def main():
    """Dump the symbol table to ./tokens.txt, one "<symbol> <id>" pair per line."""
    vocab = get_vocab()
    with open("tokens.txt", "w", encoding="utf-8") as f:
        f.writelines(f"{sym} {idx}\n" for sym, idx in vocab.items())


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,42 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
import torch
from pathlib import Path
# Fixed speaker table for kokoro v0.19. The integer id is the speaker id
# used by sherpa-onnx, and also the position of that speaker's style tensor
# inside voices.bin (main() writes them in this insertion order).
# NOTE(review): name prefixes presumably mean af/am = American female/male,
# bf/bm = British female/male — confirm against upstream docs.
id2speaker = {
    0: "af",
    1: "af_bella",
    2: "af_nicole",
    3: "af_sarah",
    4: "af_sky",
    5: "am_adam",
    6: "am_michael",
    7: "bf_emma",
    8: "bf_isabella",
    9: "bm_george",
    10: "bm_lewis",
}

# Reverse lookup: speaker name -> integer id.
speaker2id = {speaker: idx for idx, speaker in id2speaker.items()}
def main():
    """Concatenate every speaker's style tensor (in id order) into ./voices.bin."""
    if Path("./voices.bin").is_file():
        print("./voices.bin exists - skip")
        return
    with open("voices.bin", "wb") as f:
        for speaker in id2speaker.values():
            tensor = torch.load(
                f"kLegacy/v0.19/voices/{speaker}.pt",
                weights_only=True,
                map_location="cpu",
            )
            # tensor.shape is (511, 1, 256)
            f.write(tensor.numpy().tobytes())


if __name__ == "__main__":
    main()

36
scripts/kokoro/v0.19/run.sh Executable file
View File

@@ -0,0 +1,36 @@
#!/usr/bin/env bash
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
#
# Export kokoro v0.19 (English-only) for sherpa-onnx: clone the upstream
# kLegacy repo, generate voices.bin and tokens.txt, embed metadata into
# the onnx model, and produce an int8 dynamically-quantized copy.

set -ex

# README shipped alongside the exported files (heredoc content is written
# verbatim to README-new.md).
cat > README-new.md <<EOF
# Introduction
Files in this folder are from
git clone https://huggingface.co/hexgrad/kLegacy
EOF

# Clone the upstream weights repo once; git-lfs pull fetches the real files.
if [ ! -d kLegacy ]; then
  git clone https://huggingface.co/hexgrad/kLegacy
  pushd kLegacy/v0.19
  git lfs pull
  popd
fi

# Each artifact is generated only if missing, so the script is re-runnable.
if [ ! -f ./voices.bin ]; then
  ./generate_voices_bin.py
fi

if [ ! -f ./tokens.txt ]; then
  ./generate_tokens.py
fi

if [ ! -f ./model.onnx ]; then
  mv kLegacy/v0.19/kokoro-v0_19.onnx ./model.onnx
fi

# Rewrites model.onnx metadata in place.
./add_meta_data.py --model ./model.onnx

if [ ! -f model.int8.onnx ]; then
  ./dynamic_quantization.py
fi

View File

@@ -67,11 +67,13 @@ def show(filename):
print(i)
# NodeArg(name='tokens', type='tensor(int64)', shape=[1, 'tokens1'])
# NodeArg(name='style', type='tensor(float)', shape=[1, 256])
# NodeArg(name='speed', type='tensor(float)', shape=[1])
# -----
# NodeArg(name='audio', type='tensor(float)', shape=['audio0'])
"""
NodeArg(name='tokens', type='tensor(int64)', shape=[1, 'tokens1'])
NodeArg(name='style', type='tensor(float)', shape=[1, 256])
NodeArg(name='speed', type='tensor(float)', shape=[1])
-----
NodeArg(name='audio', type='tensor(float)', shape=['audio0'])
"""
def load_tokens(filename: str) -> Dict[str, int]:
@@ -171,10 +173,6 @@ class OnnxModel:
return audio
def test(model, voice, text) -> np.ndarray:
pass
def main():
args = get_args()
print(vars(args))

View File

@@ -0,0 +1,46 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
"""
Generate samples for
https://k2-fsa.github.io/sherpa/onnx/tts/all/
"""
import sherpa_onnx
import soundfile as sf

from generate_voices_bin import speaker2id

# Offline TTS config for the kokoro v1.0 export. This model uses lexicons
# and a jieba dict dir for Chinese in addition to espeak-ng data; rule
# FSTs normalize phone numbers, dates, and numbers in Chinese text.
config = sherpa_onnx.OfflineTtsConfig(
    model=sherpa_onnx.OfflineTtsModelConfig(
        kokoro=sherpa_onnx.OfflineTtsKokoroModelConfig(
            model="./kokoro.onnx",
            voices="./voices.bin",
            tokens="./tokens.txt",
            data_dir="./espeak-ng-data",
            dict_dir="./dict",
            lexicon="./lexicon-zh.txt,./lexicon-us-en.txt",
        ),
        num_threads=2,
        debug=True,
    ),
    rule_fsts="./phone-zh.fst,./date-zh.fst,./number-zh.fst",
    max_num_sentences=1,
)

if not config.validate():
    raise ValueError("Please check your config")

tts = sherpa_onnx.OfflineTts(config)

text = "This model supports both Chinese and English. 小米的核心价值观是什么答案是真诚热爱有困难请拨打110 或者18601200909。I am learning 机器学习. 我在研究 machine learning。What do you think 中英文说的如何呢? 今天是 2025年6月18号."
print("text", text)

# Synthesize the same text once per speaker and save one mp3 per speaker.
# NOTE(review): looks like ./hf/kokoro/v1.0/mp3 must already exist — confirm.
for s, i in speaker2id.items():
    print(s, i, len(speaker2id))
    audio = tts.generate(text, sid=i, speed=1.0)
    sf.write(
        f"./hf/kokoro/v1.0/mp3/{i}-{s}.mp3",
        audio.samples,
        samplerate=audio.sample_rate,
    )

View File

@@ -0,0 +1,46 @@
#!/usr/bin/env python3
# Copyright 2025 Xiaomi Corp. (authors: Fangjun Kuang)
"""
Generate samples for
https://k2-fsa.github.io/sherpa/onnx/tts/all/
"""
import sherpa_onnx
import soundfile as sf

from generate_voices_bin import speaker2id

# Offline TTS config for the kokoro v1.1-zh export. This model uses
# lexicons and a jieba dict dir for Chinese in addition to espeak-ng
# data; rule FSTs normalize phone numbers, dates, and numbers.
config = sherpa_onnx.OfflineTtsConfig(
    model=sherpa_onnx.OfflineTtsModelConfig(
        kokoro=sherpa_onnx.OfflineTtsKokoroModelConfig(
            model="./kokoro.onnx",
            voices="./voices.bin",
            tokens="./tokens.txt",
            data_dir="./espeak-ng-data",
            dict_dir="./dict",
            lexicon="./lexicon-zh.txt,./lexicon-us-en.txt",
        ),
        num_threads=2,
        debug=True,
    ),
    rule_fsts="./phone-zh.fst,./date-zh.fst,./number-zh.fst",
    max_num_sentences=1,
)

if not config.validate():
    raise ValueError("Please check your config")

tts = sherpa_onnx.OfflineTts(config)

text = "This model supports both Chinese and English. 小米的核心价值观是什么答案是真诚热爱有困难请拨打110 或者18601200909。I am learning 机器学习. 我在研究 machine learning。What do you think 中英文说的如何呢? 今天是 2025年6月18号."
print("text", text)

# Synthesize the same text once per speaker and save one mp3 per speaker.
# NOTE(review): looks like ./hf/kokoro/v1.1-zh/mp3 must already exist — confirm.
for s, i in speaker2id.items():
    print(s, i, len(speaker2id))
    audio = tts.generate(text, sid=i, speed=1.0)
    sf.write(
        f"./hf/kokoro/v1.1-zh/mp3/{i}-{s}.mp3",
        audio.samples,
        samplerate=audio.sample_rate,
    )

View File

@@ -11,6 +11,8 @@ fi
if [ ! -f config.json ]; then
# see https://huggingface.co/hexgrad/Kokoro-82M/blob/main/config.json
curl -SL -O https://huggingface.co/hexgrad/Kokoro-82M-v1.1-zh/resolve/main/config.json
mkdir -p Kokoro-82M
cp ./config.json ./Kokoro-82M
fi
voices=(