init ascend tts
This commit is contained in:
44
ascend_910-gpt-sovits/GPT-SoVITS/tools/asr/config.py
Normal file
44
ascend_910-gpt-sovits/GPT-SoVITS/tools/asr/config.py
Normal file
@@ -0,0 +1,44 @@
|
||||
import os
|
||||
|
||||
|
||||
def check_fw_local_models():
    """Check at startup which Faster Whisper models exist locally.

    Returns the supported model-size list (from get_models()), where every
    size whose weights are already present under tools/asr/models/ is
    suffixed with "-local" so the UI can offer the offline copy.
    """
    # Reuse the canonical size list instead of duplicating it here, so the
    # two lists can never drift apart.
    model_size_list = get_models()
    for i, size in enumerate(model_size_list):
        if os.path.exists(f"tools/asr/models/faster-whisper-{size}"):
            model_size_list[i] = size + "-local"
    return model_size_list
|
||||
|
||||
|
||||
def get_models():
    """Return the Faster Whisper model sizes selectable in this tool."""
    sizes = "medium medium.en distil-large-v2 distil-large-v3 large-v1 large-v2 large-v3"
    return sizes.split()
|
||||
|
||||
|
||||
# Registry of the ASR backends exposed by the WebUI.
# Display name -> supported languages, model sizes, runner script, precisions.
asr_dict = {
    "达摩 ASR (中文)": {
        "lang": ["zh", "yue"],
        "size": ["large"],
        "path": "funasr_asr.py",
        "precision": ["float32"],
    },
    "Faster Whisper (多语种)": {
        "lang": ["auto", "zh", "en", "ja", "ko", "yue"],
        "size": get_models(),
        "path": "fasterwhisper_asr.py",
        "precision": ["float32", "float16", "int8"],
    },
}
|
||||
170
ascend_910-gpt-sovits/GPT-SoVITS/tools/asr/fasterwhisper_asr.py
Normal file
170
ascend_910-gpt-sovits/GPT-SoVITS/tools/asr/fasterwhisper_asr.py
Normal file
@@ -0,0 +1,170 @@
|
||||
import argparse
|
||||
import os
|
||||
import time
|
||||
import traceback
|
||||
|
||||
import torch
|
||||
from faster_whisper import WhisperModel
|
||||
from huggingface_hub import snapshot_download
|
||||
from huggingface_hub.errors import LocalEntryNotFoundError
|
||||
from tqdm import tqdm
|
||||
|
||||
from tools.asr.config import get_models
|
||||
from tools.asr.funasr_asr import only_asr
|
||||
from tools.my_utils import load_cudnn
|
||||
|
||||
# Language codes accepted by Whisper, plus "auto" for autodetection.
language_code_list = (
    "af am ar as az ba be bg bn bo br bs ca cs cy da de el en es "
    "et eu fa fi fo fr gl gu ha haw he hi hr ht hu hy id is it ja "
    "jw ka kk km kn ko la lb ln lo lt lv mg mi mk ml mn mr ms mt "
    "my ne nl nn no oc pa pl ps pt ro ru sa sd si sk sl sn so sq "
    "sr su sv sw ta te tg th tk tl tr tt uk ur uz vi yi yo zh yue "
    "auto"
).split()
|
||||
|
||||
|
||||
def download_model(model_size: str):
    """Download the CTranslate2 weights for *model_size* from Hugging Face.

    Resolves the Systran repository id for the requested size (distil
    models use the "faster-distil-whisper-*" naming), downloads only the
    files the model needs into tools/asr/models/, and returns the local
    model directory path. Retries once after a cache miss; exits the
    process on unrecoverable download errors.
    """
    if "distil" in model_size:
        # "distil-large-v2" -> "Systran/faster-distil-whisper-large-v2"
        repo_id = "Systran/faster-{}-whisper-{}".format(*model_size.split("-", maxsplit=1))
    else:
        repo_id = f"Systran/faster-whisper-{model_size}"
    # BUG FIX: str.strip() removes a *character set* from both ends, not a
    # prefix, so `repo_id.strip('Systran/')` could corrupt model names;
    # removeprefix() drops exactly the "Systran/" prefix.
    model_path = f"tools/asr/models/{repo_id.removeprefix('Systran/')}"

    files: list[str] = [
        "config.json",
        "model.bin",
        "tokenizer.json",
        "vocabulary.txt",
    ]
    if model_size == "large-v3" or "distil" in model_size:
        # These repos ship a JSON vocabulary (plus a preprocessor config)
        # instead of vocabulary.txt.
        files.append("preprocessor_config.json")
        files.append("vocabulary.json")

        files.remove("vocabulary.txt")

    for attempt in range(2):
        try:
            snapshot_download(
                repo_id=repo_id,
                allow_patterns=files,
                local_dir=model_path,
            )
            break
        except LocalEntryNotFoundError:
            if attempt < 1:
                time.sleep(2)  # brief backoff before the single retry
            else:
                print("[ERROR] LocalEntryNotFoundError and no fallback.")
                traceback.print_exc()
                exit(1)
        except Exception as e:
            print(f"[ERROR] Unexpected error on attempt {attempt + 1}: {e}")
            traceback.print_exc()
            exit(1)

    return model_path
|
||||
|
||||
|
||||
def execute_asr(input_folder, output_folder, model_path, language, precision):
    """Transcribe every file in *input_folder* with Faster Whisper.

    Writes one "<path>|<folder>|<LANG>|<text>" line per file to
    <output_folder>/<basename(input_folder)>.list and returns that path.
    Audio Whisper detects as Chinese is re-transcribed with FunASR.
    Failures on individual files are logged and skipped (best effort).
    """
    if language == "auto":
        language = None  # 不设置语种由模型自动输出概率最高的语种
    # BUG FIX: the original printed model_path twice.
    print("loading faster whisper model:", model_path)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = WhisperModel(model_path, device=device, compute_type=precision)

    input_file_names = os.listdir(input_folder)
    input_file_names.sort()

    output = []
    output_file_name = os.path.basename(input_folder)

    for file_name in tqdm(input_file_names):
        try:
            file_path = os.path.join(input_folder, file_name)
            segments, info = model.transcribe(
                audio=file_path,
                beam_size=5,
                vad_filter=True,
                vad_parameters=dict(min_silence_duration_ms=700),
                language=language,
            )
            text = ""

            if info.language == "zh":
                print("检测为中文文本, 转 FunASR 处理")
                text = only_asr(file_path, language=info.language.lower())

            if text == "":
                # Use the Whisper segments (also the fallback when FunASR
                # returned nothing for a zh file).
                for segment in segments:
                    text += segment.text
            output.append(f"{file_path}|{output_file_name}|{info.language.upper()}|{text}")
        except Exception as e:
            # Best effort per file: log and continue with the rest.
            print(e)
            traceback.print_exc()

    output_folder = output_folder or "output/asr_opt"
    os.makedirs(output_folder, exist_ok=True)
    output_file_path = os.path.abspath(f"{output_folder}/{output_file_name}.list")

    with open(output_file_path, "w", encoding="utf-8") as f:
        f.write("\n".join(output))
        print(f"ASR 任务完成->标注文件路径: {output_file_path}\n")
    return output_file_path
|
||||
|
||||
|
||||
# Presumably preloads the cuDNN shared libraries needed by faster-whisper
# (see tools.my_utils.load_cudnn) — must run before the model is built.
load_cudnn()

if __name__ == "__main__":
    # CLI entry point: transcribe a folder of audio files with Faster Whisper.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_folder", type=str, required=True, help="Path to the folder containing WAV files.")
    parser.add_argument("-o", "--output_folder", type=str, required=True, help="Output folder to store transcriptions.")
    parser.add_argument("-s", "--model_size", type=str, default="large-v3", choices=get_models(), help="Model Size of Faster Whisper")
    parser.add_argument("-l", "--language", type=str, default="ja", choices=language_code_list, help="Language of the audio files.")
    parser.add_argument("-p", "--precision", type=str, default="float16", choices=["float16", "float32", "int8"], help="fp16, int8 or fp32")

    cmd = parser.parse_args()
    # Map the generic "large" alias to the newest release.
    model_size = "large-v3" if cmd.model_size == "large" else cmd.model_size
    model_path = download_model(model_size)
    output_file_path = execute_asr(
        input_folder=cmd.input_folder,
        output_folder=cmd.output_folder,
        model_path=model_path,
        language=cmd.language,
        precision=cmd.precision,
    )
|
||||
118
ascend_910-gpt-sovits/GPT-SoVITS/tools/asr/funasr_asr.py
Normal file
118
ascend_910-gpt-sovits/GPT-SoVITS/tools/asr/funasr_asr.py
Normal file
@@ -0,0 +1,118 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import traceback
|
||||
|
||||
# from funasr.utils import version_checker
|
||||
# version_checker.check_for_update = lambda: None
|
||||
from funasr import AutoModel
|
||||
from tqdm import tqdm
|
||||
|
||||
funasr_models = {}  # cache of loaded models, keyed by language, to avoid reloading
|
||||
|
||||
|
||||
def only_asr(input_file, language):
    """Transcribe a single audio file with FunASR and return the text.

    Returns "" (after printing the traceback) when model loading or
    inference fails, so callers can fall back to another backend.
    """
    try:
        model = create_model(language)
        text = model.generate(input=input_file)[0]["text"]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed.
        text = ""
        print(traceback.format_exc())
    return text
|
||||
|
||||
|
||||
def create_model(language="zh"):
    """Build (or return the cached) FunASR AutoModel for *language*.

    Supported: "zh" (Paraformer with VAD + punctuation models) and "yue"
    (Cantonese UniASR without VAD/punctuation). Raises ValueError for any
    other language. Loaded models are cached in module-level funasr_models.
    """
    # Serve from the cache first so repeated calls skip path resolution and
    # model construction entirely (the original checked the cache last).
    if language in funasr_models:
        return funasr_models[language]

    path_vad = "tools/asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch"
    path_punc = "tools/asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
    # Prefer local snapshots; fall back to ModelScope ids for auto-download.
    path_vad = path_vad if os.path.exists(path_vad) else "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
    path_punc = path_punc if os.path.exists(path_punc) else "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
    vad_model_revision = punc_model_revision = "v2.0.4"

    if language == "zh":
        path_asr = "tools/asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
        path_asr = (
            path_asr
            if os.path.exists(path_asr)
            else "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
        )
        model_revision = "v2.0.4"
    elif language == "yue":
        path_asr = "tools/asr/models/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-online"
        path_asr = (
            path_asr
            if os.path.exists(path_asr)
            else "iic/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-online"
        )
        model_revision = "master"
        path_vad = path_punc = None
        vad_model_revision = punc_model_revision = None
        # Friendly tip: Cantonese with VAD can occasionally hit shape errors,
        # so VAD is disabled; punctuation must then be added in a separate
        # stage, and the punctuation model performs poorly on Cantonese.
    else:
        raise ValueError("FunASR 不支持该语言" + ": " + language)

    model = AutoModel(
        model=path_asr,
        model_revision=model_revision,
        vad_model=path_vad,
        vad_model_revision=vad_model_revision,
        punc_model=path_punc,
        punc_model_revision=punc_model_revision,
    )
    print(f"FunASR 模型加载完成: {language.upper()}")

    funasr_models[language] = model
    return model
|
||||
|
||||
|
||||
def execute_asr(input_folder, output_folder, model_size, language):
    """Transcribe every file in *input_folder* with FunASR.

    *model_size* is accepted for CLI compatibility but is not used by the
    body. Writes one "<path>|<folder>|<LANG>|<text>" line per file to
    <output_folder>/<basename(input_folder)>.list and returns that path.
    Failures on individual files are logged and skipped (best effort).
    """
    input_file_names = os.listdir(input_folder)
    input_file_names.sort()

    output = []
    output_file_name = os.path.basename(input_folder)

    model = create_model(language)

    for file_name in tqdm(input_file_names):
        try:
            print("\n" + file_name)
            file_path = os.path.join(input_folder, file_name)
            text = model.generate(input=file_path)[0]["text"]
            output.append(f"{file_path}|{output_file_name}|{language.upper()}|{text}")
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; per-file failures remain best effort.
            print(traceback.format_exc())

    output_folder = output_folder or "output/asr_opt"
    os.makedirs(output_folder, exist_ok=True)
    output_file_path = os.path.abspath(f"{output_folder}/{output_file_name}.list")

    with open(output_file_path, "w", encoding="utf-8") as f:
        f.write("\n".join(output))
        print(f"ASR 任务完成->标注文件路径: {output_file_path}\n")
    return output_file_path
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: transcribe a folder of audio files with FunASR.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_folder", type=str, required=True, help="Path to the folder containing WAV files.")
    parser.add_argument("-o", "--output_folder", type=str, required=True, help="Output folder to store transcriptions.")
    parser.add_argument("-s", "--model_size", type=str, default="large", help="Model Size of FunASR is Large")
    # NOTE(review): "auto" is accepted here, but create_model() raises
    # ValueError for it — confirm whether a caller maps it beforehand.
    parser.add_argument("-l", "--language", type=str, default="zh", choices=["zh", "yue", "auto"], help="Language of the audio files.")
    # Precision is parsed but not wired into FunASR yet.
    parser.add_argument("-p", "--precision", type=str, default="float16", choices=["float16", "float32"], help="fp16 or fp32")

    cmd = parser.parse_args()
    execute_asr(
        input_folder=cmd.input_folder,
        output_folder=cmd.output_folder,
        model_size=cmd.model_size,
        language=cmd.language,
    )
|
||||
2
ascend_910-gpt-sovits/GPT-SoVITS/tools/asr/models/.gitignore
vendored
Normal file
2
ascend_910-gpt-sovits/GPT-SoVITS/tools/asr/models/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
*
|
||||
!.gitignore
|
||||
Reference in New Issue
Block a user