init ascend tts
@@ -0,0 +1,149 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/bucket_sampler.py
# reference: https://github.com/lifeiteng/vall-e
import itertools
import math
import random
from random import shuffle
from typing import Iterator, Optional, TypeVar

import torch
import torch.distributed as dist
from torch.utils.data import Dataset, Sampler

__all__ = [
    "DistributedBucketSampler",
]

T_co = TypeVar("T_co", covariant=True)


class DistributedBucketSampler(Sampler[T_co]):
    r"""
    Sort the dataset w.r.t. input length,
    divide the samples into buckets,
    sort within each bucket,
    divide the buckets into batches,
    and sort the batches.
    """

    def __init__(
        self,
        dataset: Dataset,
        num_replicas: Optional[int] = None,
        rank: Optional[int] = None,
        shuffle: bool = True,
        seed: int = 0,
        drop_last: bool = False,
        batch_size: int = 32,
    ) -> None:
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size() if torch.cuda.is_available() else 1
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank() if torch.cuda.is_available() else 0
        if torch.cuda.is_available():
            torch.cuda.set_device(rank)
        if rank >= num_replicas or rank < 0:
            raise ValueError("Invalid rank {}, rank should be in the interval [0, {}]".format(rank, num_replicas - 1))
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.drop_last = drop_last
        # If the dataset length is evenly divisible by # of replicas, then there
        # is no need to drop any data, since the dataset will be split equally.
        if self.drop_last and len(self.dataset) % self.num_replicas != 0:  # type: ignore[arg-type]
            # Split to nearest available length that is evenly divisible.
            # This is to ensure each rank receives the same amount of data when
            # using this Sampler.
            self.num_samples = math.ceil(
                (len(self.dataset) - self.num_replicas) / self.num_replicas,  # type: ignore[arg-type]
            )
        else:
            self.num_samples = math.ceil(
                len(self.dataset) / self.num_replicas,
            )  # type: ignore[arg-type]
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle
        self.seed = seed
        self.batch_size = batch_size
        self.id_with_length = self._get_sample_lengths()
        self.id_buckets = self.make_buckets(bucket_width=2.0)

    def _get_sample_lengths(self):
        id_with_lengths = []
        for i in range(len(self.dataset)):
            id_with_lengths.append((i, self.dataset.get_sample_length(i)))
        id_with_lengths.sort(key=lambda x: x[1])
        return id_with_lengths

    def make_buckets(self, bucket_width: float = 2.0):
        buckets = []
        cur = []
        max_sec = bucket_width
        for id, sec in self.id_with_length:
            if sec < max_sec:
                cur.append(id)
            else:
                buckets.append(cur)
                cur = [id]
                max_sec += bucket_width
        if len(cur) > 0:
            buckets.append(cur)
        return buckets
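    # A worked example of the bucketing above (illustrative values, not from the
    # repo): with id_with_length sorted by duration as
    #     [(0, 0.5), (1, 1.9), (2, 2.1), (3, 3.8), (4, 6.0)]
    # and bucket_width=2.0, the threshold max_sec grows 2.0 -> 4.0 -> 6.0, the
    # loop emits [[0, 1], [2, 3]], and the trailing [4] is appended afterwards,
    # giving id_buckets == [[0, 1], [2, 3], [4]]: similarly long clips end up
    # grouped together, which keeps padding waste per batch low.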

    def __iter__(self) -> Iterator[T_co]:
        if self.shuffle:
            # deterministically shuffle based on epoch and seed
            g = torch.Generator()
            g.manual_seed(self.seed + self.epoch)
            random.seed(self.epoch + self.seed)
            shuffled_bucket = []
            for buc in self.id_buckets:
                buc_copy = buc.copy()
                shuffle(buc_copy)
                shuffled_bucket.append(buc_copy)
            grouped_batch_size = self.batch_size * self.num_replicas
            shuffled_bucket = list(itertools.chain(*shuffled_bucket))
            n_batch = int(math.ceil(len(shuffled_bucket) / grouped_batch_size))
            batches = [shuffled_bucket[b * grouped_batch_size : (b + 1) * grouped_batch_size] for b in range(n_batch)]
            shuffle(batches)
            indices = list(itertools.chain(*batches))
        else:
            # type: ignore[arg-type]
            indices = list(range(len(self.dataset)))

        if not self.drop_last:
            # add extra samples to make it evenly divisible
            padding_size = self.total_size - len(indices)
            if padding_size <= len(indices):
                indices += indices[:padding_size]
            else:
                indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
        else:
            # remove tail of data to make it evenly divisible.
            indices = indices[: self.total_size]
        assert len(indices) == self.total_size

        # subsample
        indices = indices[self.rank : self.total_size : self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self) -> int:
        return self.num_samples

    def set_epoch(self, epoch: int) -> None:
        r"""
        Set the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
        use a different random ordering for each epoch. Otherwise, the next iteration of this
        sampler will yield the same ordering.

        Args:
            epoch (int): Epoch number.
        """
        self.epoch = epoch
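# A minimal usage sketch (illustrative, not part of this commit): any Dataset
# that implements __len__ and get_sample_length(idx) -> seconds works, e.g.
# Text2SemanticDataset below.
#
#     sampler = DistributedBucketSampler(dataset, num_replicas=1, rank=0, batch_size=8)
#     loader = DataLoader(dataset, batch_size=8, sampler=sampler, collate_fn=dataset.collate)
#     for epoch in range(num_epochs):
#         sampler.set_epoch(epoch)  # re-seeds the per-epoch shuffle
#         for batch in loader:
#             ...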
@@ -0,0 +1,81 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/data_module.py
# reference: https://github.com/lifeiteng/vall-e
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader

from AR.data.bucket_sampler import DistributedBucketSampler
from AR.data.dataset import Text2SemanticDataset


class Text2SemanticDataModule(LightningDataModule):
    def __init__(
        self,
        config,
        train_semantic_path,
        train_phoneme_path,
        dev_semantic_path=None,
        dev_phoneme_path=None,
    ):
        super().__init__()
        self.config = config
        self.train_semantic_path = train_semantic_path
        self.train_phoneme_path = train_phoneme_path
        self.dev_semantic_path = dev_semantic_path
        self.dev_phoneme_path = dev_phoneme_path
        self.num_workers = self.config["data"]["num_workers"]

    def prepare_data(self):
        pass

    def setup(self, stage=None, output_logs=False):
        self._train_dataset = Text2SemanticDataset(
            phoneme_path=self.train_phoneme_path,
            semantic_path=self.train_semantic_path,
            max_sec=self.config["data"]["max_sec"],
            pad_val=self.config["data"]["pad_val"],
        )
        self._dev_dataset = self._train_dataset
        # self._dev_dataset = Text2SemanticDataset(
        #     phoneme_path=self.dev_phoneme_path,
        #     semantic_path=self.dev_semantic_path,
        #     max_sample=self.config['data']['max_eval_sample'],
        #     max_sec=self.config['data']['max_sec'],
        #     pad_val=self.config['data']['pad_val'])

    def train_dataloader(self):
        batch_size = (
            self.config["train"]["batch_size"] // 2
            if self.config["train"].get("if_dpo", False)
            else self.config["train"]["batch_size"]
        )
        batch_size = max(min(batch_size, len(self._train_dataset) // 4), 1)  # keep enough batches per epoch so checkpoints still get saved
        sampler = DistributedBucketSampler(self._train_dataset, batch_size=batch_size)
        return DataLoader(
            self._train_dataset,
            batch_size=batch_size,
            sampler=sampler,
            collate_fn=self._train_dataset.collate,
            num_workers=self.num_workers,
            persistent_workers=True,
            prefetch_factor=16,
        )

    def val_dataloader(self):
        return DataLoader(
            self._dev_dataset,
            batch_size=1,
            shuffle=False,
            collate_fn=self._train_dataset.collate,
            num_workers=max(self.num_workers, 12),
            persistent_workers=True,
            prefetch_factor=16,
        )

    # Is this ever used?
    def test_dataloader(self):
        return DataLoader(
            self._dev_dataset,
            batch_size=1,
            shuffle=False,
            collate_fn=self._train_dataset.collate,
        )
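# A minimal usage sketch (the config keys mirror the ones read above; the YAML
# path and exp dir are hypothetical):
#
#     config = yaml.safe_load(open("configs/s1.yaml"))  # needs data.num_workers, data.max_sec,
#                                                       # data.pad_val, train.batch_size
#     dm = Text2SemanticDataModule(
#         config,
#         train_semantic_path="logs/exp/6-name2semantic.tsv",
#         train_phoneme_path="logs/exp/2-name2text.txt",
#     )
#     dm.setup()
#     loader = dm.train_dataloader()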
320  ascend_910-gpt-sovits/GPT-SoVITS/GPT_SoVITS/AR/data/dataset.py  Normal file
@@ -0,0 +1,320 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/dataset.py
# reference: https://github.com/lifeiteng/vall-e

# sys.path.append("/data/docker/liujing04/gpt-vits/mq-vits-s1bert_no_bert")
import os
import traceback
from typing import Dict, List

import numpy as np
import pandas as pd
import torch
from torch.utils.data import DataLoader, Dataset

version = os.environ.get("version", None)

from text import cleaned_text_to_sequence

# from config import exp_dir


def batch_sequences(sequences: List[np.array], axis: int = 0, pad_value: int = 0):
    seq = sequences[0]
    ndim = seq.ndim
    if axis < 0:
        axis += ndim
    dtype = seq.dtype
    pad_value = dtype.type(pad_value)
    seq_lengths = [seq.shape[axis] for seq in sequences]
    max_length = np.max(seq_lengths)

    padded_sequences = []
    for seq, length in zip(sequences, seq_lengths):
        padding = [(0, 0)] * axis + [(0, max_length - length)] + [(0, 0)] * (ndim - axis - 1)
        padded_seq = np.pad(seq, padding, mode="constant", constant_values=pad_value)
        padded_sequences.append(padded_seq)
    batch = np.stack(padded_sequences)
    return batch
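# A worked example of batch_sequences (illustrative inputs): two 1-D int arrays
# of lengths 3 and 5 are right-padded along axis 0 to the longer length,
#
#     batch_sequences([np.array([1, 2, 3]), np.array([4, 5, 6, 7, 8])], pad_value=0)
#     # -> array([[1, 2, 3, 0, 0],
#     #           [4, 5, 6, 7, 8]])
#
# which is exactly how collate() below pads the phoneme and semantic id lists.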


class Text2SemanticDataset(Dataset):
    """dataset class for text tokens to semantic model training."""

    def __init__(
        self,
        phoneme_path: str,
        semantic_path: str,
        max_sample: int = None,
        max_sec: int = 100,
        pad_val: int = 1024,
        # min value of phoneme/sec
        min_ps_ratio: int = 3,
        # max value of phoneme/sec
        max_ps_ratio: int = 25,
    ) -> None:
        super().__init__()

        self.semantic_data = pd.read_csv(
            semantic_path,
            delimiter="\t",
            encoding="utf-8",
        )
        # get dict
        self.path2 = phoneme_path  # "%s/2-name2text.txt"%exp_dir#phoneme_path
        self.path3 = "%s/3-bert" % (
            os.path.dirname(
                phoneme_path,
            )
        )  # "%s/3-bert"%exp_dir#bert_dir
        self.path6 = semantic_path  # "%s/6-name2semantic.tsv"%exp_dir#semantic_path
        assert os.path.exists(self.path2)
        assert os.path.exists(self.path6)
        self.phoneme_data = {}
        with open(self.path2, "r", encoding="utf8") as f:
            lines = f.read().strip("\n").split("\n")

        for line in lines:
            tmp = line.split("\t")
            if len(tmp) != 4:
                continue
            self.phoneme_data[tmp[0]] = [tmp[1], tmp[2], tmp[3]]

        # self.phoneme_data = np.load(phoneme_path, allow_pickle=True).item()
        # pad for semantic tokens
        self.PAD: int = pad_val
        # self.hz = 25
        # with open("/data/docker/liujing04/gpt-vits/mq-vits-s1bert_no_bert/configs/s2.json", "r") as f:data = f.read()
        # data=json.loads(data)["model"]["semantic_frame_rate"]#50hz
        # self.hz=int(data[:-2])#
        self.hz = int(os.environ.get("hz", "25hz")[:-2])
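        # e.g. the default "25hz" parses to 25 semantic tokens per second;
        # the trailing "hz" suffix is stripped by the [:-2] slice.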

        # max seconds of semantic token
        self.max_sec = max_sec
        self.min_ps_ratio = min_ps_ratio
        self.max_ps_ratio = max_ps_ratio

        if max_sample is not None:
            self.semantic_data = self.semantic_data[:max_sample]

        # {idx: (semantic, phoneme)}
        # semantic list, phoneme list
        self.semantic_phoneme = []
        self.item_names = []

        self.inited = False

        if not self.inited:
            # run the initialization routine
            self.init_batch()
            self.inited = True
            del self.semantic_data
            del self.phoneme_data
        # self.tokenizer = AutoTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
        # self.tokenizer = AutoTokenizer.from_pretrained("/data/docker/liujing04/bert-vits2/Bert-VITS2-master20231106/bert/chinese-roberta-wwm-ext-large")

    def init_batch(self):
        semantic_data_len = len(self.semantic_data)
        phoneme_data_len = len(self.phoneme_data.keys())
        print("semantic_data_len:", semantic_data_len)
        print("phoneme_data_len:", phoneme_data_len)
        print(self.semantic_data)
        idx = 0
        num_not_in = 0
        num_deleted_bigger = 0
        num_deleted_ps = 0
        for i in range(semantic_data_len):
            # iterate over the items one by one
            # get str
            item_name = self.semantic_data.iloc[i, 0]
            # print(self.phoneme_data)
            try:
                phoneme, word2ph, text = self.phoneme_data[item_name]
            except Exception:
                traceback.print_exc()
                # print(f"{item_name} not in self.phoneme_data !")
                num_not_in += 1
                continue

            semantic_str = self.semantic_data.iloc[i, 1]
            # get token list
            semantic_ids = [int(idx) for idx in semantic_str.split(" ")]
            # (T); no need to reshape to (1, T), since len() is needed here
            # filter out samples that are too long
            if (
                len(semantic_ids) > self.max_sec * self.hz
            ):  # 1: estimate total duration from the token count and drop clips longer than max_sec (set in the config); e.g. 40 s * 25 Hz = 1k tokens
                num_deleted_bigger += 1
                continue
            # (T,); this is fast enough to do up front, so there is no need to process items one by one in __getitem__
            phoneme = phoneme.split(" ")

            try:
                phoneme_ids = cleaned_text_to_sequence(phoneme, version)
            except Exception:
                traceback.print_exc()
                # print(f"{item_name} not in self.phoneme_data !")
                num_not_in += 1
                continue
            # if len(phoneme_ids) > 400:  # 2: changed to a fixed limit of semantic length / 2.5
            if len(phoneme_ids) > self.max_sec * self.hz / 2.5:  # 2: changed to a fixed limit of semantic length / 2.5
                num_deleted_ps += 1
                continue
            # if len(semantic_ids) > 1000:  # 3
            #     num_deleted_bigger += 1
            #     continue

            ps_ratio = len(phoneme_ids) / (len(semantic_ids) / self.hz)

            if ps_ratio > self.max_ps_ratio or ps_ratio < self.min_ps_ratio:  # 4: keep 3-25 phonemes per second
                num_deleted_ps += 1
                # print(item_name)
                continue

            self.semantic_phoneme.append((semantic_ids, phoneme_ids))
            idx += 1
            self.item_names.append(item_name)

        min_num = 100  # at 20 the data is not replicated at all; at 30 it is replicated but checkpoints still are not saved
        leng = len(self.semantic_phoneme)
        if leng < min_num:
            tmp1 = self.semantic_phoneme
            tmp2 = self.item_names
            self.semantic_phoneme = []
            self.item_names = []
            for _ in range(max(2, int(min_num / leng))):
                self.semantic_phoneme += tmp1
                self.item_names += tmp2
        if num_not_in > 0:
            print(f"there are {num_not_in} semantic items not found in the phoneme data")
        if num_deleted_bigger > 0:
            print(
                f"deleted {num_deleted_bigger} audios whose duration is bigger than {self.max_sec} seconds",
            )
        if num_deleted_ps > 0:
            # 4702 for LibriTTS; LibriTTS is annotated data -- does it need filtering? => yes, there are extreme values as high as 100
            print(
                f"deleted {num_deleted_ps} audios whose phoneme/sec is bigger than {self.max_ps_ratio} or smaller than {self.min_ps_ratio}",
            )
        """
        there are 31 semantic items not found in the phoneme data
        deleted 34 audios whose duration is bigger than 54 seconds
        deleted 3190 audios whose phoneme/sec is bigger than 25 or smaller than 3
        dataset.__len__(): 366463

        """
        # 345410 for LibriTTS
        print("dataset.__len__():", self.__len__())

    def __get_item_names__(self) -> List[str]:
        return self.item_names

    def __len__(self) -> int:
        return len(self.semantic_phoneme)

    def __getitem__(self, idx: int) -> Dict:
        semantic_ids, phoneme_ids = self.semantic_phoneme[idx]
        item_name = self.item_names[idx]
        phoneme_ids_len = len(phoneme_ids)
        # semantic tokens target
        semantic_ids_len = len(semantic_ids)

        flag = 0
        path_bert = "%s/%s.pt" % (self.path3, item_name)
        if os.path.exists(path_bert):
            bert_feature = torch.load(path_bert, map_location="cpu")
        else:
            flag = 1
        if flag == 1:
            # bert_feature = torch.zeros_like(phoneme_ids, dtype=torch.float32)
            bert_feature = None
        else:
            assert bert_feature.shape[-1] == len(phoneme_ids)
        return {
            "idx": idx,
            "phoneme_ids": phoneme_ids,
            "phoneme_ids_len": phoneme_ids_len,
            "semantic_ids": semantic_ids,
            "semantic_ids_len": semantic_ids_len,
            "bert_feature": bert_feature,
        }

    def get_sample_length(self, idx: int):
        semantic_ids = self.semantic_phoneme[idx][0]
        sec = 1.0 * len(semantic_ids) / self.hz
        return sec
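    # e.g. a clip encoded as 50 semantic tokens at the default 25 Hz reports
    # 50 / 25 = 2.0 seconds; this is the duration DistributedBucketSampler
    # buckets on via get_sample_length().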

    def collate(self, examples: List[Dict]) -> Dict:
        sample_index: List[int] = []
        phoneme_ids: List[torch.Tensor] = []
        phoneme_ids_lens: List[int] = []
        semantic_ids: List[torch.Tensor] = []
        semantic_ids_lens: List[int] = []
        # return

        for item in examples:
            sample_index.append(item["idx"])
            phoneme_ids.append(np.array(item["phoneme_ids"], dtype=np.int64))
            semantic_ids.append(np.array(item["semantic_ids"], dtype=np.int64))
            phoneme_ids_lens.append(item["phoneme_ids_len"])
            semantic_ids_lens.append(item["semantic_ids_len"])

        # pad 0
        phoneme_ids = batch_sequences(phoneme_ids)
        semantic_ids = batch_sequences(semantic_ids, pad_value=self.PAD)

        # convert each batch to torch.tensor
        phoneme_ids = torch.tensor(phoneme_ids)
        semantic_ids = torch.tensor(semantic_ids)
        phoneme_ids_lens = torch.tensor(phoneme_ids_lens)
        semantic_ids_lens = torch.tensor(semantic_ids_lens)
        bert_padded = torch.FloatTensor(len(examples), 1024, max(phoneme_ids_lens))
        bert_padded.zero_()

        for idx, item in enumerate(examples):
            bert = item["bert_feature"]
            if bert is not None:
                bert_padded[idx, :, : bert.shape[-1]] = bert

        return {
            # List[int]
            "ids": sample_index,
            # torch.Tensor (B, max_phoneme_length)
            "phoneme_ids": phoneme_ids,
            # torch.Tensor (B)
            "phoneme_ids_len": phoneme_ids_lens,
            # torch.Tensor (B, max_semantic_ids_length)
            "semantic_ids": semantic_ids,
            # torch.Tensor (B)
            "semantic_ids_len": semantic_ids_lens,
            # torch.Tensor (B, 1024, max_phoneme_length)
            "bert_feature": bert_padded,
        }


if __name__ == "__main__":
    root_dir = "/data/docker/liujing04/gpt-vits/prepare/dump_mix/"
    dataset = Text2SemanticDataset(
        phoneme_path=root_dir + "phoneme_train.npy",
        semantic_path=root_dir + "semantic_train.tsv",
    )

    batch_size = 12
    dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        collate_fn=dataset.collate,
        shuffle=False,
    )
    for i, batch in enumerate(dataloader):
        if i % 1000 == 0:
            print(i)
        # if i == 0:
        #     print('batch["ids"]:', batch["ids"])
        #     print('batch["phoneme_ids"]:', batch["phoneme_ids"],
        #           batch["phoneme_ids"].shape)
        #     print('batch["phoneme_ids_len"]:', batch["phoneme_ids_len"],
        #           batch["phoneme_ids_len"].shape)
        #     print('batch["semantic_ids"]:', batch["semantic_ids"],
        #           batch["semantic_ids"].shape)
        #     print('batch["semantic_ids_len"]:', batch["semantic_ids_len"],
        #           batch["semantic_ids_len"].shape)
@@ -0,0 +1,146 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py
# reference: https://github.com/lifeiteng/vall-e
import os
import sys

now_dir = os.getcwd()
sys.path.append(now_dir)
from typing import Dict

import torch
from pytorch_lightning import LightningModule

from AR.models.t2s_model import Text2SemanticDecoder
from AR.modules.lr_schedulers import WarmupCosineLRSchedule
from AR.modules.optim import ScaledAdam


class Text2SemanticLightningModule(LightningModule):
    def __init__(self, config, output_dir, is_train=True):
        super().__init__()
        self.config = config
        self.top_k = 3
        self.model = Text2SemanticDecoder(config=config, top_k=self.top_k)
        pretrained_s1 = config.get("pretrained_s1")
        if pretrained_s1 and is_train:
            # print(self.load_state_dict(torch.load(pretrained_s1,map_location="cpu")["state_dict"]))
            print(
                self.load_state_dict(
                    torch.load(
                        pretrained_s1,
                        map_location="cpu",
                        weights_only=False,
                    )["weight"],
                )
            )
        if is_train:
            self.automatic_optimization = False
            self.save_hyperparameters()
            self.eval_dir = output_dir / "eval"
            self.eval_dir.mkdir(parents=True, exist_ok=True)

    def training_step(self, batch: Dict, batch_idx: int):
        opt = self.optimizers()
        scheduler = self.lr_schedulers()
        forward = self.model.forward if self.config["train"].get("if_dpo", False) else self.model.forward_old
        loss, acc = forward(
            batch["phoneme_ids"],
            batch["phoneme_ids_len"],
            batch["semantic_ids"],
            batch["semantic_ids_len"],
            batch["bert_feature"],
        )
        self.manual_backward(loss)
        if batch_idx > 0 and batch_idx % 4 == 0:
            opt.step()
            opt.zero_grad()
            scheduler.step()

        self.log(
            "total_loss",
            loss,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )
        self.log(
            "lr",
            scheduler.get_last_lr()[0],
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )
        self.log(
            f"top_{self.top_k}_acc",
            acc,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )

    def validation_step(self, batch: Dict, batch_idx: int):
        return

        # # get loss
        # loss, acc = self.model.forward(
        #     batch['phoneme_ids'], batch['phoneme_ids_len'],
        #     batch['semantic_ids'], batch['semantic_ids_len'],
        #     batch['bert_feature']
        # )
        #
        # self.log(
        #     "val_total_loss",
        #     loss,
        #     on_step=True,
        #     on_epoch=True,
        #     prog_bar=True,
        #     sync_dist=True)
        # self.log(
        #     f"val_top_{self.top_k}_acc",
        #     acc,
        #     on_step=True,
        #     on_epoch=True,
        #     prog_bar=True,
        #     sync_dist=True)
        #
        # # get infer output
        # semantic_len = batch['semantic_ids'].size(1)
        # prompt_len = min(int(semantic_len * 0.5), 150)
        # prompt = batch['semantic_ids'][:, :prompt_len]
        # pred_semantic = self.model.infer(batch['phoneme_ids'],
        #                                  batch['phoneme_ids_len'], prompt,
        #                                  batch['bert_feature']
        #                                  )
        # save_name = f'semantic_toks_{batch_idx}.pt'
        # save_path = os.path.join(self.eval_dir, save_name)
        # torch.save(pred_semantic.detach().cpu(), save_path)

    def configure_optimizers(self):
        model_parameters = self.model.parameters()
        parameters_names = []
        parameters_names.append([name_param_pair[0] for name_param_pair in self.model.named_parameters()])
        lm_opt = ScaledAdam(
            model_parameters,
            lr=0.01,
            betas=(0.9, 0.95),
            clipping_scale=2.0,
            parameters_names=parameters_names,
            show_dominant_parameters=False,
            clipping_update_period=1000,
        )

        return {
            "optimizer": lm_opt,
            "lr_scheduler": {
                "scheduler": WarmupCosineLRSchedule(
                    lm_opt,
                    init_lr=self.config["optimizer"]["lr_init"],
                    peak_lr=self.config["optimizer"]["lr"],
                    end_lr=self.config["optimizer"]["lr_end"],
                    warmup_steps=self.config["optimizer"]["warmup_steps"],
                    total_steps=self.config["optimizer"]["decay_steps"],
                )
            },
        }
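# Note on the manual optimization above: automatic_optimization is False, so
# manual_backward accumulates gradients and the optimizer/scheduler only step
# every 4th batch, i.e. the effective batch size is 4x the dataloader's.
# A minimal Trainer sketch (output path and epoch count are illustrative):
#
#     module = Text2SemanticLightningModule(config, Path("logs/exp"))
#     trainer = pytorch_lightning.Trainer(max_epochs=100)
#     trainer.fit(module, datamodule=data_module)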
@@ -0,0 +1,110 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py
# reference: https://github.com/lifeiteng/vall-e
import os
import sys

now_dir = os.getcwd()
sys.path.append(now_dir)
from typing import Dict

import torch
from pytorch_lightning import LightningModule

from AR.models.t2s_model_onnx import Text2SemanticDecoder
from AR.modules.lr_schedulers import WarmupCosineLRSchedule
from AR.modules.optim import ScaledAdam


class Text2SemanticLightningModule(LightningModule):
    def __init__(self, config, output_dir, is_train=True):
        super().__init__()
        self.config = config
        self.top_k = 3
        self.model = Text2SemanticDecoder(config=config, top_k=self.top_k)
        pretrained_s1 = config.get("pretrained_s1")
        if pretrained_s1 and is_train:
            # print(self.load_state_dict(torch.load(pretrained_s1,map_location="cpu")["state_dict"]))
            print(
                self.load_state_dict(
                    torch.load(
                        pretrained_s1,
                        map_location="cpu",
                    )["weight"],
                ),
            )
        if is_train:
            self.automatic_optimization = False
            self.save_hyperparameters()
            self.eval_dir = output_dir / "eval"
            self.eval_dir.mkdir(parents=True, exist_ok=True)

    def training_step(self, batch: Dict, batch_idx: int):
        opt = self.optimizers()
        scheduler = self.lr_schedulers()
        loss, acc = self.model.forward(
            batch["phoneme_ids"],
            batch["phoneme_ids_len"],
            batch["semantic_ids"],
            batch["semantic_ids_len"],
            batch["bert_feature"],
        )
        self.manual_backward(loss)
        if batch_idx > 0 and batch_idx % 4 == 0:
            opt.step()
            opt.zero_grad()
            scheduler.step()

        self.log(
            "total_loss",
            loss,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )
        self.log(
            "lr",
            scheduler.get_last_lr()[0],
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )
        self.log(
            f"top_{self.top_k}_acc",
            acc,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )

    def validation_step(self, batch: Dict, batch_idx: int):
        return

    def configure_optimizers(self):
        model_parameters = self.model.parameters()
        parameters_names = []
        parameters_names.append([name_param_pair[0] for name_param_pair in self.model.named_parameters()])
        lm_opt = ScaledAdam(
            model_parameters,
            lr=0.01,
            betas=(0.9, 0.95),
            clipping_scale=2.0,
            parameters_names=parameters_names,
            show_dominant_parameters=False,
            clipping_update_period=1000,
        )

        return {
            "optimizer": lm_opt,
            "lr_scheduler": {
                "scheduler": WarmupCosineLRSchedule(
                    lm_opt,
                    init_lr=self.config["optimizer"]["lr_init"],
                    peak_lr=self.config["optimizer"]["lr"],
                    end_lr=self.config["optimizer"]["lr_end"],
                    warmup_steps=self.config["optimizer"]["warmup_steps"],
                    total_steps=self.config["optimizer"]["decay_steps"],
                )
            },
        }
@@ -0,0 +1,935 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
# reference: https://github.com/lifeiteng/vall-e
import math
from typing import List, Optional

import torch
from torch import nn
from torch.nn import functional as F
from torchmetrics.classification import MulticlassAccuracy
from tqdm import tqdm

from AR.models.utils import (
    dpo_loss,
    get_batch_logps,
    make_pad_mask,
    make_pad_mask_left,
    make_reject_y,
    sample,
    topk_sampling,
)
from AR.modules.embedding import SinePositionalEmbedding, TokenEmbedding
from AR.modules.transformer import LayerNorm, TransformerEncoder, TransformerEncoderLayer

default_config = {
    "embedding_dim": 512,
    "hidden_dim": 512,
    "num_head": 8,
    "num_layers": 12,
    "num_codebook": 8,
    "p_dropout": 0.0,
    "vocab_size": 1024 + 1,
    "phoneme_vocab_size": 512,
    "EOS": 1024,
}
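# vocab_size is 1024 semantic codebook entries plus one EOS token whose id is
# 1024; Text2SemanticDecoder below asserts EOS == vocab_size - 1.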


# @torch.jit.script  # if enabled, the first inference is very slow and the inference speed is unstable
# Efficient implementation equivalent to the following:
def scaled_dot_product_attention(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attn_mask: Optional[torch.Tensor] = None,
    scale: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    B, H, L, S = query.size(0), query.size(1), query.size(-2), key.size(-2)
    if scale is None:
        scale_factor = torch.tensor(1 / math.sqrt(query.size(-1)))
    else:
        scale_factor = scale
    attn_bias = torch.zeros(B, H, L, S, dtype=query.dtype, device=query.device)

    if attn_mask is not None:
        if attn_mask.dtype == torch.bool:
            attn_bias.masked_fill_(attn_mask, float("-inf"))
        else:
            attn_bias += attn_mask
    attn_weight = query @ key.transpose(-2, -1) * scale_factor
    attn_weight += attn_bias
    attn_weight = torch.softmax(attn_weight, dim=-1)

    if attn_mask is not None:
        if attn_mask.dtype == torch.bool:
            attn_weight.masked_fill_(attn_mask, 0)
        else:
            attn_mask[attn_mask != float("-inf")] = 0
            attn_mask[attn_mask == float("-inf")] = 1
            attn_weight.masked_fill_(attn_mask, 0)

    return attn_weight @ value
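# This mirrors F.scaled_dot_product_attention with one addition: fully masked
# query rows (whose scores are all -inf) are zeroed after the softmax instead
# of propagating NaNs. Shape convention, for reference: query (B, H, L, D),
# key/value (B, H, S, D), attn_mask broadcastable to (B, H, L, S), and the
# output is (B, H, L, D).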


@torch.jit.script
class T2SMLP:
    def __init__(self, w1, b1, w2, b2):
        self.w1 = w1
        self.b1 = b1
        self.w2 = w2
        self.b2 = b2

    def forward(self, x):
        x = F.relu(F.linear(x, self.w1, self.b1))
        x = F.linear(x, self.w2, self.b2)
        return x


@torch.jit.script
class T2SBlock:
    def __init__(
        self,
        num_heads,
        hidden_dim: int,
        mlp: T2SMLP,
        qkv_w,
        qkv_b,
        out_w,
        out_b,
        norm_w1,
        norm_b1,
        norm_eps1,
        norm_w2,
        norm_b2,
        norm_eps2,
    ):
        self.num_heads = num_heads
        self.mlp = mlp
        self.hidden_dim: int = hidden_dim
        self.qkv_w = qkv_w
        self.qkv_b = qkv_b
        self.out_w = out_w
        self.out_b = out_b
        self.norm_w1 = norm_w1
        self.norm_b1 = norm_b1
        self.norm_eps1 = norm_eps1
        self.norm_w2 = norm_w2
        self.norm_b2 = norm_b2
        self.norm_eps2 = norm_eps2

        self.false = torch.tensor(False, dtype=torch.bool)

    @torch.jit.ignore
    def to_mask(
        self,
        x: torch.Tensor,
        padding_mask: Optional[torch.Tensor],
    ):
        if padding_mask is None:
            return x

        if padding_mask.dtype == torch.bool:
            return x.masked_fill(padding_mask, 0)
        else:
            return x * padding_mask

    def process_prompt(
        self,
        x: torch.Tensor,
        attn_mask: torch.Tensor,
        padding_mask: Optional[torch.Tensor] = None,
        torch_sdpa: bool = True,
    ):
        q, k, v = F.linear(self.to_mask(x, padding_mask), self.qkv_w, self.qkv_b).chunk(3, dim=-1)

        batch_size = q.shape[0]
        q_len = q.shape[1]
        kv_len = k.shape[1]

        q = self.to_mask(q, padding_mask)
        k_cache = self.to_mask(k, padding_mask)
        v_cache = self.to_mask(v, padding_mask)

        q = q.view(batch_size, q_len, self.num_heads, -1).transpose(1, 2)
        k = k_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
        v = v_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)

        if torch_sdpa:
            attn = F.scaled_dot_product_attention(q, k, v, ~attn_mask)
        else:
            attn = scaled_dot_product_attention(q, k, v, attn_mask)

        attn = attn.transpose(1, 2).reshape(batch_size, q_len, -1)
        attn = F.linear(self.to_mask(attn, padding_mask), self.out_w, self.out_b)

        x = x + attn
        x = F.layer_norm(x, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1)
        x = x + self.mlp.forward(x)
        x = F.layer_norm(
            x,
            [self.hidden_dim],
            self.norm_w2,
            self.norm_b2,
            self.norm_eps2,
        )
        return x, k_cache, v_cache

    def decode_next_token(
        self,
        x: torch.Tensor,
        k_cache: torch.Tensor,
        v_cache: torch.Tensor,
        attn_mask: torch.Tensor = None,
        torch_sdpa: bool = True,
    ):
        q, k, v = F.linear(x, self.qkv_w, self.qkv_b).chunk(3, dim=-1)

        k_cache = torch.cat([k_cache, k], dim=1)
        v_cache = torch.cat([v_cache, v], dim=1)

        batch_size = q.shape[0]
        q_len = q.shape[1]
        kv_len = k_cache.shape[1]

        q = q.view(batch_size, q_len, self.num_heads, -1).transpose(1, 2)
        k = k_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
        v = v_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)

        if torch_sdpa:
            attn = F.scaled_dot_product_attention(q, k, v, (~attn_mask) if attn_mask is not None else None)
        else:
            attn = scaled_dot_product_attention(q, k, v, attn_mask)

        attn = attn.transpose(1, 2).reshape(batch_size, q_len, -1)
        attn = F.linear(attn, self.out_w, self.out_b)

        x = x + attn
        x = F.layer_norm(
            x,
            [self.hidden_dim],
            self.norm_w1,
            self.norm_b1,
            self.norm_eps1,
        )
        x = x + self.mlp.forward(x)
        x = F.layer_norm(
            x,
            [self.hidden_dim],
            self.norm_w2,
            self.norm_b2,
            self.norm_eps2,
        )
        return x, k_cache, v_cache
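    # KV-cache shape sketch (grounded in the two methods above): process_prompt
    # runs full self-attention once over a prompt of length S and returns
    # k_cache/v_cache of shape (B, S, hidden_dim); each decode_next_token call
    # then feeds a single new position x of shape (B, 1, hidden_dim), appends
    # its k/v so the caches grow to (B, S+1, hidden_dim), (B, S+2, ...), and
    # attends that one query against the whole cache -- O(S) work per step
    # instead of recomputing the full O(S^2) sequence.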


@torch.jit.script
class T2STransformer:
    def __init__(self, num_blocks: int, blocks: List[T2SBlock]):
        self.num_blocks: int = num_blocks
        self.blocks = blocks

    def process_prompt(
        self,
        x: torch.Tensor,
        attn_mask: torch.Tensor,
        padding_mask: Optional[torch.Tensor] = None,
        torch_sdpa: bool = True,
    ):
        k_cache: List[torch.Tensor] = []
        v_cache: List[torch.Tensor] = []
        for i in range(self.num_blocks):
            x, k_cache_, v_cache_ = self.blocks[i].process_prompt(x, attn_mask, padding_mask, torch_sdpa)
            k_cache.append(k_cache_)
            v_cache.append(v_cache_)
        return x, k_cache, v_cache

    def decode_next_token(
        self,
        x: torch.Tensor,
        k_cache: List[torch.Tensor],
        v_cache: List[torch.Tensor],
        attn_mask: torch.Tensor = None,
        torch_sdpa: bool = True,
    ):
        for i in range(self.num_blocks):
            x, k_cache[i], v_cache[i] = self.blocks[i].decode_next_token(
                x, k_cache[i], v_cache[i], attn_mask, torch_sdpa
            )
        return x, k_cache, v_cache


class Text2SemanticDecoder(nn.Module):
    def __init__(self, config, norm_first=False, top_k=3):
        super(Text2SemanticDecoder, self).__init__()
        self.model_dim = config["model"]["hidden_dim"]
        self.embedding_dim = config["model"]["embedding_dim"]
        self.num_head = config["model"]["head"]
        self.num_layers = config["model"]["n_layer"]
        self.norm_first = norm_first
        self.vocab_size = config["model"]["vocab_size"]
        self.phoneme_vocab_size = config["model"]["phoneme_vocab_size"]
        self.p_dropout = config["model"]["dropout"]
        self.EOS = config["model"]["EOS"]
        self.norm_first = norm_first
        assert self.EOS == self.vocab_size - 1
        # should be same as num of kmeans bin
        # assert self.EOS == 1024
        self.bert_proj = nn.Linear(1024, self.embedding_dim)
        self.ar_text_embedding = TokenEmbedding(
            self.embedding_dim,
            self.phoneme_vocab_size,
            self.p_dropout,
        )
        self.ar_text_position = SinePositionalEmbedding(
            self.embedding_dim,
            dropout=0.1,
            scale=False,
            alpha=True,
        )
        self.ar_audio_embedding = TokenEmbedding(
            self.embedding_dim,
            self.vocab_size,
            self.p_dropout,
        )
        self.ar_audio_position = SinePositionalEmbedding(
            self.embedding_dim,
            dropout=0.1,
            scale=False,
            alpha=True,
        )

        self.h = TransformerEncoder(
            TransformerEncoderLayer(
                d_model=self.model_dim,
                nhead=self.num_head,
                dim_feedforward=self.model_dim * 4,
                dropout=0.1,
                batch_first=True,
                norm_first=norm_first,
            ),
            num_layers=self.num_layers,
            norm=LayerNorm(self.model_dim) if norm_first else None,
        )

        self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False)
        self.loss_fct = nn.CrossEntropyLoss(reduction="sum")

        self.ar_accuracy_metric = MulticlassAccuracy(
            self.vocab_size,
            top_k=top_k,
            average="micro",
            multidim_average="global",
            ignore_index=self.EOS,
        )

        blocks = []

        for i in range(self.num_layers):
            layer = self.h.layers[i]
            t2smlp = T2SMLP(
                layer.linear1.weight,
                layer.linear1.bias,
                layer.linear2.weight,
                layer.linear2.bias,
            )

            block = T2SBlock(
                self.num_head,
                self.model_dim,
                t2smlp,
                layer.self_attn.in_proj_weight,
                layer.self_attn.in_proj_bias,
                layer.self_attn.out_proj.weight,
                layer.self_attn.out_proj.bias,
                layer.norm1.weight,
                layer.norm1.bias,
                layer.norm1.eps,
                layer.norm2.weight,
                layer.norm2.bias,
                layer.norm2.eps,
            )

            blocks.append(block)

        self.t2s_transformer = T2STransformer(self.num_layers, blocks)

    def make_input_data(self, x, x_lens, y, y_lens, bert_feature):
        x = self.ar_text_embedding(x)
        x = x + self.bert_proj(bert_feature.transpose(1, 2))
        x = self.ar_text_position(x)
        x_mask = make_pad_mask_left(x_lens)

        y_mask = make_pad_mask(y_lens)
        y_mask_int = y_mask.type(torch.int64)
        codes = y.type(torch.int64) * (1 - y_mask_int)

        # Training
        # AR Decoder
        y, targets = self.pad_y_eos(codes, y_mask_int, eos_id=self.EOS)
        x_len = x_lens.max()
        y_len = y_lens.max()
        y_emb = self.ar_audio_embedding(y)
        y_pos = self.ar_audio_position(y_emb)

        xy_padding_mask = torch.concat([x_mask, y_mask], dim=1)

        ar_xy_padding_mask = xy_padding_mask

        x_attn_mask = F.pad(
            torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),
            (0, y_len),
            value=True,
        )
        # x_attn_mask[:, x_len] = False
        y_attn_mask = F.pad(
            torch.triu(
                torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),
                diagonal=1,
            ),
            (x_len, 0),
            value=False,
        )

        xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)
        bsz, src_len = x.shape[0], x_len + y_len
        _xy_padding_mask = (
            ar_xy_padding_mask.view(bsz, 1, 1, src_len)
            .expand(-1, self.num_head, -1, -1)
            .reshape(bsz * self.num_head, 1, src_len)
        )
        xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)
        new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)
        new_attn_mask.masked_fill_(xy_attn_mask, float("-inf"))
        xy_attn_mask = new_attn_mask
        # feed x and the complete y to the model in a single pass
        xy_pos = torch.concat([x, y_pos], dim=1)

        return xy_pos, xy_attn_mask, targets

    def forward(self, x, x_lens, y, y_lens, bert_feature):
        """
        x: phoneme_ids
        y: semantic_ids
        """

        reject_y, reject_y_lens = make_reject_y(y, y_lens)

        xy_pos, xy_attn_mask, targets = self.make_input_data(x, x_lens, y, y_lens, bert_feature)

        xy_dec, _ = self.h(
            (xy_pos, None),
            mask=xy_attn_mask,
        )
        x_len = x_lens.max()
        logits = self.ar_predict_layer(xy_dec[:, x_len - 1 :])

        ###### DPO #############
        reject_xy_pos, reject_xy_attn_mask, reject_targets = self.make_input_data(
            x, x_lens, reject_y, reject_y_lens, bert_feature
        )

        reject_xy_dec, _ = self.h(
            (reject_xy_pos, None),
            mask=reject_xy_attn_mask,
        )
        x_len = x_lens.max()
        reject_logits = self.ar_predict_layer(reject_xy_dec[:, x_len - 1 :])

        # loss
        # from feiteng: the longer the duration, the larger the gradient update should be, hence reduction="sum"

        loss_1 = F.cross_entropy(logits.permute(0, 2, 1), targets, reduction="sum")
        acc = self.ar_accuracy_metric(logits.permute(0, 2, 1).detach(), targets).item()

        A_logits, R_logits = get_batch_logps(logits, reject_logits, targets, reject_targets)
        loss_2, _, _ = dpo_loss(A_logits, R_logits, 0, 0, 0.2, reference_free=True)

        loss = loss_1 + loss_2

        return loss, acc

    def forward_old(self, x, x_lens, y, y_lens, bert_feature):
        """
        x: phoneme_ids
        y: semantic_ids
        """
        x = self.ar_text_embedding(x)
        x = x + self.bert_proj(bert_feature.transpose(1, 2))
        x = self.ar_text_position(x)
        x_mask = make_pad_mask_left(x_lens)

        y_mask = make_pad_mask(y_lens)
        y_mask_int = y_mask.type(torch.int64)
        codes = y.type(torch.int64) * (1 - y_mask_int)

        # Training
        # AR Decoder
        y, targets = self.pad_y_eos(codes, y_mask_int, eos_id=self.EOS)
        x_len = x_lens.max()
        y_len = y_lens.max()
        y_emb = self.ar_audio_embedding(y)
        y_pos = self.ar_audio_position(y_emb)

        xy_padding_mask = torch.concat([x_mask, y_mask], dim=1)
        ar_xy_padding_mask = xy_padding_mask

        x_attn_mask = F.pad(
            torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),
            (0, y_len),
            value=True,
        )
        y_attn_mask = F.pad(
            torch.triu(
                torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),
                diagonal=1,
            ),
            (x_len, 0),
            value=False,
        )
        xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)
        bsz, src_len = x.shape[0], x_len + y_len
        _xy_padding_mask = (
            ar_xy_padding_mask.view(bsz, 1, 1, src_len)
            .expand(-1, self.num_head, -1, -1)
            .reshape(bsz * self.num_head, 1, src_len)
        )
        xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)
        new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)
        new_attn_mask.masked_fill_(xy_attn_mask, float("-inf"))
        xy_attn_mask = new_attn_mask
        # feed x and the complete y to the model in a single pass
        xy_pos = torch.concat([x, y_pos], dim=1)
        xy_dec, _ = self.h(
            (xy_pos, None),
            mask=xy_attn_mask,
        )
        logits = self.ar_predict_layer(xy_dec[:, x_len - 1 :]).permute(0, 2, 1)
        # loss
        # from feiteng: the longer the duration, the larger the gradient update should be, hence reduction="sum"
        loss = F.cross_entropy(logits, targets, reduction="sum")
        acc = self.ar_accuracy_metric(logits.detach(), targets).item()
        return loss, acc

    # TODO: check how this differs from forward, and what to feed as prompts when there is no semantic input
    def infer(
        self,
        x,
        x_lens,
        prompts,
        bert_feature,
        top_k: int = -100,
        early_stop_num: int = -1,
        temperature: float = 1.0,
    ):
        x = self.ar_text_embedding(x)
        x = x + self.bert_proj(bert_feature.transpose(1, 2))
        x = self.ar_text_position(x)

        # AR Decoder
        y = prompts
        prefix_len = y.shape[1]
        x_len = x.shape[1]
        x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
        stop = False
        for _ in tqdm(range(1500)):
            y_emb = self.ar_audio_embedding(y)
            y_pos = self.ar_audio_position(y_emb)
            # feed x together with the gradually growing y to the model
            xy_pos = torch.concat([x, y_pos], dim=1)
            y_len = y.shape[1]
            x_attn_mask_pad = F.pad(
                x_attn_mask,
                (0, y_len),
                value=True,
            )
            y_attn_mask = F.pad(
                torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
                (x_len, 0),
                value=False,
            )
            xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0).to(y.device)

            xy_dec, _ = self.h(
                (xy_pos, None),
                mask=xy_attn_mask,
            )
            logits = self.ar_predict_layer(xy_dec[:, -1])
            samples = topk_sampling(logits, top_k=top_k, top_p=1.0, temperature=temperature)

            if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
                print("use early stop num:", early_stop_num)
                stop = True

            if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
                # print(torch.argmax(logits, dim=-1)[0] == self.EOS, samples[0, 0] == self.EOS)
                stop = True
            if stop:
                if prompts.shape[1] == y.shape[1]:
                    y = torch.concat([y, torch.zeros_like(samples)], dim=1)
                    print("bad zero prediction")
                print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
                break
            # the newly generated semantic_ids are appended to the previous y
            # print(samples.shape)  # [1, 1]; the first 1 is the batch size
            # import os
            # os._exit(2333)
            y = torch.concat([y, samples], dim=1)
        return y

    def pad_y_eos(self, y, y_mask_int, eos_id):
        targets = F.pad(y, (0, 1), value=0) + eos_id * F.pad(y_mask_int, (0, 1), value=1)
        # shift by one position
        return targets[:, :-1], targets
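    # A worked example of pad_y_eos (illustrative values): for one sequence
    # y = [7, 8, 9] with no padding (y_mask_int = [0, 0, 0]) and eos_id = 1024,
    #     F.pad(y, (0, 1), value=0)                  -> [7, 8, 9, 0]
    #     eos_id * F.pad(y_mask_int, (0, 1), value=1) -> [0, 0, 0, 1024]
    # so targets = [7, 8, 9, 1024]; the method returns the decoder input
    # [7, 8, 9] (targets with the last element dropped) together with the full
    # target, i.e. the usual one-position shift for autoregressive training.
    # On padded positions the same trick overwrites the pad slots with EOS.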
|
||||
|
||||
def infer_panel_batch_infer(
|
||||
self,
|
||||
x: List[torch.LongTensor], #####全部文本token
|
||||
x_lens: torch.LongTensor,
|
||||
prompts: torch.LongTensor, ####参考音频token
|
||||
bert_feature: List[torch.LongTensor],
|
||||
top_k: int = -100,
|
||||
top_p: int = 100,
|
||||
early_stop_num: int = -1,
|
||||
temperature: float = 1.0,
|
||||
repetition_penalty: float = 1.35,
|
||||
**kwargs,
|
||||
):
|
||||
if prompts is None:
|
||||
print("Warning: Prompt free is not supported batch_infer! switch to naive_infer")
|
||||
return self.infer_panel_naive_batched(
|
||||
x,
|
||||
x_lens,
|
||||
prompts,
|
||||
bert_feature,
|
||||
top_k=top_k,
|
||||
top_p=top_p,
|
||||
early_stop_num=early_stop_num,
|
||||
temperature=temperature,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
max_len = kwargs.get("max_len", x_lens.max())
|
||||
x_list = []
|
||||
for x_item, bert_item in zip(x, bert_feature):
|
||||
# max_len = max(max_len, x_item.shape[0], bert_item.shape[1])
|
||||
x_item = self.ar_text_embedding(x_item.unsqueeze(0))
|
||||
x_item = x_item + self.bert_proj(bert_item.transpose(0, 1).unsqueeze(0))
|
||||
x_item = self.ar_text_position(x_item).squeeze(0)
|
||||
# x_item = F.pad(x_item,(0,0,0,max_len-x_item.shape[0]),value=0) if x_item.shape[0]<max_len else x_item ### padding right
|
||||
x_item = (
|
||||
F.pad(x_item, (0, 0, max_len - x_item.shape[0], 0), value=0) if x_item.shape[0] < max_len else x_item
|
||||
) ### padding left
|
||||
x_list.append(x_item)
|
||||
x: torch.Tensor = torch.stack(x_list, dim=0)
|
||||
|
||||
# AR Decoder
|
||||
y = prompts
|
||||
|
||||
x_len = x.shape[1]
|
||||
stop = False
|
||||
|
||||
k_cache = None
|
||||
v_cache = None
|
||||
################### first step ##########################
|
||||
assert y is not None, "Error: Prompt free is not supported batch_infer!"
|
||||
ref_free = False
|
||||
|
||||
y_emb = self.ar_audio_embedding(y)
|
||||
y_len = y_emb.shape[1]
|
||||
prefix_len = y.shape[1]
|
||||
y_lens = torch.LongTensor([y_emb.shape[1]] * y_emb.shape[0]).to(x.device)
|
||||
y_pos = self.ar_audio_position(y_emb)
|
||||
xy_pos = torch.concat([x, y_pos], dim=1)
|
||||
|
||||
##### create mask #####
|
||||
bsz = x.shape[0]
|
||||
src_len = x_len + y_len
|
||||
y_paddind_mask = make_pad_mask_left(y_lens, y_len)
|
||||
x_paddind_mask = make_pad_mask_left(x_lens, max_len)
|
||||
|
||||
# (bsz, x_len + y_len)
|
||||
padding_mask = torch.concat([x_paddind_mask, y_paddind_mask], dim=1)
|
||||
|
||||
x_mask = F.pad(
|
||||
torch.zeros(x_len, x_len, dtype=torch.bool, device=x.device),
|
||||
(0, y_len),
|
||||
value=True,
|
||||
)
|
||||
|
||||
y_mask = F.pad( ###yy的右上1扩展到左边xy的0,(y,x+y)
|
||||
torch.triu(torch.ones(y_len, y_len, dtype=torch.bool, device=x.device), diagonal=1),
|
||||
(x_len, 0),
|
||||
value=False,
|
||||
)
|
||||
|
||||
causal_mask = torch.concat([x_mask, y_mask], dim=0).view(1, src_len, src_len).repeat(bsz, 1, 1).to(x.device)
|
||||
# padding_mask = padding_mask.unsqueeze(1) * padding_mask.unsqueeze(2) ### [b, x+y, x+y]
|
||||
### 上面是错误的,会导致padding的token被"看见"
|
||||
|
||||
# 正确的padding_mask应该是:
|
||||
# | pad_len | x_len | y_len |
|
||||
# [[PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],
|
||||
# [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],
|
||||
# [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6], 前3行按理说也应该被mask掉,但是为了防止计算attention时不出现nan,还是保留了,不影响结果
|
||||
# [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],
|
||||
# [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],
|
||||
# [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],
|
||||
# [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],
|
||||
# [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],
|
||||
# [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6]]
|
||||
|
||||
padding_mask = padding_mask.view(bsz, 1, src_len).repeat(1, src_len, 1)
|
||||
|
||||
attn_mask: torch.Tensor = causal_mask.logical_or(padding_mask)
|
||||
attn_mask = attn_mask.unsqueeze(1).expand(-1, self.num_head, -1, -1).bool()
|
||||
|
||||
# 正确的attn_mask应该是这样的:
|
||||
# | pad_len | x_len | y_len |
|
||||
# [[PAD, PAD, PAD, 1, 2, 3, EOS, EOS, EOS],
|
||||
# [PAD, PAD, PAD, 1, 2, 3, EOS, EOS, EOS],
|
||||
# [PAD, PAD, PAD, 1, 2, 3, EOS, EOS, EOS], 前3行按理说也应该被mask掉,但是为了防止计算attention时不出现nan,还是保留了,不影响结果
|
||||
# [PAD, PAD, PAD, 1, 2, 3, EOS, EOS, EOS],
|
||||
# [PAD, PAD, PAD, 1, 2, 3, EOS, EOS, EOS],
|
||||
# [PAD, PAD, PAD, 1, 2, 3, EOS, EOS, EOS],
|
||||
# [PAD, PAD, PAD, 1, 2, 3, 4, EOS, EOS],
|
||||
# [PAD, PAD, PAD, 1, 2, 3, 4, 5, EOS],
|
||||
# [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6]]
|
||||
|
||||
###### decode #####
|
||||
y_list = [None] * y.shape[0]
|
||||
batch_idx_map = list(range(y.shape[0]))
|
||||
idx_list = [None] * y.shape[0]
|
||||
for idx in tqdm(range(1500)):
|
||||
if idx == 0:
|
||||
xy_dec, k_cache, v_cache = self.t2s_transformer.process_prompt(xy_pos, attn_mask, None)
|
||||
else:
|
||||
xy_dec, k_cache, v_cache = self.t2s_transformer.decode_next_token(xy_pos, k_cache, v_cache, attn_mask)
|
||||
logits = self.ar_predict_layer(xy_dec[:, -1])
|
||||
|
||||
if idx == 0:
|
||||
attn_mask = F.pad(attn_mask[:, :, -1].unsqueeze(-2), (0, 1), value=False)
|
||||
logits = logits[:, :-1]
|
||||
else:
|
||||
attn_mask = F.pad(attn_mask, (0, 1), value=False)
|
||||
|
||||
samples = sample(
|
||||
logits, y, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, temperature=temperature
|
||||
)[0]
|
||||
|
||||
y = torch.concat([y, samples], dim=1)
|
||||
|
||||
####### 移除batch中已经生成完毕的序列,进一步优化计算量
|
||||
tokens = torch.argmax(logits, dim=-1)
|
||||
reserved_idx_of_batch_for_y = None
|
||||
if (self.EOS in samples[:, 0]) or (self.EOS in tokens): ###如果生成到EOS,则停止
|
||||
l1 = samples[:, 0] == self.EOS
|
||||
l2 = tokens == self.EOS
|
||||
l = l1.logical_or(l2)
|
||||
removed_idx_of_batch_for_y = torch.where(l == True)[0].tolist()
|
||||
reserved_idx_of_batch_for_y = torch.where(l == False)[0]
|
||||
# batch_indexs = torch.tensor(batch_idx_map, device=y.device)[removed_idx_of_batch_for_y]
|
||||
for i in removed_idx_of_batch_for_y:
|
||||
batch_index = batch_idx_map[i]
|
||||
idx_list[batch_index] = idx
|
||||
y_list[batch_index] = y[i, :-1]
|
||||
|
||||
batch_idx_map = [batch_idx_map[i] for i in reserved_idx_of_batch_for_y.tolist()]
|
||||
|
||||
# 只保留batch中未生成完毕的序列
|
||||
if reserved_idx_of_batch_for_y is not None:
|
||||
# index = torch.LongTensor(batch_idx_map).to(y.device)
|
||||
y = torch.index_select(y, dim=0, index=reserved_idx_of_batch_for_y)
|
||||
attn_mask = torch.index_select(attn_mask, dim=0, index=reserved_idx_of_batch_for_y)
|
||||
if k_cache is not None:
|
||||
for i in range(len(k_cache)):
|
||||
k_cache[i] = torch.index_select(k_cache[i], dim=0, index=reserved_idx_of_batch_for_y)
|
||||
v_cache[i] = torch.index_select(v_cache[i], dim=0, index=reserved_idx_of_batch_for_y)
|
||||
|
||||
if (early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num) or idx == 1499:
|
||||
print("use early stop num:", early_stop_num)
|
||||
stop = True
|
||||
for i, batch_index in enumerate(batch_idx_map):
|
||||
batch_index = batch_idx_map[i]
|
||||
idx_list[batch_index] = idx
|
||||
y_list[batch_index] = y[i, :-1]
|
||||
|
||||
if None not in idx_list:
|
||||
stop = True
|
||||
|
||||
if stop:
|
||||
if y.shape[1] == 0:
|
||||
y = torch.concat([y, torch.zeros_like(samples)], dim=1)
|
||||
print("bad zero prediction")
|
||||
print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
|
||||
break
|
||||
|
||||
####################### update next step ###################################
|
||||
y_emb = self.ar_audio_embedding(y[:, -1:])
|
||||
xy_pos = y_emb * self.ar_audio_position.x_scale + self.ar_audio_position.alpha * self.ar_audio_position.pe[
|
||||
:, y_len + idx
|
||||
].to(dtype=y_emb.dtype, device=y_emb.device)
|
||||
|
||||
if None in idx_list:
|
||||
for i in range(x.shape[0]):
|
||||
if idx_list[i] is None:
|
||||
idx_list[i] = 1500 - 1 ###如果没有生成到EOS,就用最大长度代替
|
||||
|
||||
if ref_free:
|
||||
return y_list, [0] * x.shape[0]
|
||||
# print(idx_list)
|
||||
return y_list, idx_list
|
||||
|
||||
def infer_panel_naive_batched(
|
||||
self,
|
||||
x: List[torch.LongTensor], #####全部文本token
|
||||
x_lens: torch.LongTensor,
|
||||
prompts: torch.LongTensor, ####参考音频token
|
||||
bert_feature: List[torch.LongTensor],
|
||||
top_k: int = -100,
|
||||
top_p: int = 100,
|
||||
early_stop_num: int = -1,
|
||||
temperature: float = 1.0,
|
||||
repetition_penalty: float = 1.35,
|
||||
**kwargs,
|
||||
):
|
||||
y_list = []
|
||||
idx_list = []
|
||||
for i in range(len(x)):
|
||||
y, idx = self.infer_panel_naive(
|
||||
x[i].unsqueeze(0),
|
||||
x_lens[i],
|
||||
prompts[i].unsqueeze(0) if prompts is not None else None,
|
||||
bert_feature[i].unsqueeze(0),
|
||||
top_k,
|
||||
top_p,
|
||||
early_stop_num,
|
||||
temperature,
|
||||
repetition_penalty,
|
||||
**kwargs,
|
||||
)
|
||||
y_list.append(y[0])
|
||||
idx_list.append(idx)
|
||||
|
||||
return y_list, idx_list
|
||||
|
||||

    def infer_panel_naive(
        self,
        x: torch.LongTensor,  ##### all text tokens
        x_lens: torch.LongTensor,
        prompts: torch.LongTensor,  #### reference audio tokens
        bert_feature: torch.LongTensor,
        top_k: int = -100,
        top_p: int = 100,
        early_stop_num: int = -1,
        temperature: float = 1.0,
        repetition_penalty: float = 1.35,
        **kwargs,
    ):
        x = self.ar_text_embedding(x)
        x = x + self.bert_proj(bert_feature.transpose(1, 2))
        x = self.ar_text_position(x)

        # AR Decoder
        y = prompts

        x_len = x.shape[1]
        x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
        stop = False
        # print(1111111,self.num_layers)

        k_cache = None
        v_cache = None
        ################### first step ##########################
        if y is not None:
            y_emb = self.ar_audio_embedding(y)
            y_len = y_emb.shape[1]
            prefix_len = y.shape[1]
            y_pos = self.ar_audio_position(y_emb)
            xy_pos = torch.concat([x, y_pos], dim=1)
            ref_free = False
        else:
            y_emb = None
            y_len = 0
            prefix_len = 0
            y_pos = None
            xy_pos = x
            y = torch.zeros(x.shape[0], 0, dtype=torch.int, device=x.device)
            ref_free = True

        bsz = x.shape[0]
        src_len = x_len + y_len
        x_attn_mask_pad = F.pad(
            x_attn_mask,
            (0, y_len),  ### extend the all-zero x-x block with an all-True x-y block: (x_len, x_len + y_len)
            value=True,
        )
        y_attn_mask = F.pad(  ### extend the causal y-y mask with False for the y-x block on the left: (y_len, x_len + y_len)
            torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
            (x_len, 0),
            value=False,
        )
        xy_attn_mask = (
            torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)
            .unsqueeze(0)
            .expand(bsz * self.num_head, -1, -1)
            .view(bsz, self.num_head, src_len, src_len)
            .to(device=x.device, dtype=torch.bool)
        )
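        # A sketch of the resulting mask (True = blocked), for x_len = 3 text
        # positions and y_len = 2 audio positions:
        #            x0 x1 x2 | y0 y1
        #   x rows:   F  F  F |  T  T   (text attends only to text)
        #   y0 row:   F  F  F |  F  T   (audio attends to text and causally to itself)
        #   y1 row:   F  F  F |  F  F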

        for idx in tqdm(range(1500)):
            if xy_attn_mask is not None:
                xy_dec, k_cache, v_cache = self.t2s_transformer.process_prompt(xy_pos, xy_attn_mask, None)
            else:
                xy_dec, k_cache, v_cache = self.t2s_transformer.decode_next_token(xy_pos, k_cache, v_cache)

            logits = self.ar_predict_layer(xy_dec[:, -1])

            if idx == 0:
                xy_attn_mask = None
            if idx < 11:  ### require at least 10 predicted tokens before allowing a stop (~0.4s of audio)
                logits = logits[:, :-1]

            samples = sample(
                logits, y, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, temperature=temperature
            )[0]

            y = torch.concat([y, samples], dim=1)

            if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
                print("use early stop num:", early_stop_num)
                stop = True

            if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
                stop = True
            if stop:
                if y.shape[1] == 0:
                    y = torch.concat([y, torch.zeros_like(samples)], dim=1)
                    print("bad zero prediction")
                print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
                break

            ####################### update next step ###################################
            y_emb = self.ar_audio_embedding(y[:, -1:])
            xy_pos = y_emb * self.ar_audio_position.x_scale + self.ar_audio_position.alpha * self.ar_audio_position.pe[
                :, y_len + idx
            ].to(dtype=y_emb.dtype, device=y_emb.device)

        if ref_free:
            return y[:, :-1], 0
        return y[:, :-1], idx
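    # Illustrative call (variable names and shapes are assumptions for the sketch,
    # not part of this file):
    # >>> y, idx = model.infer_panel_naive(
    # ...     phoneme_ids,        # (1, x_len) text tokens
    # ...     phoneme_len,
    # ...     prompt_semantic,    # (1, prompt_len) reference audio tokens, or None
    # ...     bert_feat,          # (1, 1024, x_len)
    # ...     top_k=15, top_p=1.0, temperature=1.0,
    # ... )
    # The prompt is processed once with the full attention mask; every later step
    # reuses k_cache / v_cache and feeds only the newest token's embedding.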

    def infer_panel(
        self,
        x: torch.LongTensor,  ##### all text tokens
        x_lens: torch.LongTensor,
        prompts: torch.LongTensor,  #### reference audio tokens
        bert_feature: torch.LongTensor,
        top_k: int = -100,
        top_p: int = 100,
        early_stop_num: int = -1,
        temperature: float = 1.0,
        repetition_penalty: float = 1.35,
        **kwargs,
    ):
        return self.infer_panel_naive(
            x, x_lens, prompts, bert_feature, top_k, top_p, early_stop_num, temperature, repetition_penalty, **kwargs
        )
@@ -0,0 +1,394 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
# reference: https://github.com/lifeiteng/vall-e
import torch
from torch import nn
from torch.nn import functional as F
from torchmetrics.classification import MulticlassAccuracy

from AR.modules.embedding_onnx import SinePositionalEmbedding, TokenEmbedding
from AR.modules.transformer_onnx import LayerNorm, TransformerEncoder, TransformerEncoderLayer

default_config = {
    "embedding_dim": 512,
    "hidden_dim": 512,
    "num_head": 8,
    "num_layers": 12,
    "num_codebook": 8,
    "p_dropout": 0.0,
    "vocab_size": 1024 + 1,
    "phoneme_vocab_size": 512,
    "EOS": 1024,
}

inf_tensor_value = torch.FloatTensor([-float("Inf")]).float()


def logits_to_probs(
    logits,
    previous_tokens=None,
    temperature: float = 1.0,
    top_k=None,
    top_p=None,
    repetition_penalty: float = 1.0,
):
    if previous_tokens is not None and repetition_penalty != 1.0:
        # squeeze only after the None check; calling .squeeze() on None would fail
        previous_tokens = previous_tokens.squeeze().long()
        score = torch.gather(logits, dim=0, index=previous_tokens)
        score = torch.where(
            score < 0,
            score * repetition_penalty,
            score / repetition_penalty,
        )
        logits.scatter_(dim=0, index=previous_tokens, src=score)

    if top_p is not None and top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(
            torch.nn.functional.softmax(
                sorted_logits,
                dim=-1,
            ),
            dim=-1,
        )
        sorted_indices_to_remove = cum_probs > top_p
        sorted_indices_to_remove[0] = False  # keep at least one option
        indices_to_remove = sorted_indices_to_remove.scatter(
            dim=0,
            index=sorted_indices,
            src=sorted_indices_to_remove,
        )
        logits = logits.masked_fill(indices_to_remove, -float("Inf"))

    logits = logits / max(temperature, 1e-5)

    if top_k is not None:
        v, _ = torch.topk(logits, top_k)
        pivot = v.select(-1, -1).unsqueeze(-1)
        logits = torch.where(logits < pivot, inf_tensor_value, logits)

    probs = torch.nn.functional.softmax(logits, dim=-1)
    return probs
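# Order of operations above: repetition penalty first, then nucleus (top-p)
# filtering, then temperature scaling, then top-k truncation. A sketch of the
# top-k effect, assuming a toy distribution:
# >>> logits = torch.tensor([2.0, 1.0, 0.5, -1.0])
# >>> probs = logits_to_probs(logits, top_k=2)  # only the two largest logits survive
# >>> probs[2:]  # everything below the top-k pivot was set to -Inf
# tensor([0., 0.])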


def multinomial_sample_one_no_sync(
    probs_sort,
):  # Does multinomial sampling without a cuda synchronization
    q = torch.randn_like(probs_sort)
    return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
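# This avoids torch.multinomial's device-to-host sync by turning sampling into
# an argmax over randomly perturbed probabilities. Note this ONNX variant
# perturbs with Gaussian noise; the eager-mode version in AR/models/utils.py
# uses Exponential(1) noise, which is the standard exponential-race form.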


def sample(
    logits,
    previous_tokens,
    **sampling_kwargs,
):
    probs = logits_to_probs(
        logits=logits,
        previous_tokens=previous_tokens,
        **sampling_kwargs,
    )
    idx_next = multinomial_sample_one_no_sync(probs)
    return idx_next, probs


class OnnxEncoder(nn.Module):
    def __init__(self, ar_text_embedding, bert_proj, ar_text_position):
        super().__init__()
        self.ar_text_embedding = ar_text_embedding
        self.bert_proj = bert_proj
        self.ar_text_position = ar_text_position

    def forward(self, x, bert_feature):
        x = self.ar_text_embedding(x)
        x = x + self.bert_proj(bert_feature.transpose(1, 2))
        return self.ar_text_position(x)


class T2SFirstStageDecoder(nn.Module):
    def __init__(
        self,
        ar_audio_embedding,
        ar_audio_position,
        h,
        ar_predict_layer,
        loss_fct,
        ar_accuracy_metric,
        top_k,
        early_stop_num,
        num_layers,
    ):
        super().__init__()
        self.ar_audio_embedding = ar_audio_embedding
        self.ar_audio_position = ar_audio_position
        self.h = h
        self.ar_predict_layer = ar_predict_layer
        self.loss_fct = loss_fct
        self.ar_accuracy_metric = ar_accuracy_metric
        self.top_k = top_k
        self.early_stop_num = early_stop_num
        self.num_layers = num_layers

    def forward(self, x, prompt):
        y = prompt
        x_example = x[:, :, 0] * 0.0
        # N, 1, 512
        cache = {
            "all_stage": self.num_layers,
            "k": None,
            "v": None,
            "y_emb": None,
            "first_infer": 1,
            "stage": 0,
        }

        y_emb = self.ar_audio_embedding(y)

        cache["y_emb"] = y_emb
        y_pos = self.ar_audio_position(y_emb)

        xy_pos = torch.concat([x, y_pos], dim=1)

        y_example = y_pos[:, :, 0] * 0.0
        x_attn_mask = torch.matmul(x_example.transpose(0, 1), x_example).bool()
        y_attn_mask = torch.ones_like(torch.matmul(y_example.transpose(0, 1), y_example), dtype=torch.int64)
        y_attn_mask = torch.cumsum(y_attn_mask, dim=1) - torch.cumsum(
            torch.ones_like(
                y_example.transpose(0, 1),
                dtype=torch.int64,
            ),
            dim=0,
        )
        y_attn_mask = y_attn_mask > 0

        x_y_pad = torch.matmul(x_example.transpose(0, 1), y_example).bool()
        y_x_pad = torch.matmul(y_example.transpose(0, 1), x_example).bool()
        x_attn_mask_pad = torch.cat([x_attn_mask, torch.ones_like(x_y_pad)], dim=1)
        y_attn_mask = torch.cat([y_x_pad, y_attn_mask], dim=1)
        xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)
        cache["k"] = (
            torch.matmul(x_attn_mask_pad[0].float().unsqueeze(-1), torch.zeros((1, 512)))
            .unsqueeze(1)
            .repeat(self.num_layers, 1, 1, 1)
        )
        cache["v"] = (
            torch.matmul(x_attn_mask_pad[0].float().unsqueeze(-1), torch.zeros((1, 512)))
            .unsqueeze(1)
            .repeat(self.num_layers, 1, 1, 1)
        )

        xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
        logits = self.ar_predict_layer(xy_dec[:, -1])
        samples = sample(logits[0], y, top_k=self.top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)

        y = torch.concat([y, samples], dim=1)

        return y, cache["k"], cache["v"], cache["y_emb"], x_example
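    # The masks above are built from matmuls over zeroed "example" tensors
    # instead of torch.ones/zeros with Python-int shapes: that keeps every
    # dimension symbolic during ONNX tracing, so the exported graph produces
    # masks whose sizes follow the runtime x/y lengths rather than constants
    # baked in at export time.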


class T2SStageDecoder(nn.Module):
    def __init__(
        self,
        ar_audio_embedding,
        ar_audio_position,
        h,
        ar_predict_layer,
        loss_fct,
        ar_accuracy_metric,
        top_k,
        early_stop_num,
        num_layers,
    ):
        super().__init__()
        self.ar_audio_embedding = ar_audio_embedding
        self.ar_audio_position = ar_audio_position
        self.h = h
        self.ar_predict_layer = ar_predict_layer
        self.loss_fct = loss_fct
        self.ar_accuracy_metric = ar_accuracy_metric
        self.top_k = top_k
        self.early_stop_num = early_stop_num
        self.num_layers = num_layers

    def forward(self, y, k, v, y_emb, x_example):
        cache = {
            "all_stage": self.num_layers,
            "k": torch.nn.functional.pad(k, (0, 0, 0, 0, 0, 1)),
            "v": torch.nn.functional.pad(v, (0, 0, 0, 0, 0, 1)),
            "y_emb": y_emb,
            "first_infer": 0,
            "stage": 0,
        }

        y_emb = torch.cat(
            [
                cache["y_emb"],
                self.ar_audio_embedding(y[:, -1:]),
            ],
            1,
        )
        cache["y_emb"] = y_emb
        y_pos = self.ar_audio_position(y_emb)

        xy_pos = y_pos[:, -1:]

        y_example = y_pos[:, :, 0] * 0.0

        xy_attn_mask = torch.cat([x_example, y_example], dim=1)
        xy_attn_mask = torch.zeros_like(xy_attn_mask, dtype=torch.bool)

        xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
        logits = self.ar_predict_layer(xy_dec[:, -1])
        samples = sample(logits[0], y, top_k=self.top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)

        y = torch.concat([y, samples], dim=1)

        return y, cache["k"], cache["v"], cache["y_emb"], logits, samples
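    # F.pad(k, (0, 0, 0, 0, 0, 1)) grows the cached key/value tensors by one
    # time slot per decoding step; the patched attention layers can then write
    # the newest token's key/value into that slot, keeping the cache length
    # equal to the number of decoded positions.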


class Text2SemanticDecoder(nn.Module):
    def __init__(self, config, norm_first=False, top_k=3):
        super(Text2SemanticDecoder, self).__init__()
        self.model_dim = config["model"]["hidden_dim"]
        self.embedding_dim = config["model"]["embedding_dim"]
        self.num_head = config["model"]["head"]
        self.num_layers = config["model"]["n_layer"]
        self.norm_first = norm_first
        self.vocab_size = config["model"]["vocab_size"]
        self.phoneme_vocab_size = config["model"]["phoneme_vocab_size"]
        self.p_dropout = float(config["model"]["dropout"])
        self.EOS = config["model"]["EOS"]
        assert self.EOS == self.vocab_size - 1
        self.bert_proj = nn.Linear(1024, self.embedding_dim)
        self.ar_text_embedding = TokenEmbedding(self.embedding_dim, self.phoneme_vocab_size, self.p_dropout)
        self.ar_text_position = SinePositionalEmbedding(self.embedding_dim, dropout=0.1, scale=False, alpha=True)
        self.ar_audio_embedding = TokenEmbedding(self.embedding_dim, self.vocab_size, self.p_dropout)
        self.ar_audio_position = SinePositionalEmbedding(self.embedding_dim, dropout=0.1, scale=False, alpha=True)
        self.h = TransformerEncoder(
            TransformerEncoderLayer(
                d_model=self.model_dim,
                nhead=self.num_head,
                dim_feedforward=self.model_dim * 4,
                dropout=0.1,
                batch_first=True,
                norm_first=norm_first,
            ),
            num_layers=self.num_layers,
            norm=LayerNorm(self.model_dim) if norm_first else None,
        )
        self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False)
        self.loss_fct = nn.CrossEntropyLoss(reduction="sum")
        self.ar_accuracy_metric = MulticlassAccuracy(
            self.vocab_size,
            top_k=top_k,
            average="micro",
            multidim_average="global",
            ignore_index=self.EOS,
        )
        self.top_k = torch.LongTensor([1])
        self.early_stop_num = torch.LongTensor([-1])

    def init_onnx(self):
        self.onnx_encoder = OnnxEncoder(self.ar_text_embedding, self.bert_proj, self.ar_text_position)
        self.first_stage_decoder = T2SFirstStageDecoder(
            self.ar_audio_embedding,
            self.ar_audio_position,
            self.h,
            self.ar_predict_layer,
            self.loss_fct,
            self.ar_accuracy_metric,
            self.top_k,
            self.early_stop_num,
            self.num_layers,
        )
        self.stage_decoder = T2SStageDecoder(
            self.ar_audio_embedding,
            self.ar_audio_position,
            self.h,
            self.ar_predict_layer,
            self.loss_fct,
            self.ar_accuracy_metric,
            self.top_k,
            self.early_stop_num,
            self.num_layers,
        )

    def forward(self, x, prompts, bert_feature):
        early_stop_num = self.early_stop_num
        prefix_len = prompts.shape[1]

        x = self.onnx_encoder(x, bert_feature)
        # first_stage_decoder returns five values; the stage decoder takes the
        # same five and additionally returns logits and samples
        y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts)

        stop = False
        for idx in range(1, 1500):
            enco = self.stage_decoder(y, k, v, y_emb, x_example)
            y, k, v, y_emb, logits, samples = enco
            if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
                stop = True
            if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
                stop = True
            if stop:
                break
        y[0, -1] = 0
        return y, idx

    def infer(self, x, prompts, bert_feature):
        top_k = self.top_k
        early_stop_num = self.early_stop_num

        x = self.onnx_encoder(x, bert_feature)

        y = prompts
        prefix_len = y.shape[1]
        x_len = x.shape[1]
        x_example = x[:, :, 0] * 0.0
        x_attn_mask = torch.matmul(x_example.transpose(0, 1), x_example)
        x_attn_mask = torch.zeros_like(x_attn_mask, dtype=torch.bool)

        stop = False
        cache = {
            "all_stage": self.num_layers,
            "k": [None] * self.num_layers,
            "v": [None] * self.num_layers,
            "y_emb": None,
            "first_infer": 1,
            "stage": 0,
        }
        for idx in range(1500):
            if cache["first_infer"] == 1:
                y_emb = self.ar_audio_embedding(y)
            else:
                y_emb = torch.cat([cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1)
            cache["y_emb"] = y_emb
            y_pos = self.ar_audio_position(y_emb)
            if cache["first_infer"] == 1:
                xy_pos = torch.concat([x, y_pos], dim=1)
            else:
                xy_pos = y_pos[:, -1:]
            y_len = y_pos.shape[1]
            if cache["first_infer"] == 1:
                x_attn_mask_pad = F.pad(x_attn_mask, (0, y_len), value=True)
                y_attn_mask = F.pad(
                    torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
                    (x_len, 0),
                    value=False,
                )
                xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)
            else:
                xy_attn_mask = torch.zeros((1, x_len + y_len), dtype=torch.bool)
            xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
            logits = self.ar_predict_layer(xy_dec[:, -1])
            samples = sample(logits[0], y, top_k=top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)
            if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
                stop = True
            if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
                stop = True
            if stop:
                if prompts.shape[1] == y.shape[1]:
                    y = torch.concat([y, torch.zeros_like(samples)], dim=1)
                break
            y = torch.concat([y, samples], dim=1)
            cache["first_infer"] = 0
        return y, idx
ascend_910-gpt-sovits/GPT-SoVITS/GPT_SoVITS/AR/models/utils.py (new file, 282 lines)
@@ -0,0 +1,282 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/utils.py
# reference: https://github.com/lifeiteng/vall-e
from typing import Tuple

import torch
import torch.nn.functional as F


def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
    """
    Args:
      lengths:
        A 1-D tensor containing sentence lengths.
      max_len:
        The length of masks.
    Returns:
      Return a 2-D bool tensor, where masked positions
      are filled with `True` and non-masked positions are
      filled with `False`.

    #>>> lengths = torch.tensor([1, 3, 2, 5])
    #>>> make_pad_mask(lengths)
    tensor([[False,  True,  True,  True,  True],
            [False, False, False,  True,  True],
            [False, False,  True,  True,  True],
            [False, False, False, False, False]])
    """
    assert lengths.ndim == 1, lengths.ndim
    max_len = max(max_len, lengths.max())
    n = lengths.size(0)
    seq_range = torch.arange(0, max_len, device=lengths.device)
    expanded_lengths = seq_range.unsqueeze(0).expand(n, max_len)

    return expanded_lengths >= lengths.unsqueeze(-1)


def make_pad_mask_left(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
    """
    Args:
      lengths:
        A 1-D tensor containing sentence lengths.
      max_len:
        The length of masks.
    Returns:
      Return a 2-D bool tensor, where masked positions
      are filled with `True` and non-masked positions are
      filled with `False`.

    #>>> lengths = torch.tensor([1, 3, 2, 5])
    #>>> make_pad_mask_left(lengths)
    tensor(
        [
            [True, True, False],
            [True, False, False],
            [True, True, False],
            ...
        ]
    )
    """
    assert lengths.ndim == 1, lengths.ndim
    max_len = max(max_len, lengths.max())
    n = lengths.size(0)
    seq_range = torch.arange(0, max_len, device=lengths.device)
    expanded_lengths = seq_range.unsqueeze(0).repeat(n, 1)
    expanded_lengths -= (max_len - lengths).unsqueeze(-1)

    return expanded_lengths < 0


# https://github.com/microsoft/unilm/blob/master/xtune/src/transformers/modeling_utils.py
def top_k_top_p_filtering(
    logits,
    top_k=0,
    top_p=1.0,
    filter_value=-float("Inf"),
    min_tokens_to_keep=1,
):
    """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
    Args:
        logits: logits distribution shape (batch size, vocabulary size)
        if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
        if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
            Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
        Make sure we keep at least min_tokens_to_keep per batch example in the output
    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    if top_k > 0:
        top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1))  # Safety check
        # Remove all tokens with a probability less than the last token of the top-k
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value

    if top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

        # Remove tokens with cumulative probability above the threshold (token with 0 are kept)
        sorted_indices_to_remove = cumulative_probs > top_p
        if min_tokens_to_keep > 1:
            # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
            sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
        # Shift the indices to the right to keep also the first token above the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0

        # scatter sorted tensors to original indexing
        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
        logits[indices_to_remove] = filter_value
    return logits


def topk_sampling(logits, top_k=10, top_p=1.0, temperature=1.0):
    # temperature: (`optional`) float
    #     The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.
    # top_k: (`optional`) int
    #     The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
    # top_p: (`optional`) float
    #     The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.

    # Temperature (higher temperature => more likely to sample low probability tokens)
    if temperature != 1.0:
        logits = logits / temperature
    # Top-p/top-k filtering
    logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
    # Sample
    token = torch.multinomial(F.softmax(logits, dim=-1), num_samples=1)
    return token
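# Illustrative use (the values are an assumption for demonstration):
# >>> logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
# >>> token = topk_sampling(logits, top_k=2, top_p=1.0, temperature=1.0)
# >>> token.shape
# torch.Size([1, 1])   # one index sampled from the two surviving logits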


from typing import Optional


def multinomial_sample_one_no_sync(
    probs_sort,
):  # Does multinomial sampling without a cuda synchronization
    q = torch.empty_like(probs_sort).exponential_(1)
    return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
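# Why this works: if E_i ~ Exponential(1), then argmax_i(p_i / E_i) is
# distributed as Categorical(p) (the exponential-race / Gumbel-max trick),
# so sampling reduces to an argmax and never forces the CUDA-to-CPU
# synchronization that torch.multinomial does.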


def logits_to_probs(
    logits,
    previous_tokens: Optional[torch.Tensor] = None,
    temperature: float = 1.0,
    top_k: Optional[int] = None,
    top_p: Optional[int] = None,
    repetition_penalty: float = 1.0,
):
    # if previous_tokens is not None:
    #     previous_tokens = previous_tokens.squeeze()
    # print(logits.shape,previous_tokens.shape)
    # pdb.set_trace()
    if previous_tokens is not None and repetition_penalty != 1.0:
        previous_tokens = previous_tokens.long()
        score = torch.gather(logits, dim=1, index=previous_tokens)
        score = torch.where(
            score < 0,
            score * repetition_penalty,
            score / repetition_penalty,
        )
        logits.scatter_(dim=1, index=previous_tokens, src=score)

    if top_p is not None and top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1)
        sorted_indices_to_remove = cum_probs > top_p
        sorted_indices_to_remove[:, 0] = False  # keep at least one option
        indices_to_remove = sorted_indices_to_remove.scatter(
            dim=1,
            index=sorted_indices,
            src=sorted_indices_to_remove,
        )
        logits = logits.masked_fill(indices_to_remove, -float("Inf"))

    logits = logits / max(temperature, 1e-5)

    if top_k is not None:
        v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
        pivot = v[:, -1].unsqueeze(-1)
        logits = torch.where(logits < pivot, -float("Inf"), logits)

    probs = torch.nn.functional.softmax(logits, dim=-1)
    return probs


def sample(
    logits,
    previous_tokens: Optional[torch.Tensor] = None,
    **sampling_kwargs,
) -> Tuple[torch.Tensor, torch.Tensor]:
    probs = logits_to_probs(logits=logits, previous_tokens=previous_tokens, **sampling_kwargs)
    idx_next = multinomial_sample_one_no_sync(probs)
    return idx_next, probs


def dpo_loss(
    policy_chosen_logps: torch.FloatTensor,
    policy_rejected_logps: torch.FloatTensor,
    reference_chosen_logps: torch.FloatTensor,
    reference_rejected_logps: torch.FloatTensor,
    beta: float,
    reference_free: bool = False,
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
    pi_logratios = policy_chosen_logps - policy_rejected_logps
    ref_logratios = reference_chosen_logps - reference_rejected_logps

    if reference_free:
        ref_logratios = 0

    logits = pi_logratios - ref_logratios

    losses = -F.logsigmoid(beta * logits)
    chosen_rewards = beta * (policy_chosen_logps - reference_chosen_logps).detach()
    rejected_rewards = beta * (policy_rejected_logps - reference_rejected_logps).detach()

    return losses.mean(), chosen_rewards, rejected_rewards
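# This is the standard DPO objective (Rafailov et al., 2023):
#   loss = -log(sigmoid(beta * ((log pi(y_w|x) - log pi(y_l|x))
#                              - (log ref(y_w|x) - log ref(y_l|x)))))
# where y_w / y_l are the chosen / rejected sequences and beta controls how
# far the policy may drift from the reference model.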


def get_batch_logps(
    logits_target: torch.FloatTensor,
    logits_reject: torch.FloatTensor,
    labels_target: torch.LongTensor,
    labels_reject: torch.LongTensor,
    average_log_prob: bool = False,
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
    # dummy token; we'll ignore the losses on these tokens later

    per_token_logps_target = torch.gather(
        logits_target.log_softmax(-1), dim=2, index=labels_target.unsqueeze(2)
    ).squeeze(2)
    per_token_logps_reject = torch.gather(
        logits_reject.log_softmax(-1), dim=2, index=labels_reject.unsqueeze(2)
    ).squeeze(2)

    return per_token_logps_target.sum(-1), per_token_logps_reject.sum(-1)


def make_reject_y(y_o, y_lens):
    def repeat_P(y):
        range_idx, _ = torch.randint(0, len(y), size=(2,)).sort()
        pre = y[: range_idx[0]]
        shf = y[range_idx[1] :]
        range_text = y[range_idx[0] : range_idx[1]]
        new_y = torch.cat([pre, range_text, range_text, shf])
        return new_y

    def lost_P(y):
        range_idx, _ = torch.randint(0, len(y), size=(2,)).sort()
        pre = y[: range_idx[0]]
        shf = y[range_idx[1] :]
        range_text = y[range_idx[0] : range_idx[1]]
        new_y = torch.cat([pre, shf])
        return new_y

    bs = len(y_lens)
    reject_y = []
    reject_y_lens = []
    for b in range(bs):
        # torch.randint(0, 1, ...) always yields 0, so only the repeat_P branch runs
        process_item_idx = torch.randint(0, 1, size=(1,))[0]
        if process_item_idx == 0:
            new_y = repeat_P(y_o[b])
            reject_y.append(new_y)
            reject_y_lens.append(len(new_y))
        elif process_item_idx == 1:
            new_y = lost_P(y_o[b])
            reject_y.append(new_y)
            reject_y_lens.append(len(new_y))
    max_length = max(reject_y_lens)
    for b in range(bs):
        pad_length = max_length - reject_y_lens[b]
        reject_y[b] = torch.cat([reject_y[b], torch.zeros(pad_length, dtype=y_o.dtype, device=y_o.device)], dim=0)

    reject_y = torch.stack(reject_y, dim=0)
    reject_y_lens = torch.tensor(reject_y_lens, device=y_lens.device)

    return reject_y, reject_y_lens
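# make_reject_y fabricates "rejected" sequences for DPO by corrupting the
# ground-truth semantic tokens: repeat_P duplicates a random span (simulating
# stuttering) and lost_P drops one (simulating skipped words). The rejected
# batch is then right-padded back to a common length.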
@@ -0,0 +1,413 @@
# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/activation.py
from typing import Optional, Tuple

import torch
from torch import Tensor
from torch.nn import Linear, Module
from torch.nn import functional as F
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from torch.nn.modules.linear import NonDynamicallyQuantizableLinear
from torch.nn.parameter import Parameter

from AR.modules.patched_mha_with_cache import multi_head_attention_forward_patched

F.multi_head_attention_forward = multi_head_attention_forward_patched


class MultiheadAttention(Module):
    r"""Allows the model to jointly attend to information
    from different representation subspaces as described in the paper:
    `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.

    Multi-Head Attention is defined as:

    .. math::
        \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O

    where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.

    ``forward()`` will use a special optimized implementation if all of the following
    conditions are met:

    - self attention is being computed (i.e., ``query``, ``key``, and ``value`` are the same tensor. This
      restriction will be loosened in the future.)
    - Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor argument ``requires_grad``
    - training is disabled (using ``.eval()``)
    - dropout is 0
    - ``add_bias_kv`` is ``False``
    - ``add_zero_attn`` is ``False``
    - ``batch_first`` is ``True`` and the input is batched
    - ``kdim`` and ``vdim`` are equal to ``embed_dim``
    - at most one of ``key_padding_mask`` or ``attn_mask`` is passed
    - if a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ is passed, neither ``key_padding_mask``
      nor ``attn_mask`` is passed

    If the optimized implementation is in use, a
    `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be passed for
    ``query``/``key``/``value`` to represent padding more efficiently than using a
    padding mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_
    will be returned, and an additional speedup proportional to the fraction of the input
    that is padding can be expected.

    Args:
        embed_dim: Total dimension of the model.
        num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split
            across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
        dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).
        bias: If specified, adds bias to input / output projection layers. Default: ``True``.
        add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.
        add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.
            Default: ``False``.
        kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).
        vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).
        batch_first: If ``True``, then the input and output tensors are provided
            as (batch, seq, feature). Default: ``False`` (seq, batch, feature).

    Examples::

        >>> # xdoctest: +SKIP
        >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
        >>> attn_output, attn_output_weights = multihead_attn(query, key, value)

    """

    __constants__ = ["batch_first"]
    bias_k: Optional[torch.Tensor]
    bias_v: Optional[torch.Tensor]

    def __init__(
        self,
        embed_dim,
        num_heads,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        kdim=None,
        vdim=None,
        batch_first=False,
        linear1_cls=Linear,
        linear2_cls=Linear,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super(MultiheadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim

        self.num_heads = num_heads
        self.dropout = dropout
        self.batch_first = batch_first
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"

        if add_bias_kv:
            self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
            self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
        else:
            self.bias_k = self.bias_v = None

        if linear1_cls == Linear:
            if not self._qkv_same_embed_dim:
                self.q_proj_weight = Parameter(
                    torch.empty((embed_dim, embed_dim), **factory_kwargs),
                )
                self.k_proj_weight = Parameter(
                    torch.empty((embed_dim, self.kdim), **factory_kwargs),
                )
                self.v_proj_weight = Parameter(
                    torch.empty((embed_dim, self.vdim), **factory_kwargs),
                )
                self.register_parameter("in_proj_weight", None)
            else:
                self.in_proj_weight = Parameter(
                    torch.empty((3 * embed_dim, embed_dim), **factory_kwargs),
                )
                self.register_parameter("q_proj_weight", None)
                self.register_parameter("k_proj_weight", None)
                self.register_parameter("v_proj_weight", None)

            if bias:
                self.in_proj_bias = Parameter(torch.empty(3 * embed_dim, **factory_kwargs))
            else:
                self.register_parameter("in_proj_bias", None)
            self.out_proj = NonDynamicallyQuantizableLinear(
                embed_dim,
                embed_dim,
                bias=bias,
                **factory_kwargs,
            )

            self._reset_parameters()
        else:
            if not self._qkv_same_embed_dim:
                raise NotImplementedError
            else:
                self.in_proj_linear = linear1_cls(
                    embed_dim,
                    3 * embed_dim,
                    bias=bias,
                    **factory_kwargs,
                )
                self.in_proj_weight = self.in_proj_linear.weight

                self.register_parameter("q_proj_weight", None)
                self.register_parameter("k_proj_weight", None)
                self.register_parameter("v_proj_weight", None)

                if bias:
                    self.in_proj_bias = self.in_proj_linear.bias
                else:
                    self.register_parameter("in_proj_bias", None)

                self.out_proj = linear2_cls(
                    embed_dim,
                    embed_dim,
                    bias=bias,
                    **factory_kwargs,
                )

                if self.bias_k is not None:
                    xavier_normal_(self.bias_k)
                if self.bias_v is not None:
                    xavier_normal_(self.bias_v)

        self.add_zero_attn = add_zero_attn

    def _reset_parameters(self):
        if self._qkv_same_embed_dim:
            xavier_uniform_(self.in_proj_weight)
        else:
            xavier_uniform_(self.q_proj_weight)
            xavier_uniform_(self.k_proj_weight)
            xavier_uniform_(self.v_proj_weight)

        if self.in_proj_bias is not None:
            constant_(self.in_proj_bias, 0.0)
            constant_(self.out_proj.bias, 0.0)

        if self.bias_k is not None:
            xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            xavier_normal_(self.bias_v)

    def __setstate__(self, state):
        # Support loading old MultiheadAttention checkpoints generated by v1.1.0
        if "_qkv_same_embed_dim" not in state:
            state["_qkv_same_embed_dim"] = True

        super(MultiheadAttention, self).__setstate__(state)

    def forward(
        self,
        query: Tensor,
        key: Tensor,
        value: Tensor,
        key_padding_mask: Optional[Tensor] = None,
        need_weights: bool = True,
        attn_mask: Optional[Tensor] = None,
        average_attn_weights: bool = True,
        cache=None,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        r"""
        Args:
            query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``
                or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,
                :math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.
                Queries are compared against key-value pairs to produce the output.
                See "Attention Is All You Need" for more details.
            key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``
                or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,
                :math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.
                See "Attention Is All You Need" for more details.
            value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when
                ``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source
                sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.
                See "Attention Is All You Need" for more details.
            key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``
                to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`.
                Binary and byte masks are supported.
                For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for
                the purpose of attention. For a float mask, it will be directly added to the corresponding ``key`` value.
            need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.
                Default: ``True``.
            attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape
                :math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size,
                :math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be
                broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.
                Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the
                corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the
                corresponding position is not allowed to attend. For a float mask, the mask values will be added to
                the attention weight.
            average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
                heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
                effect when ``need_weights=True``. Default: ``True`` (i.e. average weights across heads)

        Outputs:
            - **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,
              :math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,
              where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the
              embedding dimension ``embed_dim``.
            - **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,
              returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
              :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
              :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
              head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`.

        .. note::
            `batch_first` argument is ignored for unbatched inputs.
        """
        is_batched = query.dim() == 3
        if key_padding_mask is not None:
            _kpm_dtype = key_padding_mask.dtype
            if _kpm_dtype != torch.bool and not torch.is_floating_point(
                key_padding_mask,
            ):
                raise AssertionError("only bool and floating types of key_padding_mask are supported")
        why_not_fast_path = ""
        if not is_batched:
            why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
        elif query is not key or key is not value:
            # When lifting this restriction, don't forget to either
            # enforce that the dtypes all match or test cases where
            # they don't!
            why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
        elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
            why_not_fast_path = (
                f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
            )
        elif self.in_proj_weight is not None and query.dtype != self.in_proj_weight.dtype:
            # this case will fail anyway, but at least they'll get a useful error message.
            why_not_fast_path = (
                f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
            )
        elif self.training:
            why_not_fast_path = "training is enabled"
        elif not self.batch_first:
            why_not_fast_path = "batch_first was not True"
        elif self.bias_k is not None:
            why_not_fast_path = "self.bias_k was not None"
        elif self.bias_v is not None:
            why_not_fast_path = "self.bias_v was not None"
        elif self.dropout:
            why_not_fast_path = f"dropout was {self.dropout}, required zero"
        elif self.add_zero_attn:
            why_not_fast_path = "add_zero_attn was enabled"
        elif not self._qkv_same_embed_dim:
            why_not_fast_path = "_qkv_same_embed_dim was not True"
        elif attn_mask is not None:
            why_not_fast_path = "attn_mask was not None"
        elif query.is_nested and key_padding_mask is not None:
            why_not_fast_path = "key_padding_mask is not supported with NestedTensor input"
        elif self.num_heads % 2 == 1:
            why_not_fast_path = "num_heads is odd"
        elif torch.is_autocast_enabled():
            why_not_fast_path = "autocast is enabled"

        if not why_not_fast_path:
            tensor_args = (
                query,
                key,
                value,
                self.in_proj_weight,
                self.in_proj_bias,
                self.out_proj.weight,
                self.out_proj.bias,
            )
            # We have to use list comprehensions below because TorchScript does not support
            # generator expressions.
            if torch.overrides.has_torch_function(tensor_args):
                why_not_fast_path = "some Tensor argument has_torch_function"
            elif not all([(x is None or x.is_cuda or "cpu" in str(x.device)) for x in tensor_args]):
                why_not_fast_path = "some Tensor argument is neither CUDA nor CPU"
            elif torch.is_grad_enabled() and any([x is not None and x.requires_grad for x in tensor_args]):
                why_not_fast_path = "grad is enabled and at least one of query or the input/output projection weights or biases requires_grad"
            if not why_not_fast_path:
                return torch._native_multi_head_attention(
                    query,
                    key,
                    value,
                    self.embed_dim,
                    self.num_heads,
                    self.in_proj_weight,
                    self.in_proj_bias,
                    self.out_proj.weight,
                    self.out_proj.bias,
                    key_padding_mask if key_padding_mask is not None else attn_mask,
                    need_weights,
                    average_attn_weights,
                    1 if key_padding_mask is not None else 0 if attn_mask is not None else None,
                )

        any_nested = query.is_nested or key.is_nested or value.is_nested
        assert not any_nested, (
            "MultiheadAttention does not support NestedTensor outside of its fast path. "
            + f"The fast path was not hit because {why_not_fast_path}"
        )

        if self.batch_first and is_batched:
            # make sure that the transpose op does not affect the "is" property
            if key is value:
                if query is key:
                    query = key = value = query.transpose(1, 0)
                else:
                    query, key = [x.transpose(1, 0) for x in (query, key)]
                    value = key
            else:
                query, key, value = [x.transpose(1, 0) for x in (query, key, value)]

        if not self._qkv_same_embed_dim:
            attn_output, attn_output_weights = F.multi_head_attention_forward(
                query,
                key,
                value,
                self.embed_dim,
                self.num_heads,
                self.in_proj_weight,
                self.in_proj_bias,
                self.bias_k,
                self.bias_v,
                self.add_zero_attn,
                self.dropout,
                self.out_proj.weight,
                self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask,
                need_weights=need_weights,
                attn_mask=attn_mask,
                use_separate_proj_weight=True,
                q_proj_weight=self.q_proj_weight,
                k_proj_weight=self.k_proj_weight,
                v_proj_weight=self.v_proj_weight,
                average_attn_weights=average_attn_weights,
                cache=cache,
            )
        else:
            attn_output, attn_output_weights = F.multi_head_attention_forward(
                query,
                key,
                value,
                self.embed_dim,
                self.num_heads,
                self.in_proj_weight,
                self.in_proj_bias,
                self.bias_k,
                self.bias_v,
                self.add_zero_attn,
                self.dropout,
                self.out_proj.weight,
                self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask,
                need_weights=need_weights,
                attn_mask=attn_mask,
                average_attn_weights=average_attn_weights,
                cache=cache,
            )
        if self.batch_first and is_batched:
            return attn_output.transpose(1, 0), attn_output_weights
        else:
            return attn_output, attn_output_weights
@@ -0,0 +1,188 @@
# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/activation.py
from typing import Optional, Tuple

import torch
from torch import Tensor
from torch.nn import Linear, Module
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from torch.nn.modules.linear import NonDynamicallyQuantizableLinear
from torch.nn.parameter import Parameter

from AR.modules.patched_mha_with_cache_onnx import multi_head_attention_forward_patched


class MultiheadAttention(Module):
    __constants__ = ["batch_first"]
    bias_k: Optional[torch.Tensor]
    bias_v: Optional[torch.Tensor]

    def __init__(
        self,
        embed_dim,
        num_heads,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        kdim=None,
        vdim=None,
        batch_first=False,
        linear1_cls=Linear,
        linear2_cls=Linear,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super(MultiheadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim

        self.num_heads = num_heads
        self.dropout = dropout
        self.batch_first = batch_first
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"

        if add_bias_kv:
            self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
            self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
        else:
            self.bias_k = self.bias_v = None

        if linear1_cls == Linear:
            if not self._qkv_same_embed_dim:
                self.q_proj_weight = Parameter(
                    torch.empty(
                        (embed_dim, embed_dim),
                        **factory_kwargs,
                    )
                )
                self.k_proj_weight = Parameter(
                    torch.empty(
                        (embed_dim, self.kdim),
                        **factory_kwargs,
                    )
                )
                self.v_proj_weight = Parameter(
                    torch.empty(
                        (embed_dim, self.vdim),
                        **factory_kwargs,
                    )
                )
                self.register_parameter("in_proj_weight", None)
            else:
                self.in_proj_weight = Parameter(
                    torch.empty(
                        (3 * embed_dim, embed_dim),
                        **factory_kwargs,
                    )
                )
                self.register_parameter("q_proj_weight", None)
                self.register_parameter("k_proj_weight", None)
                self.register_parameter("v_proj_weight", None)

            if bias:
                self.in_proj_bias = Parameter(
                    torch.empty(3 * embed_dim, **factory_kwargs),
                )
            else:
                self.register_parameter("in_proj_bias", None)
            self.out_proj = NonDynamicallyQuantizableLinear(embed_dim, embed_dim, bias=bias, **factory_kwargs)

            self._reset_parameters()
        else:
            if not self._qkv_same_embed_dim:
                raise NotImplementedError
            else:
                self.in_proj_linear = linear1_cls(
                    embed_dim,
                    3 * embed_dim,
                    bias=bias,
                    **factory_kwargs,
                )
                self.in_proj_weight = self.in_proj_linear.weight

                self.register_parameter("q_proj_weight", None)
                self.register_parameter("k_proj_weight", None)
                self.register_parameter("v_proj_weight", None)

                if bias:
                    self.in_proj_bias = self.in_proj_linear.bias
                else:
                    self.register_parameter("in_proj_bias", None)

                self.out_proj = linear2_cls(
                    embed_dim,
                    embed_dim,
                    bias=bias,
                    **factory_kwargs,
                )

                if self.bias_k is not None:
                    xavier_normal_(self.bias_k)
                if self.bias_v is not None:
                    xavier_normal_(self.bias_v)

        self.add_zero_attn = add_zero_attn

    def _reset_parameters(self):
        if self._qkv_same_embed_dim:
            xavier_uniform_(self.in_proj_weight)
        else:
            xavier_uniform_(self.q_proj_weight)
            xavier_uniform_(self.k_proj_weight)
            xavier_uniform_(self.v_proj_weight)

        if self.in_proj_bias is not None:
            constant_(self.in_proj_bias, 0.0)
            constant_(self.out_proj.bias, 0.0)

        if self.bias_k is not None:
            xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            xavier_normal_(self.bias_v)

    def __setstate__(self, state):
        # Support loading old MultiheadAttention checkpoints generated by v1.1.0
        if "_qkv_same_embed_dim" not in state:
            state["_qkv_same_embed_dim"] = True

        super(MultiheadAttention, self).__setstate__(state)

    def forward(
        self,
        query: Tensor,
        key: Tensor,
        value: Tensor,
        key_padding_mask: Optional[Tensor] = None,
        need_weights: bool = True,
        attn_mask: Optional[Tensor] = None,
        average_attn_weights: bool = True,
        cache=None,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        any_nested = query.is_nested or key.is_nested or value.is_nested
        query = key = value = query.transpose(1, 0)
        attn_output = multi_head_attention_forward_patched(
            query,
            key,
            value,
            self.embed_dim,
            self.num_heads,
            self.in_proj_weight,
            self.in_proj_bias,
            self.bias_k,
            self.bias_v,
            self.add_zero_attn,
            self.dropout,
            self.out_proj.weight,
            self.out_proj.bias,
            training=self.training,
            key_padding_mask=key_padding_mask,
            need_weights=need_weights,
            attn_mask=attn_mask,
            average_attn_weights=average_attn_weights,
            cache=cache,
        )
        return attn_output.transpose(1, 0)
@@ -0,0 +1,78 @@
# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/embedding.py
import math

import torch
from torch import nn


class TokenEmbedding(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        vocab_size: int,
        dropout: float = 0.0,
    ):
        super().__init__()

        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim

        self.dropout = torch.nn.Dropout(p=dropout)
        self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim)

    @property
    def weight(self) -> torch.Tensor:
        return self.word_embeddings.weight

    def embedding(self, index: int) -> torch.Tensor:
        return self.word_embeddings.weight[index : index + 1]

    def forward(self, x: torch.Tensor):
        x = self.word_embeddings(x)
        x = self.dropout(x)
        return x


class SinePositionalEmbedding(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        dropout: float = 0.0,
        scale: bool = False,
        alpha: bool = False,
    ):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.x_scale = math.sqrt(embedding_dim) if scale else 1.0
        self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha)
        self.dropout = torch.nn.Dropout(p=dropout)

        self.reverse = False
        self.pe = None
        self.extend_pe(torch.tensor(0.0).expand(1, 4000))

    def extend_pe(self, x):
        """Reset the positional encodings."""
        if self.pe is not None:
            if self.pe.size(1) >= x.size(1):
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
                return
        pe = torch.zeros(x.size(1), self.embedding_dim)
        if self.reverse:
            position = torch.arange(x.size(1) - 1, -1, -1.0, dtype=torch.float32).unsqueeze(1)
        else:
            position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.embedding_dim, 2, dtype=torch.float32) * -(math.log(10000.0) / self.embedding_dim)
        )
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.pe = pe.to(device=x.device, dtype=x.dtype).detach()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        self.extend_pe(x)
        output = x.unsqueeze(-1) if x.ndim == 2 else x
        output = output * self.x_scale + self.alpha * self.pe[:, : x.size(1)]
        return self.dropout(output)
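    # extend_pe fills pe with the standard sinusoidal encoding,
    #   pe[pos, 2i]   = sin(pos / 10000^(2i / d)),
    #   pe[pos, 2i+1] = cos(pos / 10000^(2i / d)),
    # precomputed once for 4000 positions and re-extended only when an input
    # is longer; forward then returns dropout(x * x_scale + alpha * pe).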
@@ -0,0 +1,63 @@
# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/embedding.py
import math

import torch
from torch import nn


class TokenEmbedding(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        vocab_size: int,
        dropout: float = 0.0,
    ):
        super().__init__()

        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim

        self.dropout = torch.nn.Dropout(p=dropout)
        self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim)

    @property
    def weight(self) -> torch.Tensor:
        return self.word_embeddings.weight

    def embedding(self, index: int) -> torch.Tensor:
        return self.word_embeddings.weight[index : index + 1]

    def forward(self, x: torch.Tensor):
        x = self.word_embeddings(x)
        x = self.dropout(x)
        return x


class SinePositionalEmbedding(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        dropout: float = 0.0,
        scale: bool = False,
        alpha: bool = False,
    ):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.x_scale = math.sqrt(embedding_dim) if scale else 1.0
        self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha)
        self.dropout = torch.nn.Dropout(p=dropout)
        self.reverse = False
        self.div_term = torch.exp(torch.arange(0, self.embedding_dim, 2) * -(math.log(10000.0) / self.embedding_dim))

    def extend_pe(self, x):
        position = torch.cumsum(torch.ones_like(x[:, :, 0]), dim=1).transpose(0, 1)
        scpe = (position * self.div_term).unsqueeze(0)
        pe = torch.cat([torch.sin(scpe), torch.cos(scpe)]).permute(1, 2, 0)
        pe = pe.contiguous().view(1, -1, self.embedding_dim)
        return pe

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        pe = self.extend_pe(x)
        output = x.unsqueeze(-1) if x.ndim == 2 else x
        output = output * self.x_scale + self.alpha * pe
        return self.dropout(output)
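    # Unlike the eager-mode SinePositionalEmbedding above, this ONNX variant
    # keeps no pe buffer: extend_pe rebuilds the encoding on every call from a
    # cumsum over ones, so the positional table is a traced function of the
    # input length and the exported graph handles arbitrary sequence lengths.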
@@ -0,0 +1,85 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/modules/lr_schedulers.py
# reference: https://github.com/lifeiteng/vall-e
import math

import torch
from matplotlib import pyplot as plt
from torch import nn
from torch.optim import Adam


class WarmupCosineLRSchedule(torch.optim.lr_scheduler._LRScheduler):
    """
    Implements a warmup learning-rate schedule: the LR rises linearly from 'init_lr' to 'peak_lr'
    over 'warmup_steps', then follows a cosine decay to 'end_lr'. Works for multiple optimizers.
    """

    def __init__(
        self,
        optimizer,
        init_lr,
        peak_lr,
        end_lr,
        warmup_steps=10000,
        total_steps=400000,
        current_step=0,
    ):
        self.init_lr = init_lr
        self.peak_lr = peak_lr
        self.end_lr = end_lr
        self.optimizer = optimizer
        self._warmup_rate = (peak_lr - init_lr) / warmup_steps
        self._decay_rate = (end_lr - peak_lr) / (total_steps - warmup_steps)
        self._current_step = current_step
        self.lr = init_lr
        self.warmup_steps = warmup_steps
        self.total_steps = total_steps
        self._last_lr = [self.lr]

    def set_lr(self, lr):
        self._last_lr = [g["lr"] for g in self.optimizer.param_groups]
        for g in self.optimizer.param_groups:
            # g['lr'] = lr
            g["lr"] = self.end_lr  # LR locked: always use end_lr

    def step(self):
        if self._current_step < self.warmup_steps:
            lr = self.init_lr + self._warmup_rate * self._current_step

        elif self._current_step > self.total_steps:
            lr = self.end_lr

        else:
            decay_ratio = (self._current_step - self.warmup_steps) / (self.total_steps - self.warmup_steps)
            if decay_ratio < 0.0 or decay_ratio > 1.0:
                raise RuntimeError("Decay ratio must be in [0.0, 1.0]. Fix LR scheduler settings.")
            coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))
            lr = self.end_lr + coeff * (self.peak_lr - self.end_lr)

        self.lr = lr = self.end_lr = 0.002  # LR locked: the schedule misbehaved, so pin it directly
        self.set_lr(lr)
        self.lr = lr
        self._current_step += 1
        return self.lr


if __name__ == "__main__":
    m = nn.Linear(10, 10)
    opt = Adam(m.parameters(), lr=1e-4)
    s = WarmupCosineLRSchedule(
        opt,
        1e-6,
        2e-4,
        1e-6,
        warmup_steps=2000,
        total_steps=20000,
        current_step=0,
    )
    lrs = []
    for i in range(25000):
        s.step()
        lrs.append(s.lr)
        print(s.lr)

    plt.plot(lrs)
    plt.plot(range(0, 25000), lrs)
    plt.show()
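# ---- Editor's note (worked check, not part of the original file) ----
# Sanity-checking the (unlocked) cosine branch of step(): at the end of warmup,
# decay_ratio = 0, so coeff = 0.5 * (1 + cos(0)) = 1 and lr = end_lr + (peak_lr - end_lr) = peak_lr;
# at total_steps, decay_ratio = 1, coeff = 0.5 * (1 + cos(pi)) = 0 and lr = end_lr.
# Note that the hard-coded `self.lr = lr = self.end_lr = 0.002` line above overrides all of this.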
ascend_910-gpt-sovits/GPT-SoVITS/GPT_SoVITS/AR/modules/optim.py
@@ -0,0 +1,593 @@
# Copyright 2022 Xiaomi Corp. (authors: Daniel Povey)
#
# See ../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
from collections import defaultdict
from typing import List, Tuple

import torch
from torch import Tensor
from torch.optim import Optimizer


class BatchedOptimizer(Optimizer):
    """
    This class adds to class Optimizer the capability to optimize parameters in batches:
    it will stack the parameters and their grads for you so the optimizer can work
    on tensors with an extra leading dimension. This is intended for speed with GPUs,
    as it reduces the number of kernels launched in the optimizer.

    Args:
      params:
    """

    def __init__(self, params, defaults):
        super(BatchedOptimizer, self).__init__(params, defaults)

    @contextlib.contextmanager
    def batched_params(self, param_group, group_params_names):
        """
        This function returns (technically, yields) a list of
        tuples (p, state), where
        p is a `fake` parameter that is stacked (over axis 0) from real parameters
        that share the same shape, and its gradient is also stacked;
        `state` is the state corresponding to this batch of parameters
        (it will be physically located in the "state" for one of the real
        parameters, the first one that has any particular shape and dtype).

        This function is decorated as a context manager so that it can
        write parameters back to their "real" locations.

        The idea is, instead of doing:
        <code>
          for p in group["params"]:
             state = self.state[p]
             ...
        </code>
        you can do:
        <code>
          with self.batched_params(group["params"], group_params_names) as batches:
             for p, state, p_names in batches:
                 ...
        </code>

        Args:
          group: a parameter group, which is a list of parameters; should be
                one of self.param_groups.
          group_params_names: name for each parameter in group,
                which is List[str].
        """
        batches = defaultdict(list)  # `batches` maps from tuple (dtype_as_str,*shape) to list of nn.Parameter
        batches_names = defaultdict(list)  # `batches_names` maps from tuple (dtype_as_str,*shape) to list of str

        assert len(param_group) == len(group_params_names)
        for p, named_p in zip(param_group, group_params_names):
            key = (str(p.dtype), *p.shape)
            batches[key].append(p)
            batches_names[key].append(named_p)

        batches_names_keys = list(batches_names.keys())
        sorted_idx = sorted(range(len(batches_names)), key=lambda i: batches_names_keys[i])
        batches_names = [batches_names[batches_names_keys[idx]] for idx in sorted_idx]
        batches = [batches[batches_names_keys[idx]] for idx in sorted_idx]

        stacked_params_dict = dict()

        # turn batches into a list, in deterministic order.
        # tuples will contain tuples of (stacked_param, state, stacked_params_names),
        # one for each batch in `batches`.
        tuples = []

        for batch, batch_names in zip(batches, batches_names):
            p = batch[0]
            # we arbitrarily store the state in the
            # state corresponding to the 1st parameter in the
            # group. class Optimizer will take care of saving/loading state.
            state = self.state[p]
            p_stacked = torch.stack(batch)
            grad = torch.stack([torch.zeros_like(p) if p.grad is None else p.grad for p in batch])
            p_stacked.grad = grad
            key = (str(p.dtype), *p.shape)  # recompute; the loop variable above would be stale here
            stacked_params_dict[key] = p_stacked
            tuples.append((p_stacked, state, batch_names))

        yield tuples  # <-- calling code will do the actual optimization here!

        for (stacked_params, _state, _names), batch in zip(tuples, batches):
            for i, p in enumerate(batch):  # batch is list of Parameter
                p.copy_(stacked_params[i])


class ScaledAdam(BatchedOptimizer):
    """
    Implements 'Scaled Adam', a variant of Adam where we scale each parameter's update
    proportional to the norm of that parameter; and also learn the scale of the parameter,
    in log space, subject to upper and lower limits (as if we had factored each parameter as
    param = underlying_param * log_scale.exp())


    Args:
        params: The parameters or param_groups to optimize (like other Optimizer subclasses)
        lr: The learning rate. We will typically use a learning rate schedule that starts
            at 0.03 and decreases over time, i.e. much higher than other common
            optimizers.
        clipping_scale: (e.g. 2.0)
            A scale for gradient-clipping: if specified, the normalized gradients
            over the whole model will be clipped to have 2-norm equal to
            `clipping_scale` times the median 2-norm over the most recent period
            of `clipping_update_period` minibatches. By "normalized gradients",
            we mean after multiplying by the rms parameter value for this tensor
            [for non-scalars]; this is appropriate because our update is scaled
            by this quantity.
        betas: beta1,beta2 are momentum constants for regular momentum, and moving sum-sq grad.
            Must satisfy 0 < beta1 <= beta2 < 1.
        scalar_lr_scale: A scaling factor on the learning rate, that we use to update the
            scale of each parameter tensor and scalar parameters of the model.
            If each parameter were decomposed
            as p * p_scale.exp(), where (p**2).mean().sqrt() == 1.0, scalar_lr_scale
            would be the scaling factor on the learning rate of p_scale.
        eps: A general-purpose epsilon to prevent division by zero
        param_min_rms: Minimum root-mean-square value of parameter tensor, for purposes of
            learning the scale on the parameters (we'll constrain the rms of each non-scalar
            parameter tensor to be >= this value)
        param_max_rms: Maximum root-mean-square value of parameter tensor, for purposes of
            learning the scale on the parameters (we'll constrain the rms of each non-scalar
            parameter tensor to be <= this value)
        scalar_max: Maximum absolute value for scalar parameters (applicable if your
            model has any parameters with numel() == 1).
        size_update_period: The periodicity, in steps, with which we update the size (scale)
            of the parameter tensor. This is provided to save a little time
            in the update.
        clipping_update_period: if clipping_scale is specified, this is the period, in steps,
            over which gradient-norm statistics are collected to set the clipping threshold.
    """

    def __init__(
        self,
        params,
        lr=3e-02,
        clipping_scale=None,
        betas=(0.9, 0.98),
        scalar_lr_scale=0.1,
        eps=1.0e-08,
        param_min_rms=1.0e-05,
        param_max_rms=3.0,
        scalar_max=10.0,
        size_update_period=4,
        clipping_update_period=100,
        parameters_names=None,
        show_dominant_parameters=True,
    ):
        assert parameters_names is not None, (
            "Please prepare parameters_names, which is a List[List[str]]. Each List[str] is for a group and each str is for a parameter"
        )
        defaults = dict(
            lr=lr,
            clipping_scale=clipping_scale,
            betas=betas,
            scalar_lr_scale=scalar_lr_scale,
            eps=eps,
            param_min_rms=param_min_rms,
            param_max_rms=param_max_rms,
            scalar_max=scalar_max,
            size_update_period=size_update_period,
            clipping_update_period=clipping_update_period,
        )

        super(ScaledAdam, self).__init__(params, defaults)
        assert len(self.param_groups) == len(parameters_names)
        self.parameters_names = parameters_names
        self.show_dominant_parameters = show_dominant_parameters

    def __setstate__(self, state):
        super(ScaledAdam, self).__setstate__(state)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        batch = True

        for group, group_params_names in zip(self.param_groups, self.parameters_names):
            with self.batched_params(group["params"], group_params_names) as batches:
                # batches is list of pairs (stacked_param, state). stacked_param is like
                # a regular parameter, and will have a .grad, but the 1st dim corresponds to
                # a stacking dim, it is not a real dim.

                if len(batches[0][1]) == 0:  # if len(first state) == 0: not yet initialized
                    clipping_scale = 1
                else:
                    clipping_scale = self._get_clipping_scale(group, batches)

                for p, state, _ in batches:
                    # Perform optimization step.
                    # grad is not going to be None, we handled that when creating the batches.
                    grad = p.grad
                    if grad.is_sparse:
                        raise RuntimeError("ScaledAdam optimizer does not support sparse gradients")
                    # State initialization
                    if len(state) == 0:
                        self._init_state(group, p, state)

                    self._step_one_batch(group, p, state, clipping_scale)

        return loss

    def _init_state(self, group: dict, p: Tensor, state: dict):
        """
        Initializes state dict for parameter 'p'. Assumes that dim 0 of tensor p
        is actually the batch dimension, corresponding to batched-together
        parameters of a given shape.


        Args:
            group: Dict to look up configuration values.
            p: The parameter that we are initializing the state for
            state: Dict from string to whatever state we are initializing
        """
        size_update_period = group["size_update_period"]

        state["step"] = 0

        kwargs = {"device": p.device, "dtype": p.dtype}

        # 'delta' implements conventional momentum. There are
        # several different kinds of update going on, so rather than
        # compute "exp_avg" like in Adam, we store and decay a
        # parameter-change "delta", which combines all forms of
        # update. this is equivalent to how it's done in Adam,
        # except for the first few steps.
        state["delta"] = torch.zeros_like(p, memory_format=torch.preserve_format)

        batch_size = p.shape[0]
        numel = p.numel() // batch_size  # element count per batch member (1 => a batch of scalars)

        if numel > 1:
            # "param_rms" just periodically records the scalar root-mean-square value of
            # the parameter tensor.
            # it has a shape like (batch_size, 1, 1, 1, 1)
            param_rms = (p**2).mean(dim=list(range(1, p.ndim)), keepdim=True).sqrt()
            state["param_rms"] = param_rms

            state["scale_exp_avg_sq"] = torch.zeros_like(param_rms)
            state["scale_grads"] = torch.zeros(size_update_period, *param_rms.shape, **kwargs)

        # exp_avg_sq is the weighted sum of scaled gradients, as in Adam.
        state["exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)

    def _get_clipping_scale(self, group: dict, tuples: List[Tuple[Tensor, dict, List[str]]]) -> float:
        """
        Returns a scalar factor <= 1.0 that dictates gradient clipping, i.e. we will scale the gradients
        by this amount before applying the rest of the update.

        Args:
            group: the parameter group, an item in self.param_groups
            tuples: a list of tuples of (param, state, param_names)
                where param is a batched set of parameters,
                with a .grad (1st dim is batch dim)
                and state is the state-dict where optimization parameters are kept.
                param_names is a List[str], where each str is the name of a parameter
                in the batched set of parameters "param".
        """
        assert len(tuples) >= 1
        clipping_scale = group["clipping_scale"]
        (first_p, first_state, _) = tuples[0]
        step = first_state["step"]
        if clipping_scale is None or step == 0:
            # no clipping. return early on step == 0 because the other
            # parameters' state won't have been initialized yet.
            return 1.0
        clipping_update_period = group["clipping_update_period"]

        tot_sumsq = torch.tensor(0.0, device=first_p.device)
        for p, state, param_names in tuples:
            grad = p.grad
            if grad.is_sparse:
                raise RuntimeError("ScaledAdam optimizer does not support sparse gradients")
            if p.numel() == p.shape[0]:  # a batch of scalars
                tot_sumsq += (grad**2).sum()  # sum() to change shape [1] to []
            else:
                tot_sumsq += ((grad * state["param_rms"]) ** 2).sum()

        tot_norm = tot_sumsq.sqrt()
        if "model_norms" not in first_state:
            first_state["model_norms"] = torch.zeros(clipping_update_period, device=p.device)
        first_state["model_norms"][step % clipping_update_period] = tot_norm

        if step % clipping_update_period == 0:
            # Print some stats.
            # We don't reach here if step == 0 because we would have returned
            # above.
            sorted_norms = first_state["model_norms"].sort()[0].to("cpu")
            quartiles = []
            for n in range(0, 5):
                index = min(
                    clipping_update_period - 1,
                    (clipping_update_period // 4) * n,
                )
                quartiles.append(sorted_norms[index].item())

            median = quartiles[2]
            threshold = clipping_scale * median
            first_state["model_norm_threshold"] = threshold
            percent_clipped = (
                first_state["num_clipped"] * 100.0 / clipping_update_period if "num_clipped" in first_state else 0.0
            )
            first_state["num_clipped"] = 0
            quartiles = " ".join(["%.3e" % x for x in quartiles])
            logging.info(
                f"Clipping_scale={clipping_scale}, grad-norm quartiles {quartiles}, threshold={threshold:.3e}, percent-clipped={percent_clipped:.1f}"
            )

        if step < clipping_update_period:
            return 1.0  # We have not yet estimated a norm to clip to.
        else:
            try:
                model_norm_threshold = first_state["model_norm_threshold"]
            except KeyError:
                logging.info(
                    "Warning: model_norm_threshold not in state: possibly you changed config when restarting, adding clipping_scale option?"
                )
                return 1.0
            ans = min(1.0, (model_norm_threshold / (tot_norm + 1.0e-20)).item())
            if ans < 1.0:
                first_state["num_clipped"] += 1
            if ans < 0.1:
                logging.warning(f"Scaling gradients by {ans}, model_norm_threshold={model_norm_threshold}")
                if self.show_dominant_parameters:
                    assert p.shape[0] == len(param_names)
                    self._show_gradient_dominating_parameter(tuples, tot_sumsq)
            return ans

    def _show_gradient_dominating_parameter(self, tuples: List[Tuple[Tensor, dict, List[str]]], tot_sumsq: Tensor):
        """
        Show information about the parameter that dominates tot_sumsq.

        Args:
            tuples: a list of tuples of (param, state, param_names)
                where param is a batched set of parameters,
                with a .grad (1st dim is batch dim)
                and state is the state-dict where optimization parameters are kept.
                param_names is a List[str], where each str is the name of a parameter
                in the batched set of parameters "param".
            tot_sumsq: sumsq of all parameters. Though it could be calculated
                from tuples, we still pass it to save some time.
        """
        all_sumsq_orig = {}
        for p, state, batch_param_names in tuples:
            # p is a stacked batch of parameters.
            batch_grad = p.grad
            if p.numel() == p.shape[0]:  # a batch of scalars
                batch_sumsq_orig = batch_grad**2
                # Dummy values used by the following `zip` statement.
                batch_rms_orig = torch.ones(p.shape[0])
            else:
                batch_rms_orig = state["param_rms"]
                batch_sumsq_orig = ((batch_grad * batch_rms_orig) ** 2).sum(dim=list(range(1, batch_grad.ndim)))

            for name, sumsq_orig, rms, grad in zip(
                batch_param_names,
                batch_sumsq_orig,
                batch_rms_orig,
                batch_grad,
            ):
                proportion_orig = sumsq_orig / tot_sumsq
                all_sumsq_orig[name] = (proportion_orig, sumsq_orig, rms, grad)

        assert torch.isclose(
            sum([value[0] for value in all_sumsq_orig.values()]).cpu(),
            torch.tensor(1.0),
        )
        sorted_by_proportion = {
            k: v
            for k, v in sorted(
                all_sumsq_orig.items(),
                key=lambda item: item[1][0],
                reverse=True,
            )
        }
        dominant_param_name = next(iter(sorted_by_proportion))
        (
            dominant_proportion,
            dominant_sumsq,
            dominant_rms,
            dominant_grad,
        ) = sorted_by_proportion[dominant_param_name]
        logging.info(
            f"Parameter Dominating tot_sumsq {dominant_param_name}"
            f" with proportion {dominant_proportion:.2f},"
            f" where dominant_sumsq=(grad_sumsq*orig_rms_sq)"
            f"={dominant_sumsq:.3e},"
            f" grad_sumsq = {(dominant_grad**2).sum():.3e},"
            f" orig_rms_sq={(dominant_rms**2).item():.3e}"
        )

    def _step_one_batch(self, group: dict, p: Tensor, state: dict, clipping_scale: float):
        """
        Do the step for one parameter, which is actually going to be a batch of
        `real` parameters, with dim 0 as the batch dim.
        Args:
            group: dict to look up configuration values
            p: parameter to update (actually multiple parameters stacked together
                as a batch)
            state: state-dict for p, to look up the optimizer state
        """
        lr = group["lr"]
        size_update_period = group["size_update_period"]
        beta1 = group["betas"][0]

        grad = p.grad
        if clipping_scale != 1.0:
            grad = grad * clipping_scale
        step = state["step"]
        delta = state["delta"]

        delta.mul_(beta1)
        batch_size = p.shape[0]
        numel = p.numel() // batch_size
        if numel > 1:
            # Update the size/scale of p, and set param_rms
            scale_grads = state["scale_grads"]
            scale_grads[step % size_update_period] = (p * grad).sum(dim=list(range(1, p.ndim)), keepdim=True)
            if step % size_update_period == size_update_period - 1:
                param_rms = state["param_rms"]  # shape: (batch_size, 1, 1, ..)
                param_rms.copy_((p**2).mean(dim=list(range(1, p.ndim)), keepdim=True).sqrt())
                if step > 0:
                    # self._size_update() learns the overall scale on the
                    # parameter, by shrinking or expanding it.
                    self._size_update(group, scale_grads, p, state)

        if numel == 1:
            # For parameters with 1 element we just use regular Adam.
            # Updates delta.
            self._step_scalar(group, p, state)
        else:
            self._step(group, p, state)

        state["step"] = step + 1

    def _size_update(
        self,
        group: dict,
        scale_grads: Tensor,
        p: Tensor,
        state: dict,
    ) -> None:
        """
        Called only where p.numel() > 1, this updates the scale of the parameter.
        If we imagine: p = underlying_param * scale.exp(), and we are doing
        gradient descent on underlying param and on scale, this function does the update
        on `scale`.

        Args:
            group: dict to look up configuration values
            scale_grads: a tensor of shape (size_update_period, batch_size, 1, 1,...) containing
                grads w.r.t. the scales.
            p: The parameter to update
            state: The state-dict of p
        """

        param_rms = state["param_rms"]
        beta1, beta2 = group["betas"]
        size_lr = group["lr"] * group["scalar_lr_scale"]
        param_min_rms = group["param_min_rms"]
        param_max_rms = group["param_max_rms"]
        eps = group["eps"]
        step = state["step"]
        batch_size = p.shape[0]

        size_update_period = scale_grads.shape[0]
        # correct beta2 for the size update period: we will have
        # faster decay at this level.
        beta2_corr = beta2**size_update_period

        scale_exp_avg_sq = state["scale_exp_avg_sq"]  # shape: (batch_size, 1, 1, ..)
        scale_exp_avg_sq.mul_(beta2_corr).add_(
            (scale_grads**2).mean(dim=0),  # mean over dim `size_update_period`
            alpha=1 - beta2_corr,
        )  # shape is (batch_size, 1, 1, ...)

        # The 1st time we reach here is when size_step == 1.
        size_step = (step + 1) // size_update_period
        bias_correction2 = 1 - beta2_corr**size_step
        # we don't bother with bias_correction1; this will help prevent divergence
        # at the start of training.

        denom = scale_exp_avg_sq.sqrt() + eps

        scale_step = -size_lr * (bias_correction2**0.5) * scale_grads.sum(dim=0) / denom

        is_too_small = param_rms < param_min_rms
        is_too_large = param_rms > param_max_rms

        # when the param gets too small, just don't shrink it any further.
        scale_step.masked_fill_(is_too_small, 0.0)
        # when it gets too large, stop it from getting any larger.
        scale_step.masked_fill_(is_too_large, -size_lr * size_update_period)
        delta = state["delta"]
        # the factor of (1-beta1) relates to momentum.
        delta.add_(p * scale_step, alpha=(1 - beta1))

    def _step(self, group: dict, p: Tensor, state: dict):
        """
        This function does the core update of self.step(), in the case where the members of
        the batch have more than 1 element.

        Args:
            group: A dict which will be used to look up configuration values
            p: The parameter to be updated (its .grad holds the gradient)
            state: The state-dict corresponding to parameter p

        This function modifies p.
        """
        grad = p.grad
        lr = group["lr"]
        beta1, beta2 = group["betas"]
        eps = group["eps"]
        param_min_rms = group["param_min_rms"]
        step = state["step"]

        exp_avg_sq = state["exp_avg_sq"]
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))

        this_step = state["step"] - (state["zero_step"] if "zero_step" in state else 0)
        bias_correction2 = 1 - beta2 ** (this_step + 1)
        if bias_correction2 < 0.99:
            # note: not in-place.
            exp_avg_sq = exp_avg_sq * (1.0 / bias_correction2)

        denom = exp_avg_sq.sqrt()
        denom += eps
        grad = grad / denom

        alpha = -lr * (1 - beta1) * state["param_rms"].clamp(min=param_min_rms)

        delta = state["delta"]
        delta.add_(grad * alpha)
        p.add_(delta)

    def _step_scalar(self, group: dict, p: Tensor, state: dict):
        """
        A simplified form of the core update for scalar tensors, where we cannot get a good
        estimate of the parameter rms.
        """
        beta1, beta2 = group["betas"]
        scalar_max = group["scalar_max"]
        eps = group["eps"]
        lr = group["lr"] * group["scalar_lr_scale"]
        grad = p.grad

        exp_avg_sq = state["exp_avg_sq"]  # shape: (batch_size,)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

        # bias_correction2 is like in Adam. Don't bother with bias_correction1;
        # slower update at the start will help stability anyway.
        bias_correction2 = 1 - beta2 ** (state["step"] + 1)
        denom = (exp_avg_sq / bias_correction2).sqrt() + eps

        delta = state["delta"]
        delta.add_(grad / denom, alpha=-lr * (1 - beta1))
        p.clamp_(min=-scalar_max, max=scalar_max)
        p.add_(delta)
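# ---- Editor's note (illustrative sketch, not part of the original file) ----
# Minimal, hypothetical usage of ScaledAdam; the model and hyper-parameters are assumptions.
# `parameters_names` must be one List[str] per param group, aligned with the parameters:
#     model = torch.nn.Linear(10, 10)
#     names = [[name for name, _ in model.named_parameters()]]
#     optimizer = ScaledAdam(model.parameters(), lr=0.01, clipping_scale=2.0,
#                            parameters_names=names)
#     loss = model(torch.randn(2, 10)).sum()
#     loss.backward()
#     optimizer.step()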
@@ -0,0 +1,428 @@
from torch.nn.functional import *
from torch.nn.functional import (
    _mha_shape_check,
    _canonical_mask,
    _none_or_dtype,
    _in_projection_packed,
    _in_projection,  # used below; the star-import does not export underscore names
)
import math
import torch

# Tensor = torch.Tensor
# from typing import Callable, List, Optional, Tuple, Union


def multi_head_attention_forward_patched(
    query,
    key,
    value,
    embed_dim_to_check,
    num_heads,
    in_proj_weight,
    in_proj_bias,
    bias_k,
    bias_v,
    add_zero_attn,
    dropout_p: float,
    out_proj_weight,
    out_proj_bias,
    training=True,
    key_padding_mask=None,
    need_weights=True,
    attn_mask=None,
    use_separate_proj_weight=False,
    q_proj_weight=None,
    k_proj_weight=None,
    v_proj_weight=None,
    static_k=None,
    static_v=None,
    average_attn_weights=True,
    is_causal=False,
    cache=None,
):
r"""
|
||||
Args:
|
||||
query, key, value: map a query and a set of key-value pairs to an output.
|
||||
See "Attention Is All You Need" for more details.
|
||||
embed_dim_to_check: total dimension of the model.
|
||||
num_heads: parallel attention heads.
|
||||
in_proj_weight, in_proj_bias: input projection weight and bias.
|
||||
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
|
||||
add_zero_attn: add a new batch of zeros to the key and
|
||||
value sequences at dim=1.
|
||||
dropout_p: probability of an element to be zeroed.
|
||||
out_proj_weight, out_proj_bias: the output projection weight and bias.
|
||||
training: apply dropout if is ``True``.
|
||||
key_padding_mask: if provided, specified padding elements in the key will
|
||||
be ignored by the attention. This is an binary mask. When the value is True,
|
||||
the corresponding value on the attention layer will be filled with -inf.
|
||||
need_weights: output attn_output_weights.
|
||||
Default: `True`
|
||||
Note: `needs_weight` defaults to `True`, but should be set to `False`
|
||||
For best performance when attention weights are not nedeeded.
|
||||
*Setting needs_weights to `True`
|
||||
leads to a significant performance degradation.*
|
||||
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
|
||||
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
|
||||
is_causal: If specified, applies a causal mask as attention mask, and ignores
|
||||
attn_mask for computing scaled dot product attention.
|
||||
Default: ``False``.
|
||||
.. warning::
|
||||
is_causal is provides a hint that the attn_mask is the
|
||||
causal mask.Providing incorrect hints can result in
|
||||
incorrect execution, including forward and backward
|
||||
compatibility.
|
||||
use_separate_proj_weight: the function accept the proj. weights for query, key,
|
||||
and value in different forms. If false, in_proj_weight will be used, which is
|
||||
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
|
||||
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
|
||||
static_k, static_v: static key and value used for attention operators.
|
||||
average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across heads.
|
||||
Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an effect
|
||||
when ``need_weights=True.``. Default: True
|
||||
|
||||
|
||||
Shape:
|
||||
Inputs:
|
||||
- query: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
|
||||
the embedding dimension.
|
||||
- key: :math:`(S, E)` or :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
|
||||
the embedding dimension.
|
||||
- value: :math:`(S, E)` or :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
|
||||
the embedding dimension.
|
||||
- key_padding_mask: :math:`(S)` or :math:`(N, S)` where N is the batch size, S is the source sequence length.
|
||||
If a FloatTensor is provided, it will be directly added to the value.
|
||||
If a BoolTensor is provided, the positions with the
|
||||
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
|
||||
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
|
||||
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
|
||||
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
|
||||
positions. If a BoolTensor is provided, positions with ``True``
|
||||
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
|
||||
is provided, it will be added to the attention weight.
|
||||
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
|
||||
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
|
||||
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
|
||||
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
|
||||
|
||||
Outputs:
|
||||
- attn_output: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
|
||||
E is the embedding dimension.
|
||||
- attn_output_weights: Only returned when ``need_weights=True``. If ``average_attn_weights=True``, returns
|
||||
attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
|
||||
:math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
|
||||
:math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
|
||||
head of shape :math:`(num_heads, L, S)` when input is unbatched or :math:`(N, num_heads, L, S)`.
|
||||
"""
|
||||
    tens_ops = (
        query,
        key,
        value,
        in_proj_weight,
        in_proj_bias,
        bias_k,
        bias_v,
        out_proj_weight,
        out_proj_bias,
    )
    if has_torch_function(tens_ops):
        return handle_torch_function(
            multi_head_attention_forward,
            tens_ops,
            query,
            key,
            value,
            embed_dim_to_check,
            num_heads,
            in_proj_weight,
            in_proj_bias,
            bias_k,
            bias_v,
            add_zero_attn,
            dropout_p,
            out_proj_weight,
            out_proj_bias,
            training=training,
            key_padding_mask=key_padding_mask,
            need_weights=need_weights,
            attn_mask=attn_mask,
            is_causal=is_causal,
            use_separate_proj_weight=use_separate_proj_weight,
            q_proj_weight=q_proj_weight,
            k_proj_weight=k_proj_weight,
            v_proj_weight=v_proj_weight,
            static_k=static_k,
            static_v=static_v,
            average_attn_weights=average_attn_weights,
            cache=cache,
        )

    is_batched = _mha_shape_check(query, key, value, key_padding_mask, attn_mask, num_heads)

    # For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input
    # is batched, run the computation and before returning squeeze the
    # batch dimension so that the output doesn't carry this temporary batch dimension.
    if not is_batched:
        # unsqueeze if the input is unbatched
        query = query.unsqueeze(1)
        key = key.unsqueeze(1)
        value = value.unsqueeze(1)
        if key_padding_mask is not None:
            key_padding_mask = key_padding_mask.unsqueeze(0)

    # set up shape vars
    tgt_len, bsz, embed_dim = query.shape
    src_len, _, _ = key.shape

    key_padding_mask = _canonical_mask(
        mask=key_padding_mask,
        mask_name="key_padding_mask",
        other_type=_none_or_dtype(attn_mask),
        other_name="attn_mask",
        target_type=query.dtype,
    )

    if is_causal and attn_mask is None:
        raise RuntimeError(
            "Need attn_mask if specifying the is_causal hint. "
            "You may use the Transformer module method "
            "`generate_square_subsequent_mask` to create this mask."
        )

    if is_causal and key_padding_mask is None and not need_weights:
        # when we have a kpm or need weights, we need attn_mask
        # Otherwise, we use the is_causal hint go as is_causal
        # indicator to SDPA.
        attn_mask = None
    else:
        attn_mask = _canonical_mask(
            mask=attn_mask,
            mask_name="attn_mask",
            other_type=None,
            other_name="",
            target_type=query.dtype,
            check_other=False,
        )

        if key_padding_mask is not None:
            # We have the attn_mask, and use that to merge kpm into it.
            # Turn off use of is_causal hint, as the merged mask is no
            # longer causal.
            is_causal = False

    assert embed_dim == embed_dim_to_check, (
        f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
    )
    if isinstance(embed_dim, torch.Tensor):
        # embed_dim can be a tensor when JIT tracing
        head_dim = embed_dim.div(num_heads, rounding_mode="trunc")
    else:
        head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
    if use_separate_proj_weight:
        # allow MHA to have different embedding dimensions when separate projection weights are used
        assert key.shape[:2] == value.shape[:2], (
            f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
        )
    else:
        assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"

    #
    # compute in-projection
    #
    if not use_separate_proj_weight:
        assert in_proj_weight is not None, "use_separate_proj_weight is False but in_proj_weight is None"
        q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
    else:
        assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
        assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
        assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
        if in_proj_bias is None:
            b_q = b_k = b_v = None
        else:
            b_q, b_k, b_v = in_proj_bias.chunk(3)
        q, k, v = _in_projection(
            query,
            key,
            value,
            q_proj_weight,
            k_proj_weight,
            v_proj_weight,
            b_q,
            b_k,
            b_v,
        )
    if cache is not None:
        if cache["first_infer"] == 1:
            cache["k"][cache["stage"]] = k
            # print(0,cache["k"].shape)
            cache["v"][cache["stage"]] = v
        else:  # each of the 12 layers keeps its own cached k/v
            # print(1,cache["k"].shape)
            cache["k"][cache["stage"]] = torch.cat(
                [cache["k"][cache["stage"]], k], 0
            )  # the time axis was originally dim 1, but the projection may have transposed it, so time is now dim 0
            cache["v"][cache["stage"]] = torch.cat([cache["v"][cache["stage"]], v], 0)
            # print(2, cache["k"].shape)
            src_len = cache["k"][cache["stage"]].shape[0]
            k = cache["k"][cache["stage"]]
            v = cache["v"][cache["stage"]]
            # if attn_mask is not None:
            #     attn_mask=attn_mask[-1:,]
            # print(attn_mask.shape,attn_mask)
        cache["stage"] = (cache["stage"] + 1) % cache["all_stage"]
    # print(2333,cache)
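    # ---- Editor's note (sketch, inferred from the cache handling above; not in the original) ----
    # `cache` is expected to be a dict shaped roughly like:
    #     cache = {
    #         "all_stage": num_layers,   # number of attention layers sharing this dict
    #         "stage": 0,                # index of the layer currently being processed
    #         "first_infer": 1,          # 1 on the first decoding step, 0 afterwards
    #         "k": [None] * num_layers,  # per-layer cached keys, time-major (dim 0)
    #         "v": [None] * num_layers,  # per-layer cached values
    #     }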
    # prep attention mask

    attn_mask = _canonical_mask(
        mask=attn_mask,
        mask_name="attn_mask",
        other_type=None,
        other_name="",
        target_type=q.dtype,
        check_other=False,
    )

    if attn_mask is not None:
        # ensure attn_mask's dim is 3
        if attn_mask.dim() == 2:
            correct_2d_size = (tgt_len, src_len)
            if attn_mask.shape != correct_2d_size:
                raise RuntimeError(
                    f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}."
                )
            attn_mask = attn_mask.unsqueeze(0)
        elif attn_mask.dim() == 3:
            correct_3d_size = (bsz * num_heads, tgt_len, src_len)
            if attn_mask.shape != correct_3d_size:
                raise RuntimeError(
                    f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}."
                )
        else:
            raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")

    # add bias along batch dimension (currently second)
    if bias_k is not None and bias_v is not None:
        assert static_k is None, "bias cannot be added to static key."
        assert static_v is None, "bias cannot be added to static value."
        k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
        v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
        if attn_mask is not None:
            attn_mask = pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1))
    else:
        assert bias_k is None
        assert bias_v is None

    #
    # reshape q, k, v for multihead attention and make em batch first
    #
    q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if static_k is None:
        k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
    else:
        # TODO finish disentangling control flow so we don't do in-projections when statics are passed
        assert static_k.size(0) == bsz * num_heads, (
            f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
        )
        assert static_k.size(2) == head_dim, f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
        k = static_k
    if static_v is None:
        v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
    else:
        # TODO finish disentangling control flow so we don't do in-projections when statics are passed
        assert static_v.size(0) == bsz * num_heads, (
            f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
        )
        assert static_v.size(2) == head_dim, f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
        v = static_v

    # add zero attention along batch dimension (now first)
    if add_zero_attn:
        zero_attn_shape = (bsz * num_heads, 1, head_dim)
        k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
        v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
        if attn_mask is not None:
            attn_mask = pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1))

    # update source sequence length after adjustments
    src_len = k.size(1)

    # merge key padding and attention masks
    if key_padding_mask is not None:
        assert key_padding_mask.shape == (
            bsz,
            src_len,
        ), f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
        key_padding_mask = (
            key_padding_mask.view(bsz, 1, 1, src_len).expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)
        )
        if attn_mask is None:
            attn_mask = key_padding_mask
        else:
            attn_mask = attn_mask + key_padding_mask

    # adjust dropout probability
    if not training:
        dropout_p = 0.0

    #
    # (deep breath) calculate attention and out projection
    #

    if need_weights:
        B, Nt, E = q.shape
        q_scaled = q / math.sqrt(E)

        assert not (is_causal and attn_mask is None), "FIXME: is_causal not implemented for need_weights"

        if attn_mask is not None:
            attn_output_weights = torch.baddbmm(attn_mask, q_scaled, k.transpose(-2, -1))
        else:
            attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
        attn_output_weights = softmax(attn_output_weights, dim=-1)
        if dropout_p > 0.0:
            attn_output_weights = dropout(attn_output_weights, p=dropout_p)

        attn_output = torch.bmm(attn_output_weights, v)

        attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
        attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
        attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))

        # optionally average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        if average_attn_weights:
            attn_output_weights = attn_output_weights.mean(dim=1)

        if not is_batched:
            # squeeze the output if input was unbatched
            attn_output = attn_output.squeeze(1)
            attn_output_weights = attn_output_weights.squeeze(0)
        return attn_output, attn_output_weights
    else:
        # attn_mask can be either (L,S) or (N*num_heads, L, S)
        # if attn_mask's shape is (1, L, S) we need to unsqueeze to (1, 1, L, S)
        # in order to match the input for SDPA of (N, num_heads, L, S)
        if attn_mask is not None:
            if attn_mask.size(0) == 1 and attn_mask.dim() == 3:
                attn_mask = attn_mask.unsqueeze(0)
            else:
                attn_mask = attn_mask.view(bsz, num_heads, -1, src_len)

        q = q.view(bsz, num_heads, tgt_len, head_dim)
        k = k.view(bsz, num_heads, src_len, head_dim)
        v = v.view(bsz, num_heads, src_len, head_dim)

        # with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True):
        attn_output = scaled_dot_product_attention(q, k, v, attn_mask, dropout_p, is_causal)

        attn_output = attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)

        attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
        attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
        if not is_batched:
            # squeeze the output if input was unbatched
            attn_output = attn_output.squeeze(1)
        return attn_output, None
@@ -0,0 +1,85 @@
from torch.nn.functional import *
from torch.nn.functional import (
    _canonical_mask,
)
import torch
from torch import Tensor
from typing import Optional  # explicit imports; the star-import above is version-dependent


def multi_head_attention_forward_patched(
    query,
    key,
    value,
    embed_dim_to_check: int,
    num_heads: int,
    in_proj_weight,
    in_proj_bias: Optional[Tensor],
    bias_k: Optional[Tensor],
    bias_v: Optional[Tensor],
    add_zero_attn: bool,
    dropout_p: float,
    out_proj_weight: Tensor,
    out_proj_bias: Optional[Tensor],
    training: bool = True,
    key_padding_mask: Optional[Tensor] = None,
    need_weights: bool = True,
    attn_mask: Optional[Tensor] = None,
    use_separate_proj_weight: bool = False,
    q_proj_weight: Optional[Tensor] = None,
    k_proj_weight: Optional[Tensor] = None,
    v_proj_weight: Optional[Tensor] = None,
    static_k: Optional[Tensor] = None,
    static_v: Optional[Tensor] = None,
    average_attn_weights: bool = True,
    is_causal: bool = False,
    cache=None,
) -> Tensor:  # this ONNX-friendly variant returns only attn_output
    # set up shape vars
    _, _, embed_dim = query.shape
    attn_mask = _canonical_mask(
        mask=attn_mask,
        mask_name="attn_mask",
        other_type=None,
        other_name="",
        target_type=query.dtype,
        check_other=False,
    )
    head_dim = embed_dim // num_heads

    proj_qkv = linear(query, in_proj_weight, in_proj_bias)
    proj_qkv = proj_qkv.unflatten(-1, (3, query.size(-1))).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
    q, k, v = proj_qkv[0], proj_qkv[1], proj_qkv[2]

    if cache["first_infer"] == 1:
        cache["k"][cache["stage"]] = k
        cache["v"][cache["stage"]] = v
    else:
        cache["k"][cache["stage"]] = torch.cat([cache["k"][cache["stage"]][:-1], k], 0)
        cache["v"][cache["stage"]] = torch.cat([cache["v"][cache["stage"]][:-1], v], 0)
    k = cache["k"][cache["stage"]]
    v = cache["v"][cache["stage"]]
    cache["stage"] = (cache["stage"] + 1) % cache["all_stage"]

    attn_mask = _canonical_mask(
        mask=attn_mask,
        mask_name="attn_mask",
        other_type=None,
        other_name="",
        target_type=q.dtype,
        check_other=False,
    )
    attn_mask = attn_mask.unsqueeze(0)

    q = q.view(-1, num_heads, head_dim).transpose(0, 1)
    k = k.view(-1, num_heads, head_dim).transpose(0, 1)
    v = v.view(-1, num_heads, head_dim).transpose(0, 1)

    dropout_p = 0.0
    attn_mask = attn_mask.unsqueeze(0)
    q = q.view(num_heads, -1, head_dim).unsqueeze(0)
    k = k.view(num_heads, -1, head_dim).unsqueeze(0)
    v = v.view(num_heads, -1, head_dim).unsqueeze(0)
    attn_output = scaled_dot_product_attention(q, k, v, attn_mask, dropout_p, is_causal)
    attn_output = attn_output.permute(2, 0, 1, 3).contiguous().view(-1, embed_dim)
    attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
    attn_output = attn_output.view(-1, 1, attn_output.size(1))

    return attn_output
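# ---- Editor's note (shape sketch, not part of the original file) ----
# This ONNX-export variant assumes batch size 1 and packed QKV weights. After the packed
# projection, q/k/v are (seq, 1, embed_dim); the views above reshape them to
# (1, num_heads, seq, head_dim) for scaled_dot_product_attention, and the output is
# returned as (seq, 1, embed_dim).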
@@ -0,0 +1,320 @@
# Copyright 2022 Xiaomi Corp. (authors: Daniel Povey)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Optional
from typing import Tuple

import torch
import torch.nn as nn
from torch import Tensor


class DoubleSwishFunction(torch.autograd.Function):
    """
    double_swish(x) = x * torch.sigmoid(x-1)
    This is a definition, originally motivated by its close numerical
    similarity to swish(swish(x)), where swish(x) = x * sigmoid(x).

    Memory-efficient derivative computation:
     double_swish(x) = x * s, where s(x) = torch.sigmoid(x-1)
     double_swish'(x) = d/dx double_swish(x) = x * s'(x) + x' * s(x) = x * s'(x) + s(x).
     Now, s'(x) = s(x) * (1-s(x)).
     double_swish'(x) = x * s'(x) + s(x).
                      = x * s(x) * (1-s(x)) + s(x).
                      = double_swish(x) * (1-s(x)) + s(x)
     ... so we just need to remember s(x) but not x itself.
    """
    @staticmethod
    def forward(ctx, x: Tensor) -> Tensor:
        requires_grad = x.requires_grad
        x_dtype = x.dtype
        if x.dtype == torch.float16:
            x = x.to(torch.float32)

        s = torch.sigmoid(x - 1.0)
        y = x * s

        if requires_grad:
            deriv = y * (1 - s) + s
            # notes on derivative of x * sigmoid(x - 1):
            # https://www.wolframalpha.com/input?i=d%2Fdx+%28x+*+sigmoid%28x-1%29%29
            # min \simeq -0.043638.  Take floor as -0.043637 so it's a lower bound
            # max \simeq 1.1990.   Take ceil to be 1.2 so it's an upper bound.
            # the combination of "+ torch.rand_like(deriv)" and casting to torch.uint8 (which
            # floors), should be expectation-preserving.
            floor = -0.043637
            ceil = 1.2
            d_scaled = (deriv - floor) * (255.0 / (ceil - floor)) + torch.rand_like(deriv)
            if __name__ == "__main__":
                # for self-testing only.
                assert d_scaled.min() >= 0.0
                assert d_scaled.max() < 256.0
            d_int = d_scaled.to(torch.uint8)
            ctx.save_for_backward(d_int)
        # use the dtype recorded before the float32 upcast, otherwise this branch never fires
        if x_dtype == torch.float16 or torch.is_autocast_enabled():
            y = y.to(torch.float16)
        return y

    @staticmethod
    def backward(ctx, y_grad: Tensor) -> Tensor:
        (d,) = ctx.saved_tensors
        # the same constants as used in forward pass.
        floor = -0.043637
        ceil = 1.2
        d = d * ((ceil - floor) / 255.0) + floor
        return y_grad * d


class DoubleSwish(torch.nn.Module):
    def forward(self, x: Tensor) -> Tensor:
        """Return double-swish activation function which is an approximation to Swish(Swish(x)),
        that we approximate closely with x * sigmoid(x-1).
        """
        if torch.jit.is_scripting() or torch.jit.is_tracing():
            return x * torch.sigmoid(x - 1.0)
        return DoubleSwishFunction.apply(x)

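# ---- Editor's note (illustrative self-check, not part of the original file) ----
# The custom backward stores a uint8-quantized derivative, so gradients match the exact
# formula only in expectation (quantization plus random dither):
#     x = torch.randn(4, 8, requires_grad=True)
#     DoubleSwishFunction.apply(x).sum().backward()
#     s = torch.sigmoid(x.detach() - 1.0)
#     exact = x.detach() * s * (1 - s) + s          # d/dx [x * sigmoid(x - 1)]
#     print((x.grad - exact).abs().max())           # small, but nonzero by design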
class ActivationBalancerFunction(torch.autograd.Function):
    @staticmethod
    def forward(
        ctx,
        x: Tensor,
        scale_factor: Tensor,
        sign_factor: Optional[Tensor],
        channel_dim: int,
    ) -> Tensor:
        if channel_dim < 0:
            channel_dim += x.ndim
        ctx.channel_dim = channel_dim
        xgt0 = x > 0
        if sign_factor is None:
            ctx.save_for_backward(xgt0, scale_factor)
        else:
            ctx.save_for_backward(xgt0, scale_factor, sign_factor)
        return x

    @staticmethod
    def backward(ctx, x_grad: Tensor) -> Tuple[Tensor, None, None, None]:
        if len(ctx.saved_tensors) == 3:
            xgt0, scale_factor, sign_factor = ctx.saved_tensors
            for _ in range(ctx.channel_dim, x_grad.ndim - 1):
                scale_factor = scale_factor.unsqueeze(-1)
                sign_factor = sign_factor.unsqueeze(-1)
            factor = sign_factor + scale_factor * (xgt0.to(x_grad.dtype) - 0.5)
        else:
            xgt0, scale_factor = ctx.saved_tensors
            for _ in range(ctx.channel_dim, x_grad.ndim - 1):
                scale_factor = scale_factor.unsqueeze(-1)
            factor = scale_factor * (xgt0.to(x_grad.dtype) - 0.5)
        neg_delta_grad = x_grad.abs() * factor
        return (
            x_grad - neg_delta_grad,
            None,
            None,
            None,
        )

def _compute_scale_factor(
    x: Tensor,
    channel_dim: int,
    min_abs: float,
    max_abs: float,
    gain_factor: float,
    max_factor: float,
) -> Tensor:
    if channel_dim < 0:
        channel_dim += x.ndim
    sum_dims = [d for d in range(x.ndim) if d != channel_dim]
    x_abs_mean = torch.mean(x.abs(), dim=sum_dims).to(torch.float32)

    if min_abs == 0.0:
        below_threshold = 0.0
    else:
        # below_threshold is 0 if x_abs_mean > min_abs; it can be at most max_factor
        # if x_abs_mean is much less than min_abs.
        below_threshold = ((min_abs - x_abs_mean) * (gain_factor / min_abs)).clamp(min=0, max=max_factor)

    above_threshold = ((x_abs_mean - max_abs) * (gain_factor / max_abs)).clamp(min=0, max=max_factor)

    return below_threshold - above_threshold


def _compute_sign_factor(
    x: Tensor,
    channel_dim: int,
    min_positive: float,
    max_positive: float,
    gain_factor: float,
    max_factor: float,
) -> Tensor:
    if channel_dim < 0:
        channel_dim += x.ndim
    sum_dims = [d for d in range(x.ndim) if d != channel_dim]
    proportion_positive = torch.mean((x > 0).to(torch.float32), dim=sum_dims)
    if min_positive == 0.0:
        factor1 = 0.0
    else:
        # 0 if proportion_positive >= min_positive, else can be
        # as large as max_factor.
        factor1 = ((min_positive - proportion_positive) * (gain_factor / min_positive)).clamp_(min=0, max=max_factor)

    if max_positive == 1.0:
        factor2 = 0.0
    else:
        # 0 if proportion_positive <= max_positive, else can be
        # as large as -max_factor.
        factor2 = ((proportion_positive - max_positive) * (gain_factor / (1.0 - max_positive))).clamp_(
            min=0, max=max_factor
        )
    sign_factor = factor1 - factor2
    # require min_positive != 0 or max_positive != 1:
    assert not isinstance(sign_factor, float)
    return sign_factor

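# ---- Editor's note: ActivationBalancer.forward below calls `_no_op`, which is missing
# from this file. A definition restored from the upstream icefall `scaling.py` (assumed
# to be the intended helper) would look like:
def _no_op(x: Tensor) -> Tensor:
    if torch.jit.is_scripting() or torch.jit.is_tracing():
        return x
    else:
        # a no-op function that will have a node in the autograd graph,
        # to avoid certain bugs relating to backward hooks
        return x.chunk(1, dim=-1)[0]
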
class ActivationBalancer(torch.nn.Module):
|
||||
"""
|
||||
Modifies the backpropped derivatives of a function to try to encourage, for
|
||||
each channel, that it is positive at least a proportion `threshold` of the
|
||||
time. It does this by multiplying negative derivative values by up to
|
||||
(1+max_factor), and positive derivative values by up to (1-max_factor),
|
||||
interpolated from 1 at the threshold to those extremal values when none
|
||||
of the inputs are positive.
|
||||
|
||||
Args:
|
||||
num_channels: the number of channels
|
||||
channel_dim: the dimension/axis corresponding to the channel, e.g.
|
||||
-1, 0, 1, 2; will be interpreted as an offset from x.ndim if negative.
|
||||
min_positive: the minimum, per channel, of the proportion of the time
|
||||
that (x > 0), below which we start to modify the derivatives.
|
||||
max_positive: the maximum, per channel, of the proportion of the time
|
||||
that (x > 0), above which we start to modify the derivatives.
|
||||
max_factor: the maximum factor by which we modify the derivatives for
|
||||
either the sign constraint or the magnitude constraint;
|
||||
e.g. with max_factor=0.02, the the derivatives would be multiplied by
|
||||
values in the range [0.98..1.02].
|
||||
sign_gain_factor: determines the 'gain' with which we increase the
|
||||
change in gradient once the constraints on min_positive and max_positive
|
||||
are violated.
|
||||
scale_gain_factor: determines the 'gain' with which we increase the
|
||||
change in gradient once the constraints on min_abs and max_abs
|
||||
are violated.
|
||||
min_abs: the minimum average-absolute-value difference from the mean
|
||||
value per channel, which we allow, before we start to modify
|
||||
the derivatives to prevent this.
|
||||
max_abs: the maximum average-absolute-value difference from the mean
|
||||
value per channel, which we allow, before we start to modify
|
||||
the derivatives to prevent this.
|
||||
min_prob: determines the minimum probability with which we modify the
|
||||
gradients for the {min,max}_positive and {min,max}_abs constraints,
|
||||
on each forward(). This is done randomly to prevent all layers
|
||||
from doing it at the same time. Early in training we may use
|
||||
higher probabilities than this; it will decay to this value.
|
||||
"""

    def __init__(
        self,
        num_channels: int,
        channel_dim: int,
        min_positive: float = 0.05,
        max_positive: float = 0.95,
        max_factor: float = 0.04,
        sign_gain_factor: float = 0.01,
        scale_gain_factor: float = 0.02,
        min_abs: float = 0.2,
        max_abs: float = 100.0,
        min_prob: float = 0.1,
    ):
        super(ActivationBalancer, self).__init__()
        self.num_channels = num_channels
        self.channel_dim = channel_dim
        self.min_positive = min_positive
        self.max_positive = max_positive
        self.max_factor = max_factor
        self.min_abs = min_abs
        self.max_abs = max_abs
        self.min_prob = min_prob
        self.sign_gain_factor = sign_gain_factor
        self.scale_gain_factor = scale_gain_factor

        # cpu_count measures how many times the forward() function has been
        # called.  We occasionally sync it with the buffer `count`, which
        # exists so the value is saved and restored with the model checkpoint.
        self.cpu_count = 0
        self.register_buffer("count", torch.tensor(0, dtype=torch.int64))

    def forward(self, x: Tensor) -> Tensor:
        if torch.jit.is_scripting() or not x.requires_grad or torch.jit.is_tracing():
            return _no_op(x)

        count = self.cpu_count
        self.cpu_count += 1

        if random.random() < 0.01:
            # Occasionally sync self.cpu_count with self.count.
            # count affects the decay of 'prob'; don't do this on every iter,
            # because syncing with the GPU is slow.
            self.cpu_count = max(self.cpu_count, self.count.item())
            self.count.fill_(self.cpu_count)

        # the prob of doing some work exponentially decreases from 0.5 till it hits
        # a floor at min_prob (==0.1, by default)
        prob = max(self.min_prob, 0.5 ** (1 + (count / 4000.0)))
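        # For the default min_prob=0.1: prob is 0.5 at count=0, 0.25 at
        # count=4000, and hits the 0.1 floor near count ~ 9300 (added note).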

        if random.random() < prob:
            sign_gain_factor = 0.5
            if self.min_positive != 0.0 or self.max_positive != 1.0:
                sign_factor = _compute_sign_factor(
                    x,
                    self.channel_dim,
                    self.min_positive,
                    self.max_positive,
                    gain_factor=self.sign_gain_factor / prob,
                    max_factor=self.max_factor,
                )
            else:
                sign_factor = None

            scale_factor = _compute_scale_factor(
                x.detach(),
                self.channel_dim,
                min_abs=self.min_abs,
                max_abs=self.max_abs,
                gain_factor=self.scale_gain_factor / prob,
                max_factor=self.max_factor,
            )
            return ActivationBalancerFunction.apply(
                x,
                scale_factor,
                sign_factor,
                self.channel_dim,
            )
        else:
            return _no_op(x)
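
# A minimal usage sketch (added for illustration; shapes are assumptions):
#
#   balancer = ActivationBalancer(num_channels=512, channel_dim=-1)
#   x = torch.randn(8, 100, 512, requires_grad=True)
#   y = balancer(x)      # forward leaves the values unchanged
#   y.sum().backward()   # per-channel gradient rescaling happens in backward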


def BalancedDoubleSwish(d_model, channel_dim=-1, max_abs=10.0, min_prob=0.25) -> nn.Sequential:
    """
    ActivationBalancer -> DoubleSwish
    """
    balancer = ActivationBalancer(d_model, channel_dim=channel_dim, max_abs=max_abs, min_prob=min_prob)
    return nn.Sequential(
        balancer,
        DoubleSwish(),
    )
@@ -0,0 +1,362 @@
# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/transformer.py
import copy
import numbers
from functools import partial
from typing import Any
from typing import Callable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union

import torch
from AR.modules.activation import MultiheadAttention
from AR.modules.scaling import BalancedDoubleSwish
from torch import nn
from torch import Tensor
from torch.nn import functional as F

_shape_t = Union[int, List[int], torch.Size]


class LayerNorm(nn.Module):
    __constants__ = ["normalized_shape", "eps", "elementwise_affine"]
    normalized_shape: Tuple[int, ...]
    eps: float
    elementwise_affine: bool

    def __init__(
        self,
        normalized_shape: _shape_t,
        eps: float = 1e-5,
        elementwise_affine: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super(LayerNorm, self).__init__()
        if isinstance(normalized_shape, numbers.Integral):
            # mypy error: incompatible types in assignment
            normalized_shape = (normalized_shape,)  # type: ignore[assignment]
        self.normalized_shape = tuple(normalized_shape)  # type: ignore[arg-type]
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
            self.bias = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
        else:
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)

        self.reset_parameters()

    def reset_parameters(self) -> None:
        if self.elementwise_affine:
            nn.init.ones_(self.weight)
            nn.init.zeros_(self.bias)

    def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
        if isinstance(input, tuple):
            input, embedding = input
            return (
                F.layer_norm(
                    input,
                    self.normalized_shape,
                    self.weight,
                    self.bias,
                    self.eps,
                ),
                embedding,
            )

        assert embedding is None
        return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps)
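
    # Note (added): forward accepts either a plain Tensor or an
    # (input, embedding) tuple, so this norm can be chained in a stack where
    # AdaptiveLayerNorm threads a stage embedding through the layers.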

    def extra_repr(self) -> str:
        return "{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}".format(**self.__dict__)


class IdentityNorm(nn.Module):
    def __init__(
        self,
        d_model: int,
        eps: float = 1e-5,
        device=None,
        dtype=None,
    ) -> None:
        super(IdentityNorm, self).__init__()

    def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
        if isinstance(input, tuple):
            return input

        assert embedding is None
        return input


class TransformerEncoder(nn.Module):
    r"""TransformerEncoder is a stack of N encoder layers.  Users can build the
    BERT (https://arxiv.org/abs/1810.04805) model with corresponding parameters.

    Args:
        encoder_layer: an instance of the TransformerEncoderLayer() class (required).
        num_layers: the number of sub-encoder-layers in the encoder (required).
        norm: the layer normalization component (optional).

    Examples::
        >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)
        >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6)
        >>> src = torch.rand(10, 32, 512)
        >>> out = transformer_encoder(src)
    """

    __constants__ = ["norm"]

    def __init__(self, encoder_layer, num_layers, norm=None):
        super(TransformerEncoder, self).__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(
        self,
        src: Tensor,
        mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        return_layer_states: bool = False,
        cache=None,
    ) -> Tensor:
        r"""Pass the input through the encoder layers in turn.

        Args:
            src: the sequence to the encoder (required).
            mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).
            return_layer_states: return layers' state (optional).

        Shape:
            see the docs in Transformer class.
        """
        if return_layer_states:
            layer_states = []  # layers' output
            output = src
            for mod in self.layers:
                output = mod(
                    output,
                    src_mask=mask,
                    src_key_padding_mask=src_key_padding_mask,
                    cache=cache,
                )
                layer_states.append(output[0])

            if self.norm is not None:
                output = self.norm(output)

            return layer_states, output

        output = src
        for mod in self.layers:
            output = mod(
                output,
                src_mask=mask,
                src_key_padding_mask=src_key_padding_mask,
                cache=cache,
            )

        if self.norm is not None:
            output = self.norm(output)

        return output
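
    # Note (added): when return_layer_states=True the method returns
    # (layer_states, output) rather than a single Tensor, despite the
    # `-> Tensor` annotation above.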


class TransformerEncoderLayer(nn.Module):
    __constants__ = ["batch_first", "norm_first"]

    def __init__(
        self,
        d_model: int,
        nhead: int,
        dim_feedforward: int = 2048,
        dropout: float = 0.1,
        activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
        batch_first: bool = False,
        norm_first: bool = False,
        device=None,
        dtype=None,
        linear1_self_attention_cls: nn.Module = nn.Linear,
        linear2_self_attention_cls: nn.Module = nn.Linear,
        linear1_feedforward_cls: nn.Module = nn.Linear,
        linear2_feedforward_cls: nn.Module = nn.Linear,
        layer_norm_cls: nn.Module = LayerNorm,
        layer_norm_eps: float = 1e-5,
        adaptive_layer_norm=False,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(
            d_model,
            nhead,
            dropout=dropout,
            batch_first=batch_first,
            linear1_cls=linear1_self_attention_cls,
            linear2_cls=linear2_self_attention_cls,
            **factory_kwargs,
        )

        # Implementation of Feedforward model
        self.linear1 = linear1_feedforward_cls(d_model, dim_feedforward, **factory_kwargs)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = linear2_feedforward_cls(dim_feedforward, d_model, **factory_kwargs)

        self.norm_first = norm_first
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        # Legacy string support for activation function.
        if isinstance(activation, str):
            activation = _get_activation_fn(activation)
        elif isinstance(activation, partial):
            activation = activation(d_model)
        elif activation == BalancedDoubleSwish:
            activation = BalancedDoubleSwish(d_model)
        self.activation = activation

        norm1 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)
        if layer_norm_cls == IdentityNorm:
            norm2 = BalancedBasicNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        else:
            norm2 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)

        if adaptive_layer_norm:
            self.norm1 = AdaptiveLayerNorm(d_model, norm1)
            self.norm2 = AdaptiveLayerNorm(d_model, norm2)
        else:
            self.norm1 = norm1
            self.norm2 = norm2

    def __setstate__(self, state):
        super(TransformerEncoderLayer, self).__setstate__(state)
        if not hasattr(self, "activation"):
            self.activation = F.relu

    def forward(
        self,
        src: Tensor,
        src_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        cache=None,
    ) -> Tensor:
        r"""Pass the input through the encoder layer.

        Args:
            src: the sequence to the encoder layer (required).
            src_mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).

        Shape:
            see the docs in Transformer class.
        """
        x, stage_embedding = src, None
        is_src_tuple = False
        if isinstance(src, tuple):
            x, stage_embedding = src
            is_src_tuple = True

        if src_key_padding_mask is not None:
            _skpm_dtype = src_key_padding_mask.dtype
            if _skpm_dtype != torch.bool and not torch.is_floating_point(src_key_padding_mask):
                raise AssertionError("only bool and floating types of key_padding_mask are supported")

        if self.norm_first:
            x = x + self._sa_block(
                self.norm1(x, stage_embedding),
                src_mask,
                src_key_padding_mask,
                cache=cache,
            )
            x = x + self._ff_block(self.norm2(x, stage_embedding))
        else:
            x = self.norm1(
                x + self._sa_block(x, src_mask, src_key_padding_mask, cache=cache),
                stage_embedding,
            )
            x = self.norm2(x + self._ff_block(x), stage_embedding)

        if is_src_tuple:
            return (x, stage_embedding)
        return x

    # self-attention block
    def _sa_block(
        self,
        x: Tensor,
        attn_mask: Optional[Tensor],
        key_padding_mask: Optional[Tensor],
        cache=None,
    ) -> Tensor:
        # typical shapes observed upstream: x (1, 188, 512),
        # attn_mask (188, 188), key_padding_mask None
        x = self.self_attn(
            x,
            x,
            x,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask,
            need_weights=False,
            cache=cache,
        )[0]
        return self.dropout1(x)

    # feed forward block
    def _ff_block(self, x: Tensor) -> Tensor:
        x = self.linear2(self.dropout(self.activation(self.linear1(x))))
        return self.dropout2(x)


class AdaptiveLayerNorm(nn.Module):
    r"""Adaptive Layer Normalization"""

    def __init__(self, d_model, norm) -> None:
        super(AdaptiveLayerNorm, self).__init__()
        self.project_layer = nn.Linear(d_model, 2 * d_model)
        self.norm = norm
        self.d_model = d_model
        self.eps = self.norm.eps

    def forward(self, input: Tensor, embedding: Tensor = None) -> Tensor:
        if isinstance(input, tuple):
            input, embedding = input
            weight, bias = torch.split(
                self.project_layer(embedding),
                split_size_or_sections=self.d_model,
                dim=-1,
            )
            return (weight * self.norm(input) + bias, embedding)

        weight, bias = torch.split(
            self.project_layer(embedding),
            split_size_or_sections=self.d_model,
            dim=-1,
        )
        return weight * self.norm(input) + bias
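
    # Illustrative sketch (added): given a stage embedding e of shape
    # (..., d_model), project_layer(e) yields per-position scale and shift, so
    #   AdaptiveLayerNorm(d_model, LayerNorm(d_model))((x, e))
    # returns (weight * norm(x) + bias, e), threading e to the next layer.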


def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
@@ -0,0 +1,281 @@
# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/transformer.py
import copy
import numbers
from functools import partial
from typing import Any
from typing import Callable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union

import torch
from AR.modules.activation_onnx import MultiheadAttention
from AR.modules.scaling import BalancedDoubleSwish
from torch import nn
from torch import Tensor
from torch.nn import functional as F

_shape_t = Union[int, List[int], torch.Size]


class LayerNorm(nn.Module):
    __constants__ = ["normalized_shape", "eps", "elementwise_affine"]
    normalized_shape: Tuple[int, ...]
    eps: float
    elementwise_affine: bool

    def __init__(
        self,
        normalized_shape: _shape_t,
        eps: float = 1e-5,
        elementwise_affine: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super(LayerNorm, self).__init__()
        if isinstance(normalized_shape, numbers.Integral):
            # mypy error: incompatible types in assignment
            normalized_shape = (normalized_shape,)  # type: ignore[assignment]
        self.normalized_shape = tuple(normalized_shape)  # type: ignore[arg-type]
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
            self.bias = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
        else:
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)

        self.reset_parameters()

    def reset_parameters(self) -> None:
        if self.elementwise_affine:
            nn.init.ones_(self.weight)
            nn.init.zeros_(self.bias)

    def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
        if isinstance(input, tuple):
            input, embedding = input
            return (
                F.layer_norm(
                    input,
                    self.normalized_shape,
                    self.weight,
                    self.bias,
                    self.eps,
                ),
                embedding,
            )

        assert embedding is None
        return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps)

    def extra_repr(self) -> str:
        return "{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}".format(**self.__dict__)


class IdentityNorm(nn.Module):
    def __init__(
        self,
        d_model: int,
        eps: float = 1e-5,
        device=None,
        dtype=None,
    ) -> None:
        super(IdentityNorm, self).__init__()

    def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
        if isinstance(input, tuple):
            return input

        assert embedding is None
        return input


class TransformerEncoder(nn.Module):
    r"""TransformerEncoder is a stack of N encoder layers.  Users can build the
    BERT (https://arxiv.org/abs/1810.04805) model with corresponding parameters.

    Args:
        encoder_layer: an instance of the TransformerEncoderLayer() class (required).
        num_layers: the number of sub-encoder-layers in the encoder (required).
        norm: the layer normalization component (optional).

    Examples::
        >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)
        >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6)
        >>> src = torch.rand(10, 32, 512)
        >>> out = transformer_encoder(src)
    """

    __constants__ = ["norm"]

    def __init__(self, encoder_layer, num_layers, norm=None):
        super(TransformerEncoder, self).__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(
        self,
        src: Tensor,
        mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        return_layer_states: bool = False,
        cache=None,
    ) -> Tensor:
        output = src
        for mod in self.layers:
            output = mod(
                output,
                src_mask=mask,
                src_key_padding_mask=src_key_padding_mask,
                cache=cache,
            )

        if self.norm is not None:
            output = self.norm(output)

        return output


class TransformerEncoderLayer(nn.Module):
    __constants__ = ["batch_first", "norm_first"]

    def __init__(
        self,
        d_model: int,
        nhead: int,
        dim_feedforward: int = 2048,
        dropout: float = 0.1,
        activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
        batch_first: bool = False,
        norm_first: bool = False,
        device=None,
        dtype=None,
        linear1_self_attention_cls: nn.Module = nn.Linear,
        linear2_self_attention_cls: nn.Module = nn.Linear,
        linear1_feedforward_cls: nn.Module = nn.Linear,
        linear2_feedforward_cls: nn.Module = nn.Linear,
        layer_norm_cls: nn.Module = LayerNorm,
        layer_norm_eps: float = 1e-5,
        adaptive_layer_norm=False,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(
            d_model,
            nhead,
            dropout=dropout,
            batch_first=batch_first,
            linear1_cls=linear1_self_attention_cls,
            linear2_cls=linear2_self_attention_cls,
            **factory_kwargs,
        )
        self.linear1 = linear1_feedforward_cls(d_model, dim_feedforward, **factory_kwargs)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = linear2_feedforward_cls(dim_feedforward, d_model, **factory_kwargs)
        self.norm_first = norm_first
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        if isinstance(activation, str):
            activation = _get_activation_fn(activation)
        elif isinstance(activation, partial):
            activation = activation(d_model)
        elif activation == BalancedDoubleSwish:
            activation = BalancedDoubleSwish(d_model)
        self.activation = activation

        norm1 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)
        if layer_norm_cls == IdentityNorm:
            norm2 = BalancedBasicNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        else:
            norm2 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)

        if adaptive_layer_norm:
            self.norm1 = AdaptiveLayerNorm(d_model, norm1)
            self.norm2 = AdaptiveLayerNorm(d_model, norm2)
        else:
            self.norm1 = norm1
            self.norm2 = norm2

    def __setstate__(self, state):
        super(TransformerEncoderLayer, self).__setstate__(state)
        if not hasattr(self, "activation"):
            self.activation = F.relu

    def forward(
        self,
        src: Tensor,
        src_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        cache=None,
    ) -> Tensor:
        x = src
        stage_embedding = None
        x = self.norm1(
            x + self._sa_block(x, src_mask, src_key_padding_mask, cache=cache),
            stage_embedding,
        )
        x = self.norm2(x + self._ff_block(x), stage_embedding)

        return x

    def _sa_block(
        self,
        x: Tensor,
        attn_mask: Optional[Tensor],
        key_padding_mask: Optional[Tensor],
        cache=None,
    ) -> Tensor:
        x = self.self_attn(
            x,
            x,
            x,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask,
            need_weights=False,
            cache=cache,
        )
        return self.dropout1(x)

    def _ff_block(self, x: Tensor) -> Tensor:
        x = self.linear2(self.dropout(self.activation(self.linear1(x))))
        return self.dropout2(x)


class AdaptiveLayerNorm(nn.Module):
    r"""Adaptive Layer Normalization"""

    def __init__(self, d_model, norm) -> None:
        super(AdaptiveLayerNorm, self).__init__()
        self.project_layer = nn.Linear(d_model, 2 * d_model)
        self.norm = norm
        self.d_model = d_model
        self.eps = self.norm.eps

    def forward(self, input: Tensor, embedding: Tensor = None) -> Tensor:
        if isinstance(input, tuple):
            input, embedding = input
            weight, bias = torch.split(
                self.project_layer(embedding),
                split_size_or_sections=self.d_model,
                dim=-1,
            )
            return (weight * self.norm(input) + bias, embedding)

        weight, bias = torch.split(
            self.project_layer(embedding),
            split_size_or_sections=self.d_model,
            dim=-1,
        )
        return weight * self.norm(input) + bias


def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
@@ -0,0 +1,72 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/phonemizer.py
# reference: https://github.com/lifeiteng/vall-e
import itertools
import re
from typing import Dict
from typing import List

import regex
from gruut import sentences
from gruut.const import Sentence
from gruut.const import Word
from AR.text_processing.symbols import SYMBOL_TO_ID


class GruutPhonemizer:
    def __init__(self, language: str):
        self._phonemizer = sentences
        self.lang = language
        self.symbol_to_id = SYMBOL_TO_ID
        self._special_cases_dict: Dict[str, str] = {
            r"\.\.\.": "... ",
            ";": "; ",
            ":": ": ",
            ",": ", ",
            r"\.": ". ",
            "!": "! ",
            r"\?": "? ",
            "—": "—",
            "…": "… ",
            "«": "«",
            "»": "»",
        }
        self._punctuation_regexp: str = rf"([{''.join(self._special_cases_dict.keys())}])"

    def _normalize_punctuation(self, text: str) -> str:
        text = regex.sub(rf"\pZ+{self._punctuation_regexp}", r"\1", text)
        text = regex.sub(rf"{self._punctuation_regexp}(\pL)", r"\1 \2", text)
        text = regex.sub(r"\pZ+", r" ", text)
        return text.strip()

    def _convert_punctuation(self, word: Word) -> str:
        if not word.phonemes:
            return ""
        if word.phonemes[0] in ["‖", "|"]:
            return word.text.strip()

        phonemes = "".join(word.phonemes)
        # remove stress/length/tie modifier characters ˈ ˌ ː ͡ with regex
        phonemes = re.sub(r"[ˈˌː͡]", "", phonemes)
        return phonemes.strip()

    def phonemize(self, text: str, espeak: bool = False) -> str:
        text_to_phonemize: str = self._normalize_punctuation(text)
        # note: the language is hard-coded to "en-us" here; self.lang is unused
        sents: List[Sentence] = [sent for sent in self._phonemizer(text_to_phonemize, lang="en-us", espeak=espeak)]
        words: List[str] = [self._convert_punctuation(word) for word in itertools.chain(*sents)]
        return " ".join(words)

    def transform(self, phonemes):
        # convert phonemes to ids; the dictionary is defined in symbols.py
        return [self.symbol_to_id[p] for p in phonemes if p in self.symbol_to_id]


if __name__ == "__main__":
    phonemizer = GruutPhonemizer("en-us")
    # text -> IPA
    phonemes = phonemizer.phonemize("Hello, wor-ld ?")
    print("phonemes:", phonemes)
    print("len(phonemes):", len(phonemes))
    phoneme_ids = phonemizer.transform(phonemes)
    print("phoneme_ids:", phoneme_ids)
    print("len(phoneme_ids):", len(phoneme_ids))
@@ -0,0 +1,12 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/symbols.py
# reference: https://github.com/lifeiteng/vall-e
PAD = "_"
PUNCTUATION = ';:,.!?¡¿—…"«»“” '
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
IPA_LETTERS = (
    "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
)
SYMBOLS = [PAD] + list(PUNCTUATION) + list(LETTERS) + list(IPA_LETTERS)
SPACE_ID = SYMBOLS.index(" ")
SYMBOL_TO_ID = {s: i for i, s in enumerate(SYMBOLS)}
ID_TO_SYMBOL = {i: s for i, s in enumerate(SYMBOLS)}
@@ -0,0 +1,36 @@
import re


def str2bool(s):
    return s.lower() == "true"


def get_newest_ckpt(string_list):
    # Regex pattern matching the epoch/step numbers in checkpoint filenames
    pattern = r"epoch=(\d+)-step=(\d+)\.ckpt"

    # Extract (epoch, step) from each filename into a list of tuples
    extracted_info = []
    for string in string_list:
        match = re.match(pattern, string)
        if match:
            epoch = int(match.group(1))
            step = int(match.group(2))
            extracted_info.append((epoch, step, string))
    # Sort by epoch, then step, in descending order
    sorted_info = sorted(extracted_info, key=lambda x: (x[0], x[1]), reverse=True)
    # The newest checkpoint filename
    newest_ckpt = sorted_info[0][2]
    return newest_ckpt
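
# Example (added note): given ["epoch=3-step=100.ckpt", "epoch=10-step=5.ckpt"],
# the tuples (3, 100) and (10, 5) sort descending to put (10, 5) first, so
# get_newest_ckpt(...) returns "epoch=10-step=5.ckpt".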


# Return the text if the file exists and its first line is non-empty;
# otherwise return False
def check_txt_file(file_path):
    try:
        with open(file_path, "r") as file:
            text = file.readline().strip()
            assert text.strip() != ""
            return text
    except Exception:
        return False
@@ -0,0 +1,39 @@
#!/usr/bin/env python3
"""Initialize modules for espnet2 neural networks."""

import torch
from typeguard import check_argument_types


def initialize(model: torch.nn.Module, init: str):
    """Initialize weights of a neural network module.

    Parameters are initialized using the given method or distribution.

    Custom initialization routines can be implemented into submodules
    as function `espnet_initialization_fn` within the custom module.

    Args:
        model: Target.
        init: Method of initialization.
    """
    assert check_argument_types()
    print("init with", init)

    # weight init
    for p in model.parameters():
        if p.dim() > 1:
            if init == "xavier_uniform":
                torch.nn.init.xavier_uniform_(p.data)
            elif init == "xavier_normal":
                torch.nn.init.xavier_normal_(p.data)
            elif init == "kaiming_uniform":
                torch.nn.init.kaiming_uniform_(p.data, nonlinearity="relu")
            elif init == "kaiming_normal":
                torch.nn.init.kaiming_normal_(p.data, nonlinearity="relu")
            else:
                raise ValueError("Unknown initialization: " + init)
    # bias init
    for name, p in model.named_parameters():
        if ".bias" in name and p.dim() == 1:
            p.data.zero_()
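
# Illustrative usage (added): pick one of the four supported schemes, e.g.
#   initialize(model, "xavier_uniform")
# parameters with dim > 1 get the chosen scheme; 1-D ".bias" parameters
# are zeroed.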
ascend_910-gpt-sovits/GPT-SoVITS/GPT_SoVITS/AR/utils/io.py
@@ -0,0 +1,30 @@
import sys

import torch
import yaml


def load_yaml_config(path):
    with open(path) as f:
        config = yaml.full_load(f)
    return config


def save_config_to_yaml(config, path):
    assert path.endswith(".yaml")
    with open(path, "w") as f:
        f.write(yaml.dump(config))


def write_args(args, path):
    args_dict = dict((name, getattr(args, name)) for name in dir(args) if not name.startswith("_"))
    with open(path, "a") as args_file:
        args_file.write("==> torch version: {}\n".format(torch.__version__))
        args_file.write("==> cudnn version: {}\n".format(torch.backends.cudnn.version()))
        args_file.write("==> Cmd:\n")
        args_file.write(str(sys.argv))
        args_file.write("\n==> args:\n")
        for k, v in sorted(args_dict.items()):
            args_file.write("  %s: %s\n" % (str(k), str(v)))