Initialize the project; model provided by the ModelHub XC community
Model: othrif/wav2vec2-large-xlsr-arabic · Source: Original Platform
.gitattributes · vendored · Normal file · 16 lines
@@ -0,0 +1,16 @@
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tar.gz filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
README.md · Normal file · 127 lines
@@ -0,0 +1,127 @@
---
language: ar
datasets:
- common_voice
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Arabic by Othmane Rifki
  results:
  - task:
      name: Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: Common Voice ar
      type: common_voice
      args: ar
    metrics:
    - name: Test WER
      type: wer
      value: 46.77
---

# Wav2Vec2-Large-XLSR-53-Arabic

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Arabic using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
When using this model, make sure that your speech input is sampled at 16 kHz.

## Usage

The model can be used directly (without a language model) as follows:

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "ar", split="test[:2%]")

processor = Wav2Vec2Processor.from_pretrained("othrif/wav2vec2-large-xlsr-arabic")
model = Wav2Vec2ForCTC.from_pretrained("othrif/wav2vec2-large-xlsr-arabic")

resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)

print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
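To transcribe a single local recording instead of Common Voice samples, the same pipeline applies; a minimal sketch, assuming a local file at the hypothetical path `my_recording.wav`:

```python
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("othrif/wav2vec2-large-xlsr-arabic")
model = Wav2Vec2ForCTC.from_pretrained("othrif/wav2vec2-large-xlsr-arabic")

# Load the recording and resample it to the 16 kHz the model expects.
speech, source_rate = torchaudio.load("my_recording.wav")  # hypothetical path
speech = torchaudio.transforms.Resample(source_rate, 16_000)(speech).squeeze().numpy()

inputs = processor(speech, sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

print(processor.batch_decode(torch.argmax(logits, dim=-1)))
```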

## Evaluation

The model can be evaluated as follows on the Arabic test data of Common Voice.

```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re

test_dataset = load_dataset("common_voice", "ar", split="test")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("othrif/wav2vec2-large-xlsr-arabic")
model = Wav2Vec2ForCTC.from_pretrained("othrif/wav2vec2-large-xlsr-arabic")
model.to("cuda")

chars_to_ignore_regex = '[\؛\—\_get\«\»\ـ\ـ\,\?\.\!\-\;\:\"\“\%\‘\”\�\#\،\☭,\؟]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Run batched inference and collect the predicted transcriptions.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)

    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits

    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)

print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```

**Test Result**: 46.77 % WER

## Training

The Common Voice `train` and `validation` splits were used for training.

The script used for training can be found [here](https://huggingface.co/othrif/wav2vec2-large-xlsr-arabic/tree/main).
config.json · Normal file · 76 lines
@@ -0,0 +1,76 @@
{
  "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
  "activation_dropout": 0.055,
  "apply_spec_augment": true,
  "architectures": [
    "Wav2Vec2ForCTC"
  ],
  "attention_dropout": 0.094,
  "bos_token_id": 1,
  "conv_bias": true,
  "conv_dim": [
    512,
    512,
    512,
    512,
    512,
    512,
    512
  ],
  "conv_kernel": [
    10,
    3,
    3,
    3,
    3,
    2,
    2
  ],
  "conv_stride": [
    5,
    2,
    2,
    2,
    2,
    2,
    2
  ],
  "ctc_loss_reduction": "mean",
  "ctc_zero_infinity": false,
  "do_stable_layer_norm": true,
  "eos_token_id": 2,
  "feat_extract_activation": "gelu",
  "feat_extract_dropout": 0.0,
  "feat_extract_norm": "layer",
  "feat_proj_dropout": 0.04,
  "final_dropout": 0.0,
  "gradient_checkpointing": true,
  "hidden_act": "gelu",
  "hidden_dropout": 0.047,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-05,
  "layerdrop": 0.041,
  "mask_channel_length": 10,
  "mask_channel_min_space": 1,
  "mask_channel_other": 0.0,
  "mask_channel_prob": 0.0,
  "mask_channel_selection": "static",
  "mask_feature_length": 10,
  "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_min_space": 1,
  "mask_time_other": 0.0,
  "mask_time_prob": 0.082,
  "mask_time_selection": "static",
  "model_type": "wav2vec2",
  "num_attention_heads": 16,
  "num_conv_pos_embedding_groups": 16,
  "num_conv_pos_embeddings": 128,
  "num_feat_extract_layers": 7,
  "num_hidden_layers": 24,
  "pad_token_id": 49,
  "transformers_version": "4.4.0",
  "vocab_size": 50
}
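The dropout and SpecAugment values above mirror the flags passed in finetune.sh further down. As a quick sanity check they can also be read back programmatically; a small sketch, assuming the repository is reachable on the Hub:

```python
from transformers import Wav2Vec2Config

config = Wav2Vec2Config.from_pretrained("othrif/wav2vec2-large-xlsr-arabic")
# A few of the fine-tuning hyperparameters stored in config.json:
print(config.attention_dropout, config.hidden_dropout, config.mask_time_prob)
print(config.vocab_size, config.pad_token_id)  # 50 tokens, [PAD] id 49
```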
evaluate.py · Normal file · 46 lines
@@ -0,0 +1,46 @@
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
import tnkeeh as tn

test_dataset = load_dataset("common_voice", "ar", split="test")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("othrif/wav2vec2-large-xlsr-arabic")
model = Wav2Vec2ForCTC.from_pretrained("othrif/wav2vec2-large-xlsr-arabic")
model.to("cuda")

# chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\'\�]'
chars_to_ignore_regex = '[\؛\—\_get\«\»\ـ\ـ\,\?\.\!\-\;\:\"\“\%\‘\”\�\#\،\☭,\؟\'ۚ\چ\ڨ\ﺃ\ھ\ﻻ\'ۖ]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)


# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# For Arabic diacritics
cleaner = tn.Tnkeeh(remove_diacritics=True)
test_dataset = cleaner.clean_hf_dataset(test_dataset, 'sentence')

# Run batched inference and collect the predicted transcriptions.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)

    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits

    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=32)

print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
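The Tnkeeh step above strips Arabic diacritics so that references match the model's undiacritized output. As a rough illustration of the effect (a plain-regex approximation, not the tnkeeh implementation):

```python
import re

# Harakat/tanween plus the superscript alef; roughly what remove_diacritics targets.
ARABIC_DIACRITICS = re.compile(r"[\u064B-\u0652\u0670]")

print(ARABIC_DIACRITICS.sub("", "مَرْحَباً بِكُمْ"))  # -> مرحبا بكم
```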
finetune.sh · Executable file · 40 lines
@@ -0,0 +1,40 @@
#!/usr/bin/env bash

export model_path=$1
mkdir -p ${model_path}

python run_common_voice.py \
    --dataloader_num_workers="8" \
    --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \
    --dataset_config_name="ar" \
    --output_dir=${model_path} \
    --num_train_epochs="50" \
    --per_device_train_batch_size="16" \
    --per_device_eval_batch_size="16" \
    --evaluation_strategy="steps" \
    --warmup_steps="500" \
    --fp16 \
    --freeze_feature_extractor \
    --save_steps="400" \
    --eval_steps="400" \
    --logging_steps="400" \
    --save_total_limit="1" \
    --group_by_length \
    --attention_dropout="0.094" \
    --activation_dropout="0.055" \
    --feat_proj_dropout="0.04" \
    --hidden_dropout="0.047" \
    --layerdrop="0.041" \
    --mask_time_prob="0.082" \
    --gradient_checkpointing \
    --learning_rate="3e-4" \
    --do_train --do_eval

#--model_name_or_path="facebook/wav2vec2-large-xlsr-53" \
#--overwrite_output_dir \
#--model_name_or_path="/home/othrif/projects/wav2vec2/finetune-xlsr/models/ar/msa/wav2vec2-large-xlsr-arabic" \
preprocessor_config.json · Normal file · 8 lines
@@ -0,0 +1,8 @@
{
  "do_normalize": true,
  "feature_size": 1,
  "padding_side": "right",
  "padding_value": 0.0,
  "return_attention_mask": true,
  "sampling_rate": 16000
}
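These values configure the feature-extractor half of the processor; loading them back is a one-liner, sketched here assuming the repository is reachable on the Hub:

```python
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("othrif/wav2vec2-large-xlsr-arabic")
print(feature_extractor.sampling_rate, feature_extractor.return_attention_mask)  # 16000 True
```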
pytorch_model.bin · Normal file · 3 lines (Git LFS pointer)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f3a226cc02178fad03b0e8311c290e48dd677d144e66b148c221e43b4e104617
size 1262138839
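The checked-in file is only a Git LFS pointer; the actual ~1.26 GB checkpoint is fetched by `git lfs pull` or by `from_pretrained`. A small sketch to verify a locally pulled copy against the pointer's oid (the local path is an assumption):

```python
import hashlib

sha = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:  # assumes a local clone with LFS objects pulled
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

# Should print f3a226cc02178fad03b0e8311c290e48dd677d144e66b148c221e43b4e104617
print(sha.hexdigest())
```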
run_common_voice.py · Normal file · 518 lines
@@ -0,0 +1,518 @@
#!/usr/bin/env python3
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn

import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


if is_apex_available():
    from apex import amp

import tnkeeh as tn

if version.parse(torch.__version__) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)

@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in the feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector "
            "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
            "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
        },
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True,
        metadata={
            "help": "If True, use gradient checkpointing to save memory at the expense of a slower backward pass."
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train+validation'."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
            "value if set."
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )

@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor (:class:`~transformers.Wav2Vec2Processor`)
            The processor used for processing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
              different lengths).
        max_length (:obj:`int`, `optional`):
            Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
        max_length_labels (:obj:`int`, `optional`):
            Maximum length of the ``labels`` returned list and optionally padding length (see above).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(
                label_features,
                padding=self.padding,
                max_length=self.max_length_labels,
                pad_to_multiple_of=self.pad_to_multiple_of_labels,
                return_tensors="pt",
            )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch

class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to train.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.

        Return:
            :obj:`torch.Tensor`: The tensor with training loss on this batch.
        """

        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()

def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    # chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'
    chars_to_ignore_regex = '[\؛\—\_get\«\»\ـ\ـ\,\?\.\!\-\;\:\"\“\%\‘\”\�\#\،\☭,\؟\'ۚ\چ\ڨ\ﺃ\ھ\ﻻ\'ۖ]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    # For Arabic diacritics
    cleaner = tn.Tnkeeh(remove_diacritics=True)
    train_dataset = cleaner.clean_hf_dataset(train_dataset, 'sentence')
    eval_dataset = cleaner.clean_hf_dataset(eval_dataset, 'sentence')

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=model_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        batch["input_values"] = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0]).input_values
        # Setup the processor for targets
        with processor.as_target_processor():
            batch["labels"] = processor(batch["target_text"]).input_ids
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        # save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
special_tokens_map.json · Normal file · 1 line
@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json · Normal file · 1 line
@@ -0,0 +1 @@
{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|"}
vocab.json · Normal file · 1 line
@@ -0,0 +1 @@
{"ف": 0, "ی": 1, "إ": 2, "ا": 3, "ز": 4, "ء": 5, "ک": 6, "م": 7, "ص": 8, "ع": 10, "ؤ": 11, "ذ": 12, "ج": 13, "ٰ": 14, "ك": 15, "ش": 16, "ت": 17, "ه": 18, "غ": 19, "آ": 20, "ض": 21, "ظ": 22, "ل": 23, "ث": 24, "ٌ": 25, "ي": 26, "أ": 27, "ِ": 28, "ر": 29, "ْ": 30, "ة": 31, "ى": 32, "د": 33, "ُ": 34, "ً": 35, "و": 36, "ق": 37, "خ": 38, "ن": 39, "ط": 40, "ح": 41, "ئ": 42, "ّ": 43, "س": 44, "ب": 45, "ٍ": 46, "َ": 47, "|": 9, "[UNK]": 48, "[PAD]": 49}
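This character-level vocabulary is what `Wav2Vec2CTCTokenizer` consumes: `|` (id 9) is the word delimiter and `[PAD]` (id 49) doubles as the CTC blank. A short sketch of loading it back from the Hub:

```python
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("othrif/wav2vec2-large-xlsr-arabic")
print(tokenizer.convert_tokens_to_ids(["م", "ر", "ح", "ب", "ا"]))  # per-character ids from vocab.json
print(tokenizer.pad_token_id, tokenizer.word_delimiter_token)      # 49 '|'
```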