初始化项目,由ModelHub XC社区提供模型
Model: Edresson/wav2vec2-large-xlsr-coraa-portuguese Source: Original Platform
This commit is contained in:
27
.gitattributes
vendored
Normal file
27
.gitattributes
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
*.7z filter=lfs diff=lfs merge=lfs -text
|
||||
*.arrow filter=lfs diff=lfs merge=lfs -text
|
||||
*.bin filter=lfs diff=lfs merge=lfs -text
|
||||
*.bin.* filter=lfs diff=lfs merge=lfs -text
|
||||
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
||||
*.ftz filter=lfs diff=lfs merge=lfs -text
|
||||
*.gz filter=lfs diff=lfs merge=lfs -text
|
||||
*.h5 filter=lfs diff=lfs merge=lfs -text
|
||||
*.joblib filter=lfs diff=lfs merge=lfs -text
|
||||
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
||||
*.model filter=lfs diff=lfs merge=lfs -text
|
||||
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
||||
*.onnx filter=lfs diff=lfs merge=lfs -text
|
||||
*.ot filter=lfs diff=lfs merge=lfs -text
|
||||
*.parquet filter=lfs diff=lfs merge=lfs -text
|
||||
*.pb filter=lfs diff=lfs merge=lfs -text
|
||||
*.pt filter=lfs diff=lfs merge=lfs -text
|
||||
*.pth filter=lfs diff=lfs merge=lfs -text
|
||||
*.rar filter=lfs diff=lfs merge=lfs -text
|
||||
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
||||
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
||||
*.tflite filter=lfs diff=lfs merge=lfs -text
|
||||
*.tgz filter=lfs diff=lfs merge=lfs -text
|
||||
*.xz filter=lfs diff=lfs merge=lfs -text
|
||||
*.zip filter=lfs diff=lfs merge=lfs -text
|
||||
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
||||
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
||||
84
README.md
Normal file
84
README.md
Normal file
@@ -0,0 +1,84 @@
|
||||
---
|
||||
language: pt
|
||||
datasets:
|
||||
- CORAA
|
||||
metrics:
|
||||
- wer
|
||||
tags:
|
||||
- audio
|
||||
- speech
|
||||
- wav2vec2
|
||||
- pt
|
||||
- portuguese-speech-corpus
|
||||
- automatic-speech-recognition
|
||||
- hf-asr-leaderboard
|
||||
- speech
|
||||
- PyTorch
|
||||
license: apache-2.0
|
||||
model-index:
|
||||
- name: Edresson Casanova XLSR Wav2Vec2 Large 53 Portuguese
|
||||
results:
|
||||
- task:
|
||||
name: Speech Recognition
|
||||
type: automatic-speech-recognition
|
||||
dataset:
|
||||
name: CORAA
|
||||
type: CORAA
|
||||
args: pt
|
||||
metrics:
|
||||
- name: Test CORAA WER
|
||||
type: wer
|
||||
value: 25.26
|
||||
- task:
|
||||
name: Speech Recognition
|
||||
type: automatic-speech-recognition
|
||||
dataset:
|
||||
name: Common Voice 7
|
||||
type: mozilla-foundation/common_voice_7_0
|
||||
args: pt
|
||||
metrics:
|
||||
- name: Test WER on Common Voice 7
|
||||
type: wer
|
||||
value: 20.08
|
||||
---
|
||||
|
||||
# Wav2vec 2.0 trained with CORAA Portuguese Dataset
|
||||
|
||||
This is a demonstration of a fine-tuned Wav2Vec2 model for Portuguese using the following [CORAA dataset](https://github.com/nilc-nlp/CORAA)
|
||||
|
||||
|
||||
|
||||
# Use this model
|
||||
|
||||
```python
|
||||
|
||||
from transformers import AutoTokenizer, Wav2Vec2ForCTC
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("Edresson/wav2vec2-large-xlsr-coraa-portuguese")
|
||||
|
||||
model = Wav2Vec2ForCTC.from_pretrained("Edresson/wav2vec2-large-xlsr-coraa-portuguese")
|
||||
```
|
||||
# Results
|
||||
For the results, check the [CORAA article](https://arxiv.org/abs/2110.15731)
|
||||
|
||||
# Example test with Common Voice Dataset
|
||||
|
||||
|
||||
```python
|
||||
dataset = load_dataset("common_voice", "pt", split="test", data_dir="./cv-corpus-6.1-2020-12-11")
|
||||
|
||||
resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000)
|
||||
|
||||
def map_to_array(batch):
|
||||
speech, _ = torchaudio.load(batch["path"])
|
||||
batch["speech"] = resampler.forward(speech.squeeze(0)).numpy()
|
||||
batch["sampling_rate"] = resampler.new_freq
|
||||
batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'")
|
||||
return batch
|
||||
```
|
||||
|
||||
```python
|
||||
ds = dataset.map(map_to_array)
|
||||
result = ds.map(map_to_pred, batched=True, batch_size=1, remove_columns=list(ds.features.keys()))
|
||||
print(wer.compute(predictions=result["predicted"], references=result["target"]))
|
||||
```
|
||||
84
config.json
Normal file
84
config.json
Normal file
@@ -0,0 +1,84 @@
|
||||
{
|
||||
"_name_or_path": "../checkpoints/Wav2Vec/CORAA-new/final-version/train/checkpoint-37810",
|
||||
"activation_dropout": 0.0,
|
||||
"apply_spec_augment": true,
|
||||
"architectures": [
|
||||
"Wav2Vec2ForCTC"
|
||||
],
|
||||
"attention_dropout": 0.1,
|
||||
"bos_token_id": 1,
|
||||
"codevector_dim": 768,
|
||||
"contrastive_logits_temperature": 0.1,
|
||||
"conv_bias": true,
|
||||
"conv_dim": [
|
||||
512,
|
||||
512,
|
||||
512,
|
||||
512,
|
||||
512,
|
||||
512,
|
||||
512
|
||||
],
|
||||
"conv_kernel": [
|
||||
10,
|
||||
3,
|
||||
3,
|
||||
3,
|
||||
3,
|
||||
2,
|
||||
2
|
||||
],
|
||||
"conv_stride": [
|
||||
5,
|
||||
2,
|
||||
2,
|
||||
2,
|
||||
2,
|
||||
2,
|
||||
2
|
||||
],
|
||||
"ctc_loss_reduction": "mean",
|
||||
"ctc_zero_infinity": true,
|
||||
"diversity_loss_weight": 0.1,
|
||||
"do_stable_layer_norm": true,
|
||||
"eos_token_id": 2,
|
||||
"feat_extract_activation": "gelu",
|
||||
"feat_extract_dropout": 0.0,
|
||||
"feat_extract_norm": "layer",
|
||||
"feat_proj_dropout": 0.1,
|
||||
"feat_quantizer_dropout": 0.0,
|
||||
"final_dropout": 0.0,
|
||||
"gradient_checkpointing": true,
|
||||
"hidden_act": "gelu",
|
||||
"hidden_dropout": 0.1,
|
||||
"hidden_size": 1024,
|
||||
"initializer_range": 0.02,
|
||||
"intermediate_size": 4096,
|
||||
"layer_norm_eps": 1e-05,
|
||||
"layerdrop": 0.0,
|
||||
"mask_channel_length": 10,
|
||||
"mask_channel_min_space": 1,
|
||||
"mask_channel_other": 0.0,
|
||||
"mask_channel_prob": 0.0,
|
||||
"mask_channel_selection": "static",
|
||||
"mask_feature_length": 10,
|
||||
"mask_feature_prob": 0.0,
|
||||
"mask_time_length": 10,
|
||||
"mask_time_min_space": 1,
|
||||
"mask_time_other": 0.0,
|
||||
"mask_time_prob": 0.05,
|
||||
"mask_time_selection": "static",
|
||||
"model_type": "wav2vec2",
|
||||
"num_attention_heads": 16,
|
||||
"num_codevector_groups": 2,
|
||||
"num_codevectors_per_group": 320,
|
||||
"num_conv_pos_embedding_groups": 16,
|
||||
"num_conv_pos_embeddings": 128,
|
||||
"num_feat_extract_layers": 7,
|
||||
"num_hidden_layers": 24,
|
||||
"num_negatives": 100,
|
||||
"pad_token_id": 0,
|
||||
"proj_codevector_dim": 768,
|
||||
"transformers_version": "4.6.1",
|
||||
"vocab_size": 45
|
||||
}
|
||||
9
preprocessor_config.json
Normal file
9
preprocessor_config.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"do_normalize": true,
|
||||
"feature_extractor_type": "Wav2Vec2FeatureExtractor",
|
||||
"feature_size": 1,
|
||||
"padding_side": "right",
|
||||
"padding_value": 0.0,
|
||||
"return_attention_mask": true,
|
||||
"sampling_rate": 16000
|
||||
}
|
||||
3
pytorch_model.bin
Normal file
3
pytorch_model.bin
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:d6acf675835a40d35f1925b0b44f6e1bdfa47c793f2056efbddaa3b1d136c134
|
||||
size 1262113269
|
||||
1
special_tokens_map.json
Normal file
1
special_tokens_map.json
Normal file
@@ -0,0 +1 @@
|
||||
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
|
||||
1
tokenizer_config.json
Normal file
1
tokenizer_config.json
Normal file
@@ -0,0 +1 @@
|
||||
{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "word_delimiter_token": "|"}
|
||||
1
vocab.json
Normal file
1
vocab.json
Normal file
@@ -0,0 +1 @@
|
||||
{"<pad>": 0, "|": 1, "<unk>": 2, "a": 3, "b": 4, "c": 5, "d": 6, "e": 7, "f": 8, "g": 9, "h": 10, "i": 11, "j": 12, "k": 13, "l": 14, "m": 15, "n": 16, "o": 17, "p": 18, "q": 19, "r": 20, "s": 21, "t": 22, "u": 23, "v": 24, "w": 25, "x": 26, "y": 27, "z": 28, "ç": 29, "ã": 30, "à": 31, "á": 32, "â": 33, "ê": 34, "é": 35, "í": 36, "ó": 37, "ô": 38, "õ": 39, "ú": 40, "û": 41, "-": 42, "<s>": 43, "</s>": 44}
|
||||
Reference in New Issue
Block a user