commit 4be20121a9641206d50babd5364e7cc6a8998369 Author: ModelHub XC Date: Fri May 8 11:35:50 2026 +0800 初始化项目,由ModelHub XC社区提供模型 Model: Edresson/wav2vec2-large-xlsr-coraa-portuguese Source: Original Platform diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..6d34772 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,27 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bin.* filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zstandard filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md new file mode 100644 index 0000000..378694a --- /dev/null +++ b/README.md @@ -0,0 +1,84 @@ +--- +language: pt +datasets: +- CORAA +metrics: +- wer +tags: +- audio +- speech +- wav2vec2 +- pt +- portuguese-speech-corpus +- automatic-speech-recognition +- hf-asr-leaderboard +- speech +- PyTorch +license: apache-2.0 +model-index: +- name: Edresson Casanova XLSR Wav2Vec2 Large 53 Portuguese + results: + - task: + 
name: Speech Recognition + type: automatic-speech-recognition + dataset: + name: CORAA + type: CORAA + args: pt + metrics: + - name: Test CORAA WER + type: wer + value: 25.26 + - task: + name: Speech Recognition + type: automatic-speech-recognition + dataset: + name: Common Voice 7 + type: mozilla-foundation/common_voice_7_0 + args: pt + metrics: + - name: Test WER on Common Voice 7 + type: wer + value: 20.08 +--- + +# Wav2vec 2.0 trained with CORAA Portuguese Dataset + +This is a demonstration of a fine-tuned Wav2vec model for Portuguese using the following [CORAA dataset](https://github.com/nilc-nlp/CORAA) + + + +# Use this model + +```python + +from transformers import AutoTokenizer, Wav2Vec2ForCTC + +tokenizer = AutoTokenizer.from_pretrained("Edresson/wav2vec2-large-xlsr-coraa-portuguese") + +model = Wav2Vec2ForCTC.from_pretrained("Edresson/wav2vec2-large-xlsr-coraa-portuguese") +``` +# Results +For the results check the [CORAA article](https://arxiv.org/abs/2110.15731) + +# Example test with Common Voice Dataset + + +```python +dataset = load_dataset("common_voice", "pt", split="test", data_dir="./cv-corpus-6.1-2020-12-11") + +resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000) + +def map_to_array(batch): + speech, _ = torchaudio.load(batch["path"]) + batch["speech"] = resampler.forward(speech.squeeze(0)).numpy() + batch["sampling_rate"] = resampler.new_freq + batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'") + return batch +``` + +```python +ds = dataset.map(map_to_array) +result = ds.map(map_to_pred, batched=True, batch_size=1, remove_columns=list(ds.features.keys())) +print(wer.compute(predictions=result["predicted"], references=result["target"])) +``` diff --git a/config.json b/config.json new file mode 100644 index 0000000..d39274d --- /dev/null +++ b/config.json @@ -0,0 +1,84 @@ +{ + "_name_or_path": "../checkpoints/Wav2Vec/CORAA-new/final-version/train/checkpoint-37810", + 
"activation_dropout": 0.0, + "apply_spec_augment": true, + "architectures": [ + "Wav2Vec2ForCTC" + ], + "attention_dropout": 0.1, + "bos_token_id": 1, + "codevector_dim": 768, + "contrastive_logits_temperature": 0.1, + "conv_bias": true, + "conv_dim": [ + 512, + 512, + 512, + 512, + 512, + 512, + 512 + ], + "conv_kernel": [ + 10, + 3, + 3, + 3, + 3, + 2, + 2 + ], + "conv_stride": [ + 5, + 2, + 2, + 2, + 2, + 2, + 2 + ], + "ctc_loss_reduction": "mean", + "ctc_zero_infinity": true, + "diversity_loss_weight": 0.1, + "do_stable_layer_norm": true, + "eos_token_id": 2, + "feat_extract_activation": "gelu", + "feat_extract_dropout": 0.0, + "feat_extract_norm": "layer", + "feat_proj_dropout": 0.1, + "feat_quantizer_dropout": 0.0, + "final_dropout": 0.0, + "gradient_checkpointing": true, + "hidden_act": "gelu", + "hidden_dropout": 0.1, + "hidden_size": 1024, + "initializer_range": 0.02, + "intermediate_size": 4096, + "layer_norm_eps": 1e-05, + "layerdrop": 0.0, + "mask_channel_length": 10, + "mask_channel_min_space": 1, + "mask_channel_other": 0.0, + "mask_channel_prob": 0.0, + "mask_channel_selection": "static", + "mask_feature_length": 10, + "mask_feature_prob": 0.0, + "mask_time_length": 10, + "mask_time_min_space": 1, + "mask_time_other": 0.0, + "mask_time_prob": 0.05, + "mask_time_selection": "static", + "model_type": "wav2vec2", + "num_attention_heads": 16, + "num_codevector_groups": 2, + "num_codevectors_per_group": 320, + "num_conv_pos_embedding_groups": 16, + "num_conv_pos_embeddings": 128, + "num_feat_extract_layers": 7, + "num_hidden_layers": 24, + "num_negatives": 100, + "pad_token_id": 0, + "proj_codevector_dim": 768, + "transformers_version": "4.6.1", + "vocab_size": 45 +} diff --git a/preprocessor_config.json b/preprocessor_config.json new file mode 100644 index 0000000..73caa15 --- /dev/null +++ b/preprocessor_config.json @@ -0,0 +1,9 @@ +{ + "do_normalize": true, + "feature_extractor_type": "Wav2Vec2FeatureExtractor", + "feature_size": 1, + "padding_side": 
"right", + "padding_value": 0.0, + "return_attention_mask": true, + "sampling_rate": 16000 +} diff --git a/pytorch_model.bin b/pytorch_model.bin new file mode 100644 index 0000000..2acaa21 --- /dev/null +++ b/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6acf675835a40d35f1925b0b44f6e1bdfa47c793f2056efbddaa3b1d136c134 +size 1262113269 diff --git a/special_tokens_map.json b/special_tokens_map.json new file mode 100644 index 0000000..25bc396 --- /dev/null +++ b/special_tokens_map.json @@ -0,0 +1 @@ +{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"} \ No newline at end of file diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..43772fe --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1 @@ +{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "word_delimiter_token": "|"} \ No newline at end of file diff --git a/vocab.json b/vocab.json new file mode 100644 index 0000000..c981b82 --- /dev/null +++ b/vocab.json @@ -0,0 +1 @@ +{"<pad>": 0, "|": 1, "<unk>": 2, "a": 3, "b": 4, "c": 5, "d": 6, "e": 7, "f": 8, "g": 9, "h": 10, "i": 11, "j": 12, "k": 13, "l": 14, "m": 15, "n": 16, "o": 17, "p": 18, "q": 19, "r": 20, "s": 21, "t": 22, "u": 23, "v": 24, "w": 25, "x": 26, "y": 27, "z": 28, "ç": 29, "ã": 30, "à": 31, "á": 32, "â": 33, "ê": 34, "é": 35, "í": 36, "ó": 37, "ô": 38, "õ": 39, "ú": 40, "û": 41, "-": 42, "<s>": 43, "</s>": 44} \ No newline at end of file