Initialize the project; model provided by the ModelHub XC community
Model: HWERI/pythia-70m-deduped-cleansharegpt-en
Source: Original Platform
.gitattributes (vendored, Normal file, 35 lines)
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
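Each pattern above routes matching files through Git LFS, so the commit stores a small pointer instead of the binary payload. As a quick way to see which of this repo's files those patterns catch, here is a minimal sketch matching names with Python's fnmatch (note that fnmatch only approximates git's wildmatch rules, e.g. for `**` and basename matching at depth):

```python
from fnmatch import fnmatch

# A subset of the LFS patterns from the .gitattributes above.
LFS_PATTERNS = ["*.bin", "*.safetensors", "*.onnx", "*.pt", "*tfevents*"]

def tracked_by_lfs(path: str) -> bool:
    """True if the path matches any LFS-tracked pattern."""
    return any(fnmatch(path, pattern) for pattern in LFS_PATTERNS)

# pytorch_model.bin matches *.bin, so this commit stores only its LFS pointer;
# config.json and merges.txt match nothing and are committed as regular text.
for name in ["pytorch_model.bin", "config.json", "merges.txt"]:
    print(name, tracked_by_lfs(name))
```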
README.md (Normal file, 24 lines)
@@ -0,0 +1,24 @@
---
license: apache-2.0
datasets:
- shibing624/sharegpt_gpt4
language:
- en
---

Pythia-70m-deduped fine-tuned on a cleaned version of ShareGPT data.
The cleaned dataset was obtained by removing duplicates and paraphrases from the original corpus and keeping only the English instances.
The final training set contains 3,507 instances.
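The cleaning script itself is not part of this commit. As a rough illustration of two of the three steps described above, here is a minimal sketch of exact-duplicate removal plus English filtering; the langdetect dependency and the list-of-strings input format are assumptions, and paraphrase removal would need an additional similarity pass:

```python
from langdetect import detect, LangDetectException

def clean_corpus(instances: list[str]) -> list[str]:
    """Drop exact duplicates, then keep only instances detected as English."""
    seen: set[str] = set()
    kept: list[str] = []
    for text in instances:
        key = " ".join(text.lower().split())  # normalize case and whitespace
        if key in seen:
            continue
        seen.add(key)
        try:
            if detect(text) == "en":
                kept.append(text)
        except LangDetectException:  # empty or undecidable input
            pass
    return kept
```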
# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_HWERI__pythia-70m-deduped-cleansharegpt-en).

| Metric              | Value |
|---------------------|-------|
| Avg.                | 25.06 |
| ARC (25-shot)       | 21.16 |
| HellaSwag (10-shot) | 27.16 |
| MMLU (5-shot)       | 25.24 |
| TruthfulQA (0-shot) | 48.57 |
| Winogrande (5-shot) | 50.12 |
| GSM8K (5-shot)      | 0.0   |
| DROP (3-shot)       | 3.15  |
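Given the GPTNeoXForCausalLM architecture declared in config.json below, the checkpoint should load through the standard transformers auto classes. A minimal usage sketch (the prompt is arbitrary; the README does not specify a prompt format):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "HWERI/pythia-70m-deduped-cleansharegpt-en"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo)

inputs = tokenizer("Explain Git LFS in one sentence.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```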
config.json (Normal file, 31 lines)
@@ -0,0 +1,31 @@
{
  "_name_or_path": "/mnt/llm10t/c00669034/models/pythia-70m-deduped",
  "architectures": [
    "GPTNeoXForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 0,
  "classifier_dropout": 0.1,
  "end_token_id": 0,
  "eos_token_id": 0,
  "hidden_act": "gelu",
  "hidden_dropout": 0.0,
  "hidden_size": 512,
  "initializer_range": 0.02,
  "intermediate_size": 2048,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 2048,
  "model_type": "gpt_neox",
  "num_attention_heads": 8,
  "num_hidden_layers": 6,
  "pad_token_id": 0,
  "rope_scaling": null,
  "rotary_emb_base": 10000,
  "rotary_pct": 0.25,
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.31.0",
  "use_cache": true,
  "use_parallel_residual": true,
  "vocab_size": 50280
}
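As a sanity check, the ~70M parameter count implied by the model name can be reproduced from these hyperparameters, assuming the standard GPT-NeoX layout (fused QKV projection, biased linears, two LayerNorms per block, untied output head); a back-of-the-envelope sketch:

```python
# Values from config.json above.
vocab, hidden, layers, inter = 50280, 512, 6, 2048

embed_in = vocab * hidden             # input embedding
embed_out = vocab * hidden            # output head ("tie_word_embeddings": false)
per_layer = (
    hidden * 3 * hidden + 3 * hidden  # fused QKV projection + bias
    + hidden * hidden + hidden        # attention output projection + bias
    + hidden * inter + inter          # MLP up-projection + bias
    + inter * hidden + hidden         # MLP down-projection + bias
    + 2 * 2 * hidden                  # two LayerNorms, weight + bias each
)
final_norm = 2 * hidden

total = embed_in + embed_out + layers * per_layer + final_norm
print(f"{total:,}")  # 70,402,048 parameters, i.e. ~70M
```

At float32 (4 bytes per weight) this comes to about 281.6 MB, which lines up with the 281632469-byte pytorch_model.bin pointer below.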
merges.txt (Normal file, 50010 lines)
File diff suppressed because it is too large.
pytorch_model.bin (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:af8d8de2dca513f6e123100437273c4937b894f920c5dbb1e7e0ab6a5d9a2f72
size 281632469
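These three lines are the Git LFS pointer, not the weights: the actual payload lives in LFS storage keyed by the sha256 oid, and `size` is its byte count. After downloading, the file can be verified against the pointer; a minimal sketch (the local path is an assumption):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MiB chunks so the ~281 MB payload never sits in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid from the pointer above.
expected = "af8d8de2dca513f6e123100437273c4937b894f920c5dbb1e7e0ab6a5d9a2f72"
assert sha256_of("pytorch_model.bin") == expected, "download is corrupt or incomplete"
```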
tokenizer.json (Normal file, 100529 lines)
File diff suppressed because it is too large.
tokenizer_config.json (Normal file, 9 lines)
@@ -0,0 +1,9 @@
{
  "add_prefix_space": false,
  "bos_token": "<|endoftext|>",
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|endoftext|>",
  "model_max_length": 1000000000000000019884624838656,
  "tokenizer_class": "GPTNeoXTokenizer",
  "unk_token": "<|endoftext|>"
}
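The outsized model_max_length is the sentinel transformers writes when a tokenizer declares no length limit (int(1e30), with float rounding); the effective context window is the 2048 set by max_position_embeddings in config.json. Loading goes through AutoTokenizer, which should resolve the GPTNeoXTokenizer name to its fast implementation; a minimal sketch:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HWERI/pythia-70m-deduped-cleansharegpt-en")

# bos, eos, and unk all map to the single <|endoftext|> token configured above.
print(tokenizer.eos_token, tokenizer.eos_token_id)
print(tokenizer("Hello, world!").input_ids)
```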
vocab.json (Normal file, 1 line)
File diff suppressed because one or more lines are too long.