commit 2f2b6a957cb67f7f488604f657349f725f15147e Author: ModelHub XC Date: Mon Apr 27 18:20:52 2026 +0800 初始化项目,由ModelHub XC社区提供模型 Model: EleutherAI/pythia-70m-seed1 Source: Original Platform diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..82c74d4 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,50 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text + + +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text + +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zstandard filter=lfs diff=lfs merge=lfs -text +*.tfevents* filter=lfs diff=lfs merge=lfs -text +*.db* filter=lfs diff=lfs merge=lfs -text +*.ark* filter=lfs diff=lfs merge=lfs -text +**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text +**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text +**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.gguf* filter=lfs diff=lfs merge=lfs -text +*.ggml filter=lfs diff=lfs merge=lfs -text +*.llamafile* filter=lfs diff=lfs merge=lfs -text +*.pt2 filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs 
diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text + +tokenizer.json filter=lfs diff=lfs merge=lfs -text +pytorch_model.bin filter=lfs diff=lfs merge=lfs -text \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..757731e --- /dev/null +++ b/README.md @@ -0,0 +1,170 @@ +--- +language: + - en +tags: + - pytorch + - causal-lm + - pythia + - polypythias +license: apache-2.0 +datasets: + - EleutherAI/pile + - EleutherAI/pile-preshuffled-seeds +library_name: transformers +arxiv: 2503.09543 +--- + +# PolyPythias + +This model is part of the **PolyPythias** suite, an extension of the [Pythia](https://github.com/EleutherAI/pythia) project providing 45 additional training runs across 5 model sizes with 9 different random seeds each. These models enable systematic study of training stability and reproducibility in language models. + +## Paper + +**[PolyPythias: Stability and Outliers across Fifty Language Model Pre-Training Runs](https://arxiv.org/abs/2503.09543)** + +Oskar van der Wal, Pietro Lesci, Max Muller-Eberstein, Naomi Saphra, Hailey Schoelkopf, Willem Zuidema, and Stella Biderman. *ICLR 2025*. 
+ +## Model Details + +| Size | Parameters | Layers | Model Dim | Heads | Original Model | +|------|------------|--------|-----------|-------|----------------| +| 14M | 14M | 6 | 128 | 4 | [pythia-14m](https://huggingface.co/EleutherAI/pythia-14m) | +| 31M | 31M | 6 | 256 | 8 | [pythia-31m](https://huggingface.co/EleutherAI/pythia-31m) | +| 70M | 70M | 6 | 512 | 8 | [pythia-70m](https://huggingface.co/EleutherAI/pythia-70m) | +| 160M | 160M | 12 | 768 | 12 | [pythia-160m](https://huggingface.co/EleutherAI/pythia-160m) | +| 410M | 410M | 24 | 1024 | 16 | [pythia-410m](https://huggingface.co/EleutherAI/pythia-410m) | + +All models were trained on 300B tokens from [The Pile](https://pile.eleuther.ai/). + +## Naming Convention + +- **`pythia-{size}m`** - Original Pythia model (seed 1234) +- **`pythia-{size}m-seed{1-9}`** - PolyPythias variants with different random seeds +- **`pythia-160m-data-seed{1-3}`** - 160M models with only data ordering varied (weight init fixed) +- **`pythia-160m-weight-seed{1-3}`** - 160M models with only weight initialization varied (data order fixed) + +The decoupled seed variants (data-seed and weight-seed) allow researchers to separately study the effects of data ordering vs. weight initialization. 
+ +## Quick Start + +```python +from transformers import GPTNeoXForCausalLM, AutoTokenizer + +# Load the final checkpoint +model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-70m-seed1") +tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m-seed1") + +# Generate text +inputs = tokenizer("The quick brown fox", return_tensors="pt") +outputs = model.generate(**inputs, max_new_tokens=20) +print(tokenizer.decode(outputs[0])) +``` + +## Available Checkpoints + +Each model provides **154 intermediate checkpoints** saved as Git branches: + +| Checkpoint | Training Tokens | Description | +|------------|-----------------|-------------| +| `step0` | 0 | Initialization (before training) | +| `step1`, `step2`, `step4`, ..., `step512` | 2M - 1B | 10 log-spaced early checkpoints | +| `step1000`, `step2000`, ..., `step143000` | 2B - 300B | 143 evenly-spaced checkpoints | + +To load a specific checkpoint: + +```python +model = GPTNeoXForCausalLM.from_pretrained( + "EleutherAI/pythia-70m-seed1", + revision="step50000", # Any checkpoint step +) +``` + +## Training Data + +All models were trained on The Pile using pre-shuffled data orderings. The shuffled index files for each seed are available at: + +**[EleutherAI/pile-preshuffled-seeds](https://huggingface.co/datasets/EleutherAI/pile-preshuffled-seeds)** + +This dataset contains `.idx` files for seeds 0-9 used with `MMapIndexedDataset` to load the memory-mapped Pile data in the correct order for each seed. + +### Reproducing Training Data Order + +To reproduce the exact data ordering used for a specific seed: + +1. Download the Pile dataset and tokenize it using the Pythia tokenizer +2. 
Download the corresponding seed folder from `pile-preshuffled-seeds`: + ```python + # Using huggingface_hub + from huggingface_hub import snapshot_download + snapshot_download( + repo_id="EleutherAI/pile-preshuffled-seeds", + repo_type="dataset", + allow_patterns="seed1/*", # Download only seed1 + local_dir="./pile-seeds" + ) + ``` +3. Use the idx files with GPT-NeoX's `MMapIndexedDataset`: + ```python + from dataset import MMapIndexedDataset + dataset = MMapIndexedDataset(path_prefix, skip_warmup=True) + ``` + +For complete training reproduction instructions, see the [Pythia GitHub repository](https://github.com/EleutherAI/pythia). + +## All PolyPythias Models + +The complete collection is available at: [EleutherAI/polypythias](https://huggingface.co/collections/EleutherAI/polypythias) + +### 14M Parameter Models +- [pythia-14m-seed1](https://huggingface.co/EleutherAI/pythia-14m-seed1) through [pythia-14m-seed9](https://huggingface.co/EleutherAI/pythia-14m-seed9) + +### 31M Parameter Models +- [pythia-31m-seed1](https://huggingface.co/EleutherAI/pythia-31m-seed1) through [pythia-31m-seed9](https://huggingface.co/EleutherAI/pythia-31m-seed9) + +### 70M Parameter Models +- [pythia-70m-seed1](https://huggingface.co/EleutherAI/pythia-70m-seed1) through [pythia-70m-seed9](https://huggingface.co/EleutherAI/pythia-70m-seed9) + +### 160M Parameter Models +- [pythia-160m-seed1](https://huggingface.co/EleutherAI/pythia-160m-seed1) through [pythia-160m-seed9](https://huggingface.co/EleutherAI/pythia-160m-seed9) +- [pythia-160m-data-seed1](https://huggingface.co/EleutherAI/pythia-160m-data-seed1) through [pythia-160m-data-seed3](https://huggingface.co/EleutherAI/pythia-160m-data-seed3) +- [pythia-160m-weight-seed1](https://huggingface.co/EleutherAI/pythia-160m-weight-seed1) through [pythia-160m-weight-seed3](https://huggingface.co/EleutherAI/pythia-160m-weight-seed3) + +### 410M Parameter Models +- [pythia-410m-seed1](https://huggingface.co/EleutherAI/pythia-410m-seed1) through 
[pythia-410m-seed9](https://huggingface.co/EleutherAI/pythia-410m-seed9) + +## Evaluation Results + +Evaluation results for all models are available in the [polypythias-evals](https://huggingface.co/datasets/EleutherAI/polypythias-evals) dataset. + +## Limitations + +These models are released for research purposes only. They are **not** intended for deployment in production systems. + +- **Not instruction-tuned**: These are base language models that predict the next token; they will not follow instructions like ChatGPT +- **May generate harmful content**: The Pile contains diverse internet text that includes biased, offensive, and factually incorrect content +- **English only**: Models were trained primarily on English text +- **No safety filtering**: Outputs are not filtered for safety or accuracy + +## License + +Apache 2.0 + +## Contact + +For questions about these models, please use: +- [EleutherAI Discord](https://discord.gg/eleutherai) - #release-discussion channel +- [GitHub Issues](https://github.com/EleutherAI/pythia/issues) + +## Citation + +If you use these models, please cite: + +```bibtex +@inproceedings{vanderwal2025polypythias, + title={PolyPythias: Stability and Outliers across Fifty Language Model Pre-Training Runs}, + author={van der Wal, Oskar and Lesci, Pietro and Muller-Eberstein, Max and Saphra, Naomi and Schoelkopf, Hailey and Zuidema, Willem and Biderman, Stella}, + booktitle={International Conference on Learning Representations}, + year={2025}, + url={https://arxiv.org/abs/2503.09543} +} +``` diff --git a/config.json b/config.json new file mode 100644 index 0000000..0e26126 --- /dev/null +++ b/config.json @@ -0,0 +1,25 @@ +{ + "architectures": [ + "GPTNeoXForCausalLM" + ], + "bos_token_id": 0, + "classifier_dropout": 0.1, + "eos_token_id": 0, + "hidden_act": "gelu", + "hidden_size": 512, + "initializer_range": 0.02, + "intermediate_size": 2048, + "layer_norm_eps": 1e-05, + "max_position_embeddings": 2048, + "model_type": "gpt_neox", + 
"num_attention_heads": 8, + "num_hidden_layers": 6, + "rotary_emb_base": 10000, + "rotary_pct": 0.25, + "tie_word_embeddings": false, + "torch_dtype": "float16", + "transformers_version": "4.30.2", + "use_cache": true, + "use_parallel_residual": true, + "vocab_size": 50304 +} diff --git a/configuration.json b/configuration.json new file mode 100644 index 0000000..159097f --- /dev/null +++ b/configuration.json @@ -0,0 +1 @@ +{"framework": "pytorch", "task": "others", "allow_remote": true} \ No newline at end of file diff --git a/generation_config.json b/generation_config.json new file mode 100644 index 0000000..fe8b129 --- /dev/null +++ b/generation_config.json @@ -0,0 +1,6 @@ +{ + "_from_model_config": true, + "bos_token_id": 0, + "eos_token_id": 0, + "transformers_version": "4.30.2" +} diff --git a/pytorch_model.bin b/pytorch_model.bin new file mode 100644 index 0000000..e590f89 --- /dev/null +++ b/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:680344e49722e363dfd20c239783f153007993c8e388b286e4c55ea759c124a8 +size 166049099 diff --git a/special_tokens_map.json b/special_tokens_map.json new file mode 100644 index 0000000..0204ed1 --- /dev/null +++ b/special_tokens_map.json @@ -0,0 +1,5 @@ +{ + "bos_token": "<|endoftext|>", + "eos_token": "<|endoftext|>", + "unk_token": "<|endoftext|>" +} diff --git a/tokenizer.json b/tokenizer.json new file mode 100644 index 0000000..79c8a4c --- /dev/null +++ b/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3cf430678137c8491ca82fb7092ee49e44ad38857fffe1e4a4a5ed860139a5b8 +size 2113738 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..a83f8f7 --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,5 @@ +{ + "clean_up_tokenization_spaces": true, + "model_max_length": 1000000000000000019884624838656, + "tokenizer_class": "PreTrainedTokenizerFast" +}