Initialize the project; model provided by the ModelHub XC community
Model: Abdourakib/tinystories-gpt2-124m Source: Original Platform
.gitattributes (vendored, new file, 35 lines)
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
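The rules above route every binary weight and archive format through Git LFS. As a rough illustration only (Git resolves these attribute rules itself, not Python), here is a minimal sketch that approximates the simple `*.ext` patterns with `fnmatch` to check whether a given path would be LFS-tracked; the `saved_model/**/*` rule needs Git's own glob handling and is omitted.

```python
from fnmatch import fnmatch

# Simple "*.ext" patterns copied from the .gitattributes above (subset).
LFS_PATTERNS = [
    "*.bin", "*.ckpt", "*.h5", "*.onnx", "*.pt", "*.pth",
    "*.safetensors", "*.msgpack", "*tfevents*",
]

def routed_through_lfs(path: str) -> bool:
    """Approximate check: would this path match one of the LFS rules?"""
    return any(fnmatch(path, pattern) for pattern in LFS_PATTERNS)

print(routed_through_lfs("model.safetensors"))  # True
print(routed_through_lfs("config.json"))        # False
```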
README.md (new file, 69 lines)
@@ -0,0 +1,69 @@
---
language: en
tags:
- gpt2
- text-generation
- children-stories
- tinystories
license: mit
---

# TinyStories GPT2 124M

A GPT2 model trained from scratch on the TinyStories dataset to generate children's stories.

## Training Details
- **Base Architecture:** GPT2 (124M parameters)
- **Dataset:** karpathy/tinystories-gpt4-clean
- **Training Steps:** 100,000
- **Best Val Loss:** 1.1295
- **Hardware:** NVIDIA RTX PRO 6000 (G4)

## How To Use
```python
from transformers import GPT2LMHeadModel, GPT2TokenizerFast

model = GPT2LMHeadModel.from_pretrained("Abdourakib/tinystories-gpt2-124m")
tokenizer = GPT2TokenizerFast.from_pretrained("Abdourakib/tinystories-gpt2-124m")

prompt = "Once upon a time there was a little cat"
inputs = tokenizer(prompt, return_tensors="pt")

outputs = model.generate(
    inputs["input_ids"],
    max_new_tokens=200,
    temperature=0.8,
    top_p=0.9,
    do_sample=True,
    repetition_penalty=1.2,
    pad_token_id=tokenizer.eos_token_id,
)

story = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(story)
```

## Example Output

"Once upon a time there was a little cat called Mimi. She loved to play with her toys, but one day she got very sad because she couldn't find her favorite toy. They searched everywhere and finally found it under the bed! Mimi was so happy and hugged her mom tight."

## Limitations
- Generates children's stories only
- Works best with story-style prompts
- 512 token context window
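The Limitations section notes a 512 token context window (config.json below also sets `n_positions` to 512), while the tokenizer defaults to `model_max_length` 1024, so long prompts benefit from explicit truncation. A minimal sketch, reusing the `model` and `tokenizer` variables from the usage example above and leaving room for the generated tokens:

```python
# Minimal sketch, assuming `model` and `tokenizer` from the README example
# are already loaded. Prompt tokens plus max_new_tokens must stay within
# the model's 512-position context window.
MAX_POSITIONS = 512
MAX_NEW_TOKENS = 200

long_prompt = "Once upon a time " * 200  # deliberately longer than the window
inputs = tokenizer(
    long_prompt,
    return_tensors="pt",
    truncation=True,
    max_length=MAX_POSITIONS - MAX_NEW_TOKENS,  # keep room for generation
)

outputs = model.generate(
    inputs["input_ids"],
    max_new_tokens=MAX_NEW_TOKENS,
    do_sample=True,
    temperature=0.8,
    top_p=0.9,
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```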
config.json (new file, 36 lines)
@@ -0,0 +1,36 @@
{
  "activation_function": "gelu_new",
  "add_cross_attention": false,
  "architectures": [
    "GPT2LMHeadModel"
  ],
  "attn_pdrop": 0.1,
  "bos_token_id": 50256,
  "dropout": 0.1,
  "dtype": "float32",
  "embd_pdrop": 0.1,
  "eos_token_id": 50256,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
  "model_type": "gpt2",
  "n_ctx": 512,
  "n_embd": 768,
  "n_head": 12,
  "n_inner": null,
  "n_layer": 12,
  "n_positions": 512,
  "pad_token_id": null,
  "reorder_and_upcast_attn": false,
  "resid_pdrop": 0.1,
  "scale_attn_by_inverse_layer_idx": false,
  "scale_attn_weights": true,
  "summary_activation": null,
  "summary_first_dropout": 0.1,
  "summary_proj_to_labels": true,
  "summary_type": "cls_index",
  "summary_use_proj": true,
  "tie_word_embeddings": true,
  "transformers_version": "5.0.0",
  "use_cache": true,
  "vocab_size": 50265
}
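The config pins the 124M-parameter GPT2 shape (12 layers, 12 heads, 768-dim embeddings, 512 positions). As a quick sanity check, a minimal sketch that rebuilds the architecture from a local copy of this config.json and counts parameters; weights are randomly initialised and nothing is downloaded:

```python
from transformers import GPT2Config, GPT2LMHeadModel

# Minimal sketch, assuming config.json from this commit has been saved locally.
config = GPT2Config.from_json_file("config.json")
model = GPT2LMHeadModel(config)  # random weights; no checkpoint is loaded

n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.1f}M parameters")  # ~124M with tied input/output embeddings
```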
generation_config.json (new file, 9 lines)
@@ -0,0 +1,9 @@
{
  "_from_model_config": true,
  "bos_token_id": 50256,
  "eos_token_id": 50256,
  "output_attentions": false,
  "output_hidden_states": false,
  "transformers_version": "5.0.0",
  "use_cache": true
}
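generation_config.json only pins the <|endoftext|> bos/eos ids and default flags; the sampling settings shown in the README are passed at call time. A minimal sketch, assuming a local clone of the repo, that loads this file and folds those settings in so they can be reused across `generate()` calls:

```python
from transformers import GenerationConfig

# Minimal sketch, assuming the repo is cloned and generation_config.json
# sits in the current directory.
gen_config = GenerationConfig.from_pretrained(".")

# Fold in the sampling settings from the README usage example.
gen_config.do_sample = True
gen_config.temperature = 0.8
gen_config.top_p = 0.9
gen_config.repetition_penalty = 1.2
gen_config.max_new_tokens = 200

# Then: model.generate(inputs["input_ids"], generation_config=gen_config)
```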
model.safetensors (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c51ad2e5281c3136057571454a30ae9ca1084a6224986b7b31e7d515bb3f6244
size 496225920
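The file committed here is only a Git LFS pointer; the real 496,225,920-byte weight file is fetched by LFS. Since the oid is a plain SHA-256 of the file contents, a local download can be verified against the pointer. A minimal sketch:

```python
import hashlib
from pathlib import Path

# Values copied from the LFS pointer above.
EXPECTED_OID = "c51ad2e5281c3136057571454a30ae9ca1084a6224986b7b31e7d515bb3f6244"
EXPECTED_SIZE = 496225920

path = Path("model.safetensors")  # assumes the weights were pulled via git lfs
sha = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert path.stat().st_size == EXPECTED_SIZE, "size mismatch"
assert sha.hexdigest() == EXPECTED_OID, "checksum mismatch"
print("model.safetensors matches the LFS pointer")
```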
tokenizer.json (new file, 250306 lines)
File diff suppressed because it is too large
tokenizer_config.json (new file, 12 lines)
@@ -0,0 +1,12 @@
{
  "add_prefix_space": false,
  "backend": "tokenizers",
  "bos_token": "<|endoftext|>",
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "is_local": true,
  "model_max_length": 1024,
  "pad_token": "<|endoftext|>",
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>"
}
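Every special token in the tokenizer config maps to the single <|endoftext|> token, which is why the README example passes `pad_token_id=tokenizer.eos_token_id`. A minimal sketch, assuming a local clone with tokenizer.json and tokenizer_config.json present, that loads the tokenizer and prints those ids:

```python
from transformers import GPT2TokenizerFast

# Minimal sketch, assuming tokenizer.json and tokenizer_config.json from
# this commit are in the current directory.
tokenizer = GPT2TokenizerFast.from_pretrained(".")

# bos, eos, pad and unk all resolve to <|endoftext|> (id 50256 per config.json).
print(tokenizer.bos_token_id, tokenizer.eos_token_id,
      tokenizer.pad_token_id, tokenizer.unk_token_id)
print(tokenizer("Once upon a time")["input_ids"][:8])
```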