Initialize the project; model provided by the ModelHub XC community
Model: benchang1110/Taiwan-tinyllama-v1.1-base
Source: Original Platform
36  .gitattributes  vendored  Normal file
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
image.png filter=lfs diff=lfs merge=lfs -text
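Every path matched by one of these rules is stored as a Git LFS pointer rather than inline. A rough illustration of the matching, a sketch only: Python's `fnmatch` approximates but does not exactly reproduce gitattributes globbing (e.g. `**` and directory semantics differ).

```python
from fnmatch import fnmatch

# Subset of the patterns above, for illustration.
LFS_PATTERNS = ["*.safetensors", "*.bin", "*.model", "*tfevents*", "image.png"]

for name in ["model.safetensors", "tokenizer.model", "config.json", "image.png"]:
    tracked = any(fnmatch(name, p) for p in LFS_PATTERNS)
    print(f"{name}: {'LFS pointer' if tracked else 'stored inline'}")
```

Consistent with this commit: `config.json` is checked in as a normal file, while `model.safetensors`, `tokenizer.model`, and `image.png` appear below only as three-line LFS pointers.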
65  README.md  Normal file
@@ -0,0 +1,65 @@
---
base_model:
- TinyLlama/TinyLlama_v1.1
datasets:
- benchang1110/Taiwan-pretrain-9B
- benchang1110/Taiwan-book-1B
language:
- zh
library_name: transformers
license: apache-2.0
---

# Model Card for benchang1110/Taiwan-tinyllama-v1.1-base



This is a continually pre-trained version of [TinyLlama-v1.1](https://huggingface.co/TinyLlama/TinyLlama_v1.1) tailored for Traditional Chinese. The continued-pretraining corpus contains over 10B tokens; with bfloat16, inference needs only around 3 GB of VRAM.

# Usage

**This is a causal language model, not a chat model!** It is not designed to generate human-like responses; it generates a continuation of the preceding text.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
import torch


def generate_response(prompt):
    '''
    Simple smoke test for the model.
    '''
    # Tokenize the prompt.
    tokenized_input = tokenizer.encode_plus(prompt, return_tensors='pt').to(device)
    print(tokenized_input['input_ids'])
    # Generate a continuation and stream it to stdout.
    _ = model.generate(
        input_ids=tokenized_input['input_ids'],
        attention_mask=tokenized_input['attention_mask'],
        pad_token_id=tokenizer.pad_token_id,
        do_sample=True,
        repetition_penalty=1.0,
        max_length=2048,
        streamer=streamer,
    )


if __name__ == '__main__':
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = AutoModelForCausalLM.from_pretrained(
        "benchang1110/Taiwan-tinyllama-v1.1-base",
        attn_implementation="flash_attention_2",
        device_map=device,
        torch_dtype=torch.bfloat16,
    )
    tokenizer = AutoTokenizer.from_pretrained("benchang1110/Taiwan-tinyllama-v1.1-base", use_fast=True)
    streamer = TextStreamer(tokenizer)
    while True:
        text = input("Input a simple prompt: ")
        generate_response(text)
```

### Training Procedure

The following training hyperparameters were used:

| Data size | Global Batch Size | Learning Rate | Epochs | Max Length | Weight Decay |
|-----------|-------------------|---------------|--------|------------|--------------|
| 10B       | 32                | 5e-5          | 1      | 2048       | 1e-4         |


### Compute Infrastructure

1x A100 (80GB); training took approximately 200 GPU hours.
30  config.json  Normal file
@@ -0,0 +1,30 @@
{
    "_name_or_path": "./checkpoint/checkpoint-80000",
    "architectures": [
        "LlamaForCausalLM"
    ],
    "attention_bias": false,
    "attention_dropout": 0.0,
    "bos_token_id": 1,
    "eos_token_id": 2,
    "head_dim": 64,
    "hidden_act": "silu",
    "hidden_size": 2048,
    "initializer_range": 0.02,
    "intermediate_size": 5632,
    "max_position_embeddings": 2048,
    "mlp_bias": false,
    "model_type": "llama",
    "num_attention_heads": 32,
    "num_hidden_layers": 22,
    "num_key_value_heads": 4,
    "pretraining_tp": 1,
    "rms_norm_eps": 1e-05,
    "rope_scaling": null,
    "rope_theta": 10000.0,
    "tie_word_embeddings": false,
    "torch_dtype": "bfloat16",
    "transformers_version": "4.48.0",
    "use_cache": true,
    "vocab_size": 32000
}
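These dimensions pin down the parameter count. A quick sanity check, a sketch assuming a local copy of `config.json` (and ignoring the small RMSNorm weights): the count implied by the config matches the `model.safetensors` size below at two bytes per bfloat16 parameter.

```python
import json

cfg = json.load(open("config.json"))
h, L = cfg["hidden_size"], cfg["num_hidden_layers"]
kv = cfg["num_key_value_heads"] * cfg["head_dim"]        # 4 * 64 = 256 (GQA)

embed = cfg["vocab_size"] * h * 2                        # input embeddings + untied lm_head
attn = 2 * h * h + 2 * h * kv                            # q/o projections + narrow k/v
mlp = 3 * h * cfg["intermediate_size"]                   # gate, up, down
params = embed + L * (attn + mlp)
print(f"~{params / 1e9:.2f}B params, ~{2 * params / 1e9:.2f} GB in bf16")
# -> ~1.10B params, ~2.20 GB: consistent with model.safetensors (2,200,119,864 bytes)
```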
8  generation_config.json  Normal file
@@ -0,0 +1,8 @@
{
    "attn_implementation": "flash_attention_2",
    "bos_token_id": 1,
    "eos_token_id": 2,
    "max_length": 2048,
    "pad_token_id": 0,
    "transformers_version": "4.48.0"
}
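These are the defaults that `model.generate()` falls back to when no overrides are passed. A quick way to inspect them, assuming the repo is reachable or already cached:

```python
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("benchang1110/Taiwan-tinyllama-v1.1-base")
# 2048 and 0: generation caps at 2048 total tokens and pads with token id 0 (<unk>).
print(gen.max_length, gen.pad_token_id)
```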
3  image.png  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:05b2d7eb99135708832e0a691e8c870baec69df74cd83594df360730fbc8e45c
size 8487229
3  model.safetensors  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1b442eb620531d6568e291458cf5fa7dd4b55a342d6ead6a0279b2378e3c3b77
size 2200119864
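In a checkout made without `git lfs pull`, the file on disk is just this three-line pointer, not the weights. A minimal sketch that detects and parses one (`parse_lfs_pointer` is a hypothetical helper, not part of any library):

```python
def parse_lfs_pointer(path):
    """Return {'oid': ..., 'size': ...} if `path` holds an LFS pointer, else None."""
    with open(path, "rb") as f:
        head = f.read(200).decode("utf-8", errors="replace")
    if not head.startswith("version https://git-lfs.github.com/spec/v1"):
        return None  # real payload, not a pointer
    fields = dict(line.split(" ", 1) for line in head.strip().splitlines())
    return {"oid": fields["oid"], "size": int(fields["size"])}

print(parse_lfs_pointer("model.safetensors"))
# Before `git lfs pull`: {'oid': 'sha256:1b44...', 'size': 2200119864}
```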
23  special_tokens_map.json  Normal file
@@ -0,0 +1,23 @@
{
    "bos_token": {
        "content": "<s>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "eos_token": {
        "content": "</s>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "unk_token": {
        "content": "<unk>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    }
}
3  tokenizer.model  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
45  tokenizer_config.json  Normal file
@@ -0,0 +1,45 @@
{
    "add_bos_token": true,
    "add_eos_token": false,
    "add_prefix_space": true,
    "added_tokens_decoder": {
        "0": {
            "content": "<unk>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "1": {
            "content": "<s>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "2": {
            "content": "</s>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        }
    },
    "bos_token": "<s>",
    "clean_up_tokenization_spaces": false,
    "eos_token": "</s>",
    "extra_special_tokens": {},
    "legacy": false,
    "model_max_length": 1000000000000000019884624838656,
    "pad_token": null,
    "padding_side": "right",
    "sp_model_kwargs": {},
    "spaces_between_special_tokens": false,
    "tokenizer_class": "LlamaTokenizer",
    "unk_token": "<unk>",
    "use_default_system_prompt": false,
    "use_fast": false
}
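`add_bos_token: true` with `add_eos_token: false` means every encoded prompt starts with `<s>` (id 1) and carries no trailing `</s>` (id 2); since `pad_token` is null here, batched generation relies on the `pad_token_id: 0` from generation_config.json. A quick check, assuming the repo is reachable or already cached:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("benchang1110/Taiwan-tinyllama-v1.1-base")
ids = tok.encode("台灣")
print(ids[0])          # 1 -> <s> prepended because add_bos_token is true
print(ids[-1] != 2)    # True -> no </s> appended because add_eos_token is false
print(tok.pad_token)   # None -> callers must supply a pad_token_id themselves
```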