Initialize the project; model provided by the ModelHub XC community
Model: tiny-random/llama-3.3-dim64 Source: Original Platform
36  .gitattributes  vendored  Normal file
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
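
Not part of the commit, but for orientation: these rules route the binary artifacts through Git LFS so only small text files live in plain git. A rough Python sketch of the matching logic, using `fnmatch` (which only approximates gitattributes globbing, e.g. it does not handle `saved_model/**/*` exactly; the pattern subset and paths below are just a sample):

```python
# Rough illustration only: gitattributes patterns behave like shell globs,
# approximated here with fnmatch.
from fnmatch import fnmatch

lfs_patterns = ["*.safetensors", "*.bin", "*.pt", "tokenizer.json", "*tfevents*"]

def is_lfs_tracked(path: str) -> bool:
    return any(fnmatch(path, pattern) for pattern in lfs_patterns)

for path in ["model.safetensors", "tokenizer.json", "config.json"]:
    print(path, "->", "LFS" if is_lfs_tracked(path) else "plain git")
```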
103  README.md  Normal file
@@ -0,0 +1,103 @@
---
library_name: transformers
pipeline_tag: text-generation
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
---

This tiny model is for debugging. It is randomly initialized, with a config adapted from [meta-llama/Llama-3.3-70B-Instruct](https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct).

### Example usage:

```python
from transformers import pipeline

model_id = "tiny-random/llama-3.3-dim64"
pipe = pipeline(
    "text-generation", model=model_id, device="cuda",
    trust_remote_code=True, max_new_tokens=3,
)
print(pipe("Hello World!"))
```
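
The pipeline call above assumes a CUDA device. As a sketch of an alternative (same model id, plain `generate` API) that runs on CPU:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "tiny-random/llama-3.3-dim64"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)  # CPU by default

inputs = tokenizer("Hello World!", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=3)
# Output is gibberish by design: the weights are random.
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```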

### Code to create this repo:

```python
import torch

from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    pipeline,
    set_seed,
)

source_model_id = "meta-llama/Llama-3.3-70B-Instruct"
save_folder = "/tmp/tiny-random/llama-3.3-dim64"

# Reuse the source model's tokenizer unchanged.
tokenizer = AutoTokenizer.from_pretrained(
    source_model_id, trust_remote_code=True,
)
tokenizer.save_pretrained(save_folder)

# Shrink the 70B config down to a two-layer, 64-dim toy model.
config = AutoConfig.from_pretrained(
    source_model_id, trust_remote_code=True,
)
config.hidden_size = 64
config.intermediate_size = 128
config.num_attention_heads = 2
config.num_key_value_heads = 1
config.head_dim = 32
config.num_hidden_layers = 2
config.tie_word_embeddings = True

model = AutoModelForCausalLM.from_config(
    config,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
model.generation_config = GenerationConfig.from_pretrained(
    source_model_id, trust_remote_code=True,
)

# Deterministic random init instead of trained weights.
set_seed(42)
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.2)
        print(name, p.shape)
model.save_pretrained(save_folder)
```
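
A hypothetical follow-up (not in the original repo) to sanity-check the result: reload the saved folder and count parameters.

```python
import torch
from transformers import AutoModelForCausalLM

save_folder = "/tmp/tiny-random/llama-3.3-dim64"  # same path as above
reloaded = AutoModelForCausalLM.from_pretrained(save_folder, torch_dtype=torch.bfloat16)
n_params = sum(p.numel() for p in reloaded.parameters())
print(f"{n_params:,} parameters")  # ~8.3M with tied embeddings
```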

### Printing the model:

```text
LlamaForCausalLM(
  (model): LlamaModel(
    (embed_tokens): Embedding(128256, 64)
    (layers): ModuleList(
      (0-1): 2 x LlamaDecoderLayer(
        (self_attn): LlamaAttention(
          (q_proj): Linear(in_features=64, out_features=64, bias=False)
          (k_proj): Linear(in_features=64, out_features=32, bias=False)
          (v_proj): Linear(in_features=64, out_features=32, bias=False)
          (o_proj): Linear(in_features=64, out_features=64, bias=False)
        )
        (mlp): LlamaMLP(
          (gate_proj): Linear(in_features=64, out_features=128, bias=False)
          (up_proj): Linear(in_features=64, out_features=128, bias=False)
          (down_proj): Linear(in_features=128, out_features=64, bias=False)
          (act_fn): SiLU()
        )
        (input_layernorm): LlamaRMSNorm((64,), eps=1e-05)
        (post_attention_layernorm): LlamaRMSNorm((64,), eps=1e-05)
      )
    )
    (norm): LlamaRMSNorm((64,), eps=1e-05)
    (rotary_emb): LlamaRotaryEmbedding()
  )
  (lm_head): Linear(in_features=64, out_features=128256, bias=False)
)
```
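
The checkpoint size below follows from this structure. A back-of-the-envelope sketch (values copied from config.json; with tied embeddings, lm_head adds no parameters):

```python
# Parameter count from the config in this repo; bfloat16 is 2 bytes per parameter.
vocab, hidden, inter, kv_dim, layers = 128256, 64, 128, 32, 2

embed = vocab * hidden                                  # embed_tokens (tied with lm_head)
attn = 2 * hidden * hidden + 2 * hidden * kv_dim        # q/o plus k/v projections
mlp = 3 * hidden * inter                                # gate, up, down
norms = 2 * hidden                                      # two RMSNorms per layer
total = embed + layers * (attn + mlp + norms) + hidden  # + final norm
print(total, "params;", 2 * total, "bytes")
# 8282432 params; 16564864 bytes -- model.safetensors is 16567032 with its header
```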
39  config.json  Normal file
@@ -0,0 +1,39 @@
{
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 128000,
  "eos_token_id": [
    128001,
    128008,
    128009
  ],
  "head_dim": 32,
  "hidden_act": "silu",
  "hidden_size": 64,
  "initializer_range": 0.02,
  "intermediate_size": 128,
  "max_position_embeddings": 131072,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 2,
  "num_hidden_layers": 2,
  "num_key_value_heads": 1,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "factor": 8.0,
    "high_freq_factor": 4.0,
    "low_freq_factor": 1.0,
    "original_max_position_embeddings": 8192,
    "rope_type": "llama3"
  },
  "rope_theta": 500000.0,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.51.3",
  "use_cache": true,
  "vocab_size": 128256
}
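
Not part of the commit: the attention widths in the model printout above follow from these fields. A sketch of the arithmetic, assuming the standard Llama grouped-query attention layout:

```python
# GQA projection widths implied by config.json.
num_attention_heads, num_key_value_heads, head_dim = 2, 1, 32

q_width = num_attention_heads * head_dim   # 64 -> q_proj/o_proj out_features
kv_width = num_key_value_heads * head_dim  # 32 -> k_proj/v_proj out_features
print(q_width, kv_width)
```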
13  generation_config.json  Normal file
@@ -0,0 +1,13 @@
{
  "bos_token_id": 128000,
  "do_sample": true,
  "eos_token_id": [
    128001,
    128008,
    128009
  ],
  "temperature": 0.6,
  "top_p": 0.9,
  "transformers_version": "4.51.3",
  "trust_remote_code": true
}
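
These sampling defaults are picked up automatically by `generate()`. As an illustration (repo id assumed as above), a per-call override sketch:

```python
from transformers import pipeline

pipe = pipeline("text-generation", model="tiny-random/llama-3.3-dim64")
# generation_config.json supplies the do_sample/temperature/top_p defaults;
# keyword arguments on the call override them.
print(pipe("Hello!", max_new_tokens=3, temperature=1.0, top_p=1.0))
```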
3  model.safetensors  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3b9eeff558ffba042b2780e6fb4926c2aec2769b9d918458f1f5d7f20aaeafad
size 16567032
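
This is a Git LFS pointer, not the weights themselves: three `key value` lines naming the real blob by hash and size. A hypothetical parser (the file path is assumed) just to show the format:

```python
# Minimal Git LFS pointer reader; each line is "key value".
def parse_lfs_pointer(text: str) -> dict:
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

with open("model.safetensors") as f:      # path assumed: a checked-out pointer file
    pointer = parse_lfs_pointer(f.read())
print(pointer["oid"], pointer["size"])    # sha256:3b9eef... 16567032
```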
23  special_tokens_map.json  Normal file
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<|begin_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|eot_id|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|finetune_right_pad_id|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
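
For reference (repo id assumed as above), these entries surface directly on the loaded tokenizer:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("tiny-random/llama-3.3-dim64")
print(tok.bos_token, tok.eos_token, tok.pad_token)
# <|begin_of_text|> <|eot_id|> <|finetune_right_pad_id|>
```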
3  tokenizer.json  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b
size 17209920
2064  tokenizer_config.json  Normal file
File diff suppressed because it is too large