Initialize project; model provided by the ModelHub XC community
Model: zhou778899/test_case_ai
Source: Original Platform
53 .gitattributes vendored Normal file
@@ -0,0 +1,53 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*.tfevents* filter=lfs diff=lfs merge=lfs -text
*.db* filter=lfs diff=lfs merge=lfs -text
*.ark* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.gguf* filter=lfs diff=lfs merge=lfs -text
*.ggml filter=lfs diff=lfs merge=lfs -text
*.llamafile* filter=lfs diff=lfs merge=lfs -text
*.pt2 filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

._____temp/deploy_result/20250414-081029.jsonl filter=lfs diff=lfs merge=lfs -text
deploy_result/20250414-081029.jsonl filter=lfs diff=lfs merge=lfs -text

._____temp/deploy_result/20250414-081029.jsonl filter=lfs diff=lfs merge=lfs -text
deploy_result/20250414-081029.jsonl filter=lfs diff=lfs merge=lfs -text
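Note: every rule above routes matching files through Git LFS, so the repository itself stores only small pointer files (see the model-0000x-of-00004.safetensors entries below). A minimal sketch, assuming only the Python standard library, of listing which files in a checkout these patterns would capture (fnmatch only approximates Git's wildmatch, so treat the output as indicative):

import fnmatch
import pathlib

# Patterns that .gitattributes sends through the LFS filter.
patterns = [
    line.split()[0]
    for line in pathlib.Path(".gitattributes").read_text().splitlines()
    if "filter=lfs" in line
]

for path in pathlib.Path(".").rglob("*"):
    # Matching against the basename approximates simple "*.ext" rules.
    if path.is_file() and any(fnmatch.fnmatch(path.name, pat) for pat in patterns):
        print(path, "-> handled by Git LFS")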
2 .gitignore vendored Normal file
@@ -0,0 +1,2 @@
runs/
images/
16 added_tokens.json Normal file
@@ -0,0 +1,16 @@
{
  "<eop>": 151334,
  "<sop>": 151333,
  "<|assistant|>": 151337,
  "<|begin_of_image|>": 151339,
  "<|begin_of_video|>": 151341,
  "<|end_of_image|>": 151340,
  "<|end_of_video|>": 151342,
  "<|endoftext|>": 151329,
  "<|observation|>": 151338,
  "<|system|>": 151335,
  "<|user|>": 151336,
  "[MASK]": 151330,
  "[gMASK]": 151331,
  "[sMASK]": 151332
}
52 config.json Normal file
@@ -0,0 +1,52 @@
{
  "add_bias_linear": false,
  "add_qkv_bias": true,
  "apply_query_key_layer_scaling": true,
  "apply_residual_connection_post_layernorm": false,
  "architectures": [
    "ChatGLMForConditionalGeneration"
  ],
  "attention_dropout": 0.0,
  "attention_softmax_in_fp32": true,
  "auto_map": {
    "AutoConfig": "configuration_chatglm.ChatGLMConfig",
    "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration",
    "AutoModelForCausalLM": "modeling_chatglm.ChatGLMForConditionalGeneration",
    "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration",
    "AutoModelForSequenceClassification": "modeling_chatglm.ChatGLMForSequenceClassification"
  },
  "bias_dropout_fusion": true,
  "classifier_dropout": null,
  "eos_token_id": [
    151329,
    151336,
    151338
  ],
  "ffn_hidden_size": 13696,
  "fp32_residual_connection": false,
  "hidden_dropout": 0.0,
  "hidden_size": 4096,
  "keys_to_ignore_at_inference": [
    "past_key_values"
  ],
  "kv_channels": 128,
  "layernorm_epsilon": 1.5625e-07,
  "model_type": "chatglm",
  "multi_query_attention": true,
  "multi_query_group_num": 2,
  "num_attention_heads": 32,
  "num_hidden_layers": 40,
  "num_layers": 40,
  "original_rope": true,
  "pad_token_id": 151329,
  "padded_vocab_size": 151552,
  "post_layer_norm": true,
  "rmsnorm": true,
  "rope_ratio": 500,
  "seq_length": 131072,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.51.2",
  "use_cache": true,
  "vocab_size": 151552
}
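Note: auto_map above dispatches AutoConfig to the repo-local configuration_chatglm.ChatGLMConfig, so loading this config through transformers needs trust_remote_code=True. A short sketch (the repo id is taken from this page; a local path works the same way):

from transformers import AutoConfig

# trust_remote_code lets transformers import configuration_chatglm.py from the repo.
config = AutoConfig.from_pretrained("zhou778899/test_case_ai", trust_remote_code=True)
print(config.num_layers, config.hidden_size, config.seq_length)  # 40 4096 131072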
1 configuration.json Normal file
@@ -0,0 +1 @@
{"framework": "pytorch", "task": "text-generation", "allow_remote": true}
58 configuration_chatglm.py Normal file
@@ -0,0 +1,58 @@
from transformers import PretrainedConfig


class ChatGLMConfig(PretrainedConfig):
    model_type = "chatglm"

    def __init__(
        self,
        num_layers=28,
        padded_vocab_size=65024,
        hidden_size=4096,
        ffn_hidden_size=13696,
        kv_channels=128,
        num_attention_heads=32,
        seq_length=2048,
        hidden_dropout=0.0,
        classifier_dropout=None,
        attention_dropout=0.0,
        layernorm_epsilon=1e-5,
        rmsnorm=True,
        apply_residual_connection_post_layernorm=False,
        post_layer_norm=True,
        add_bias_linear=False,
        add_qkv_bias=False,
        bias_dropout_fusion=True,
        multi_query_attention=False,
        multi_query_group_num=1,
        rope_ratio=1,
        apply_query_key_layer_scaling=True,
        attention_softmax_in_fp32=True,
        fp32_residual_connection=False,
        **kwargs
    ):
        self.num_layers = num_layers
        self.vocab_size = padded_vocab_size
        self.padded_vocab_size = padded_vocab_size
        self.hidden_size = hidden_size
        self.ffn_hidden_size = ffn_hidden_size
        self.kv_channels = kv_channels
        self.num_attention_heads = num_attention_heads
        self.seq_length = seq_length
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.attention_dropout = attention_dropout
        self.layernorm_epsilon = layernorm_epsilon
        self.rmsnorm = rmsnorm
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.post_layer_norm = post_layer_norm
        self.add_bias_linear = add_bias_linear
        self.add_qkv_bias = add_qkv_bias
        self.bias_dropout_fusion = bias_dropout_fusion
        self.multi_query_attention = multi_query_attention
        self.multi_query_group_num = multi_query_group_num
        self.rope_ratio = rope_ratio
        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.fp32_residual_connection = fp32_residual_connection
        super().__init__(**kwargs)
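Note: the defaults in this class describe a smaller ChatGLM variant (28 layers, 65024 vocab); config.json overrides them for this checkpoint. A hypothetical direct construction mirroring those overrides:

# Hypothetical: assumes configuration_chatglm.py is importable from the working directory.
from configuration_chatglm import ChatGLMConfig

config = ChatGLMConfig(
    num_layers=40,
    padded_vocab_size=151552,
    seq_length=131072,
    add_qkv_bias=True,
    multi_query_attention=True,
    multi_query_group_num=2,
    rope_ratio=500,
)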
13 generation_config.json Normal file
@@ -0,0 +1,13 @@
{
  "do_sample": true,
  "eos_token_id": [
    151329,
    151336,
    151338
  ],
  "max_length": 128000,
  "pad_token_id": 151329,
  "temperature": 0.8,
  "top_p": 0.8,
  "transformers_version": "4.51.2"
}
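Note: these are the sampling defaults that model.generate picks up automatically; they can also be inspected on their own. A sketch, assuming a local checkout of this repo:

from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained(".")  # reads generation_config.json
print(gen_config.temperature, gen_config.top_p, gen_config.eos_token_id)
# 0.8 0.8 [151329, 151336, 151338]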
3 model-00001-of-00004.safetensors Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b97c090d17eb9d025ff5fbe8ef54577923f0d78630c9980998178ad1eb675fdb
size 4984147224
3 model-00002-of-00004.safetensors Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aa986163e1bbaee06261a9c8419927aa02b67c20ca8fe3ed4587d2c7c37927f9
size 4895071360
3 model-00003-of-00004.safetensors Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e5369c3d87ba0f782ba6cda95282e9419401bb998d08875d39f173d30935a967
size 4895071384
3 model-00004-of-00004.safetensors Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:14b3256c47e9ab10f4a3aa68a23375ea213ea6f56f0599a1ec98243d0ad4472f
size 4025651256
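Note: the four shard entries above are Git LFS pointer files (version, oid, size), not the weights themselves; the real binaries are fetched on git lfs pull. A sketch that parses un-smudged pointers and totals the shard sizes:

import glob

total = 0
for name in sorted(glob.glob("model-*-of-00004.safetensors")):
    # Each pointer file is three "key value" lines: version, oid, size.
    fields = dict(line.split(" ", 1) for line in open(name).read().splitlines() if line)
    print(name, fields["oid"], fields["size"])
    total += int(fields["size"])
print("total shard bytes:", total)  # roughly 18.8 GB for this checkpoint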
291 model.safetensors.index.json Normal file
@@ -0,0 +1,291 @@
{
  "metadata": {
    "total_size": 18799902784
  },
  "weight_map": {
    "transformer.embedding.word_embeddings.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.final_layernorm.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.0.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.0.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.0.self_attention.dense.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.0.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.0.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.1.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.1.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.1.self_attention.dense.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.1.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.1.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.10.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.10.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.10.self_attention.dense.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.10.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.10.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.11.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.11.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.11.self_attention.dense.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.11.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.11.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.12.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.12.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.12.self_attention.dense.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.12.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.12.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.13.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.13.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.13.self_attention.dense.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.13.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.13.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.14.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.14.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.14.self_attention.dense.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.14.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.14.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.15.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.15.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.15.self_attention.dense.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.15.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.15.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.16.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.16.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.16.self_attention.dense.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.16.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.16.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.17.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.17.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.17.self_attention.dense.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.17.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.17.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.18.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.18.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.18.self_attention.dense.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.18.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.18.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.19.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.19.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.19.self_attention.dense.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.19.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.19.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.2.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.2.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.2.self_attention.dense.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.2.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.2.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.20.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.20.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.20.self_attention.dense.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.20.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.20.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.21.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.21.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.21.self_attention.dense.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.21.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.21.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.22.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.22.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.22.self_attention.dense.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.22.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.22.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.23.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.23.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.23.self_attention.dense.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.23.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.23.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.24.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.24.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.24.self_attention.dense.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.24.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.24.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.25.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.25.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.25.self_attention.dense.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.25.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.25.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.26.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.26.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.26.self_attention.dense.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.26.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.26.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.27.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.27.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.27.self_attention.dense.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.27.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.27.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.28.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.28.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.28.self_attention.dense.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.28.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.28.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.29.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.29.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.29.self_attention.dense.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.29.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.29.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.3.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.3.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.3.self_attention.dense.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.3.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.3.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.30.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.30.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.30.self_attention.dense.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.30.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.30.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.31.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.31.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.31.self_attention.dense.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.31.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.31.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.32.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.32.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.32.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.32.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.32.self_attention.dense.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.32.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.32.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.33.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.33.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.33.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.33.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.33.self_attention.dense.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.33.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.33.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
    "transformer.encoder.layers.34.input_layernorm.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.34.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.34.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.34.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.34.self_attention.dense.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.34.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.34.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.35.input_layernorm.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.35.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.35.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.35.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.35.self_attention.dense.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.35.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.35.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.36.input_layernorm.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.36.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.36.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.36.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.36.self_attention.dense.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.36.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.36.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.37.input_layernorm.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.37.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.37.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.37.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.37.self_attention.dense.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.37.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.37.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.38.input_layernorm.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.38.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.38.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.38.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.38.self_attention.dense.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.38.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.38.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.39.input_layernorm.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.39.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.39.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.39.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.39.self_attention.dense.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.39.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.39.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors",
    "transformer.encoder.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.4.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.4.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.4.self_attention.dense.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.4.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.4.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.5.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.5.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.5.self_attention.dense.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.5.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.5.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.6.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.6.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.6.self_attention.dense.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.6.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.6.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.7.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.7.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.7.self_attention.dense.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.7.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.7.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.8.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.8.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.8.self_attention.dense.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.8.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.8.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.9.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.9.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
    "transformer.encoder.layers.9.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.9.self_attention.dense.weight": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.9.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
    "transformer.encoder.layers.9.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
    "transformer.output_layer.weight": "model-00004-of-00004.safetensors",
    "transformer.rotary_pos_emb.inv_freq": "model-00001-of-00004.safetensors"
  }
}
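Note: weight_map is what transformers consults at load time to open only the shard that holds each tensor; the same lookup works by hand:

import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

# Which shard holds the output projection?
print(index["weight_map"]["transformer.output_layer.weight"])
# -> model-00004-of-00004.safetensors
print(index["metadata"]["total_size"])  # 18799902784 bytes of tensor data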
1138 modeling_chatglm.py Normal file
File diff suppressed because it is too large
32 special_tokens_map.json Normal file
@@ -0,0 +1,32 @@
{
  "additional_special_tokens": [
    "<|endoftext|>",
    "[MASK]",
    "[gMASK]",
    "[sMASK]",
    "<sop>",
    "<eop>",
    "<|system|>",
    "<|user|>",
    "<|assistant|>",
    "<|observation|>",
    "<|begin_of_image|>",
    "<|end_of_image|>",
    "<|begin_of_video|>",
    "<|end_of_video|>"
  ],
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
224 tokenization_chatglm.py Normal file
@@ -0,0 +1,224 @@
import regex as re
import base64
import os
import tiktoken
from typing import List, Optional, Union, Dict
from transformers import PreTrainedTokenizer
from transformers.utils import PaddingStrategy
from transformers.tokenization_utils_base import EncodedInput, BatchEncoding


class ChatGLM4Tokenizer(PreTrainedTokenizer):
    vocab_files_names = {"vocab_file": "tokenizer.model"}
    model_input_names = ["input_ids", "attention_mask", "position_ids"]

    def __init__(
        self,
        vocab_file,
        clean_up_tokenization_spaces=False,
        **kwargs
    ):
        self.name = "GLM4Tokenizer"
        self.vocab_file = vocab_file
        pat_str = "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
        self.pat_str = re.compile(pat_str)

        # tokenizer.model stores "base64(token) rank" pairs, one per line.
        mergeable_ranks = {}
        with open(vocab_file) as f:
            for line in f:
                token, rank = line.strip().split()
                rank = int(rank)
                token = base64.b64decode(token)
                mergeable_ranks[token] = rank

        self.mergeable_ranks = mergeable_ranks

        self.tokenizer = tiktoken.Encoding(
            name="my_tokenizer",
            pat_str=pat_str,
            mergeable_ranks=mergeable_ranks,
            special_tokens={}
        )
        self.decoder = {rank: token for token, rank in mergeable_ranks.items()}
        self.n_words = len(self.decoder)

        super().__init__(
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs
        )

    @property
    def vocab_size(self):
        return self.n_words

    def get_vocab(self):
        """ Returns vocab as a dict """
        vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def convert_tokens_to_string(self, tokens: List[Union[bytes, str, int]]) -> str:
        """
        Converts a sequence of tokens into a single string.
        """
        text = ""
        temp = b""
        for t in tokens:
            if isinstance(t, int):
                t = chr(t)
            if isinstance(t, str):
                if temp:
                    text += temp.decode("utf-8", errors="replace")
                    temp = b""
                text += t
            elif isinstance(t, bytes):
                temp += t
            else:
                raise TypeError("token should only be of type int, bytes or str")
        if temp:
            text += temp.decode("utf-8", errors="replace")
        return text

    def _tokenize(self, text, **kwargs):
        tokens = []
        ids = self.tokenizer.encode(text)
        for t in ids:
            tokens.append(self.decoder[t])
        return tokens

    def _convert_token_to_id(self, token):
        """ Converts a token (str) in an id using the vocab. """
        return self.mergeable_ranks[token]

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, "")

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.
            filename_prefix (`str`, *optional*):
                An optional prefix to add to the names of the saved files.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, self.vocab_files_names["vocab_file"]
            )
        else:
            vocab_file = save_directory

        with open(self.vocab_file, 'rb') as fin:
            proto_str = fin.read()

        with open(vocab_file, "wb") as writer:
            writer.write(proto_str)

        return (vocab_file,)

    def get_prefix_tokens(self):
        prefix_tokens = [self.convert_tokens_to_ids("[gMASK]"), self.convert_tokens_to_ids("<sop>")]
        return prefix_tokens

    def build_single_message(self, role, metadata, message, tokenize=True):
        assert role in ["system", "user", "assistant", "observation"], role
        if tokenize:
            role_tokens = [self.convert_tokens_to_ids(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n",
                                                                                              disallowed_special=())
            message_tokens = self.tokenizer.encode(message, disallowed_special=())
            tokens = role_tokens + message_tokens
            return tokens
        else:
            return str(f"<|{role}|>{metadata}\n{message}")

    def build_inputs_with_special_tokens(
            self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        prefix_tokens = self.get_prefix_tokens()
        token_ids_0 = prefix_tokens + token_ids_0
        if token_ids_1 is not None:
            token_ids_0 = token_ids_0 + token_ids_1 + [self.convert_tokens_to_ids("<eos>")]
        return token_ids_0

    def _pad(
            self,
            encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
            max_length: Optional[int] = None,
            padding_side: str = "left",
            padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
            pad_to_multiple_of: Optional[int] = None,
            return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.

                - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding sides are defined in self.padding_side:

                - 'left': pads on the left of the sequences
                - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        # Load from model defaults

        required_input = encoded_inputs[self.model_input_names[0]]
        seq_length = len(required_input)

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Initialize attention mask if not present.
        if "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * seq_length

        if "position_ids" not in encoded_inputs:
            encoded_inputs["position_ids"] = list(range(seq_length))

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if "attention_mask" in encoded_inputs:
                encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
            if "position_ids" in encoded_inputs:
                encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"]
            encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input

        return encoded_inputs
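Note: tokenizer_config.json (below) maps AutoTokenizer to this ChatGLM4Tokenizer via auto_map, so loading it also requires trust_remote_code. A usage sketch, assuming a local checkout of the repo:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".", trust_remote_code=True)
ids = tokenizer.encode("hello world")
print(ids, tokenizer.decode(ids))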
3 tokenizer.model Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5a493598071550244b2ee7f26118f3edec2150b9dfa967929a99052ac83fe716
size 2623634
148 tokenizer_config.json Normal file
@@ -0,0 +1,148 @@
{
  "added_tokens_decoder": {
    "151329": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151330": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151331": {
      "content": "[gMASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151332": {
      "content": "[sMASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151333": {
      "content": "<sop>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151334": {
      "content": "<eop>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151335": {
      "content": "<|system|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151336": {
      "content": "<|user|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151337": {
      "content": "<|assistant|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151338": {
      "content": "<|observation|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151339": {
      "content": "<|begin_of_image|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151340": {
      "content": "<|end_of_image|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151341": {
      "content": "<|begin_of_video|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151342": {
      "content": "<|end_of_video|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<|endoftext|>",
    "[MASK]",
    "[gMASK]",
    "[sMASK]",
    "<sop>",
    "<eop>",
    "<|system|>",
    "<|user|>",
    "<|assistant|>",
    "<|observation|>",
    "<|begin_of_image|>",
    "<|end_of_image|>",
    "<|begin_of_video|>",
    "<|end_of_video|>"
  ],
  "auto_map": {
    "AutoTokenizer": [
      "tokenization_chatglm.ChatGLM4Tokenizer",
      null
    ]
  },
  "chat_template": "[gMASK]<sop>{% for item in messages %}{% if item['tools'] is defined %}<|system|>\n你是一个名为 GLM-4 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具{% set tools = item['tools'] %}{% for tool in tools %}{% if tool['type'] == 'function' %}\n\n## {{ tool['function']['name'] }}\n\n{{ tool['function'] | tojson(indent=4) }}\n在调用上述函数时,请使用 Json 格式表示调用的参数。{% elif tool['type'] == 'python' %}\n\n## python\n\n当你向 `python` 发送包含 Python 代码的消息时,该代码将会在一个有状态的 Jupyter notebook 环境中执行。\n`python` 返回代码执行的输出,或在执行 60 秒后返回超时。\n`/mnt/data` 将会持久化存储你的文件。在此会话中,`python` 无法访问互联网。不要使用 `python` 进行任何网络请求或者在线 API 调用,这些在线内容的访问将不会成功。{% elif tool['type'] == 'simple_browser' %}\n\n## simple_browser\n\n你可以使用 `simple_browser` 工具。该工具支持以下函数:\n`search(query: str, recency_days: int)`:使用搜索引擎进行查询并显示结果,可以使用 `recency_days` 参数控制搜索内容的时效性。\n`mclick(ids: list[int])`:获取一系列指定 id 的页面内容。每次调用时,须选择3-10个页面。选择多个角度的页面,同时尽可能选择可信任的信息来源。考虑到部分页面是无法加载的,你也可以多打开一些可能有用的页面而不用担心内容过多。\n`open_url(url: str)`:打开指定的 URL。\n\n使用 `【{引用 id}†{引用文本}】` 来引用内容。\n\n操作步骤:1. 使用 `search` 来获得信息列表; 2. 使用 `mclick` 来获取指定 ID 页面的内容; 3. 根据获得的内容进行回复。在回复中应当引用信息来源。\n 如果用户提供了 URL,也可以用 `open_url` 直接打开页面。\n如果初次搜索结果没有找到合适的信息,也可以再次使用 `search` 进行搜索。{% elif tool['type'] == 'cogview' %}\n\n## cogview\n\n如果用户的请求中包含了对图像的描述,你可以使用 `cogview` 来生成图像并展示给用户。你需要向 `cogview` 发送图像描述,规则:\n- 发送给 `cogview` 的消息必须使用英语。用户的中文描述必须完全翻译为英语。\n- 应当尽可能详细地描述图像生成的需求,需求描述约 100 英文单词。\n- 保持用户原始描述的意图。不要虚构内容或者没见过的人物。\n- 如无特殊说明,所在地为中国,持有中国立场并遵循中国社会主义价值观。{% endif %}{% endfor %}{% endif %}{% if item['content'] %}<|{{ item['role'] }}|>{{ item['metadata'] }}\n{{ item['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}",
  "clean_up_tokenization_spaces": false,
  "do_lower_case": false,
  "eos_token": "<|endoftext|>",
  "extra_special_tokens": {},
  "model_max_length": 128000,
  "pad_token": "<|endoftext|>",
  "padding_side": "left",
  "remove_space": false,
  "tokenizer_class": "ChatGLM4Tokenizer"
}
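Note: the chat_template above is a Jinja template consumed by tokenizer.apply_chat_template; it emits the [gMASK]<sop> prefix from get_prefix_tokens plus the role tags registered in added_tokens.json. A sketch (the output shown is the expected shape, not a captured run):

messages = [{"role": "user", "content": "Hi there"}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=False
)
print(prompt)  # roughly: [gMASK]<sop><|user|>\nHi there<|assistant|>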