commit 607e581055af4c43dfa68e58d67294c6299b7248 Author: ModelHub XC Date: Sun Apr 26 18:05:08 2026 +0800 初始化项目,由ModelHub XC社区提供模型 Model: Xianjun/PLLaMa-13b-instruct Source: Original Platform diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..a6344aa --- /dev/null +++ b/.gitattributes @@ -0,0 +1,35 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md new file mode 100644 index 0000000..f34b509 --- /dev/null +++ b/README.md @@ 
-0,0 +1,47 @@ +--- +license: apache-2.0 +--- + +# Model Card for Model ID + + + +This model is optimized for plant science by continued pretraining on over 1.5 million plant science academic articles based on LLaMa-2-13b-base. And it undergoes further instruction tuning to make it follow instructions. + + +- **Developed by:** [UCSB] +- **Language(s) (NLP):** [More Information Needed] +- **License:** [More Information Needed] +- **Finetuned from model [optional]:** [LLaMa-2] + +- **Paper [optional]:** [https://arxiv.org/pdf/2401.01600.pdf] +- **Demo [optional]:** [More Information Needed] + +## How to Get Started with the Model +```python +from transformers import LlamaTokenizer, LlamaForCausalLM +import torch + +tokenizer = LlamaTokenizer.from_pretrained("Xianjun/PLLaMa-13b-instruct") +model = LlamaForCausalLM.from_pretrained("Xianjun/PLLaMa-13b-instruct").half().to("cuda") + +instruction = "How to ..." +batch = tokenizer(instruction, return_tensors="pt", add_special_tokens=False).to("cuda") +with torch.no_grad(): + output = model.generate(**batch, max_new_tokens=512, temperature=0.7, do_sample=True) + response = tokenizer.decode(output[0], skip_special_tokens=True) +``` + +## Citation +If you find PLLaMa useful in your research, please cite the following paper: + +```latex +@inproceedings{Yang2024PLLaMaAO, + title={PLLaMa: An Open-source Large Language Model for Plant Science}, + author={Xianjun Yang and Junfeng Gao and Wenxin Xue and Erik Alexandersson}, + year={2024}, + url={https://api.semanticscholar.org/CorpusID:266741610} +} +``` + + diff --git a/config.json b/config.json new file mode 100644 index 0000000..8a69fd1 --- /dev/null +++ b/config.json @@ -0,0 +1,27 @@ +{ + "_name_or_path": "/mnt/dsss_data/xianjun/project_data/plant_llama2_13b", + "architectures": [ + "LlamaForCausalLM" + ], + "bos_token_id": 1, + "end_token_id": 2, + "eos_token_id": 2, + "hidden_act": "silu", + "hidden_size": 5120, + "initializer_range": 0.02, + "intermediate_size": 13824, + 
"max_position_embeddings": 2048, + "model_type": "llama", + "num_attention_heads": 40, + "num_hidden_layers": 40, + "num_key_value_heads": 40, + "pad_token_id": 2, + "pretraining_tp": 1, + "rms_norm_eps": 1e-05, + "rope_scaling": null, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.28.1", + "use_cache": true, + "vocab_size": 32000 +} diff --git a/generation_config.json b/generation_config.json new file mode 100644 index 0000000..fd00f3f --- /dev/null +++ b/generation_config.json @@ -0,0 +1,7 @@ +{ + "_from_model_config": true, + "bos_token_id": 1, + "eos_token_id": 2, + "pad_token_id": 2, + "transformers_version": "4.28.1" +} diff --git a/pytorch_model.bin b/pytorch_model.bin new file mode 100644 index 0000000..b9969a1 --- /dev/null +++ b/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5de0899e0453b11cb29e7d9b1815fd893d47afe49dc8df4188fb57ae9fc2ac56 +size 26031868013 diff --git a/special_tokens_map.json b/special_tokens_map.json new file mode 100644 index 0000000..1e1a997 --- /dev/null +++ b/special_tokens_map.json @@ -0,0 +1,24 @@ +{ + "bos_token": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "pad_token": "", + "unk_token": { + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + } +} diff --git a/tokenizer.model b/tokenizer.model new file mode 100644 index 0000000..6c00c74 --- /dev/null +++ b/tokenizer.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347 +size 499723 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..f56a323 --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,34 @@ +{ + "add_bos_token": true, + 
"add_eos_token": false, + "bos_token": { + "__type": "AddedToken", + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "clean_up_tokenization_spaces": false, + "eos_token": { + "__type": "AddedToken", + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "model_max_length": 1024, + "pad_token": null, + "padding_side": "right", + "sp_model_kwargs": {}, + "tokenizer_class": "LlamaTokenizer", + "unk_token": { + "__type": "AddedToken", + "content": "", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + } +} diff --git a/trainer_state.json b/trainer_state.json new file mode 100644 index 0000000..22eabaa --- /dev/null +++ b/trainer_state.json @@ -0,0 +1,745 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 14.883720930232558, + "global_step": 120, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.12, + "learning_rate": 0.0, + "loss": 23.0645, + "step": 1 + }, + { + "epoch": 0.25, + "learning_rate": 5e-06, + "loss": 18.9746, + "step": 2 + }, + { + "epoch": 0.37, + "learning_rate": 7.924812503605782e-06, + "loss": 18.9805, + "step": 3 + }, + { + "epoch": 0.5, + "learning_rate": 1e-05, + "loss": 10.9014, + "step": 4 + }, + { + "epoch": 0.62, + "learning_rate": 1e-05, + "loss": 3.501, + "step": 5 + }, + { + "epoch": 0.74, + "learning_rate": 9.913793103448277e-06, + "loss": 2.9929, + "step": 6 + }, + { + "epoch": 0.87, + "learning_rate": 9.827586206896553e-06, + "loss": 2.4647, + "step": 7 + }, + { + "epoch": 0.99, + "learning_rate": 9.741379310344829e-06, + "loss": 2.2385, + "step": 8 + }, + { + "epoch": 1.12, + "learning_rate": 9.655172413793105e-06, + "loss": 2.0122, + "step": 9 + }, + { + "epoch": 1.24, + "learning_rate": 9.56896551724138e-06, + "loss": 1.7836, + "step": 10 + }, + { + "epoch": 1.36, + "learning_rate": 
9.482758620689655e-06, + "loss": 1.7567, + "step": 11 + }, + { + "epoch": 1.49, + "learning_rate": 9.396551724137931e-06, + "loss": 1.6985, + "step": 12 + }, + { + "epoch": 1.61, + "learning_rate": 9.310344827586207e-06, + "loss": 1.726, + "step": 13 + }, + { + "epoch": 1.74, + "learning_rate": 9.224137931034484e-06, + "loss": 1.7654, + "step": 14 + }, + { + "epoch": 1.86, + "learning_rate": 9.13793103448276e-06, + "loss": 1.6182, + "step": 15 + }, + { + "epoch": 1.98, + "learning_rate": 9.051724137931036e-06, + "loss": 1.7335, + "step": 16 + }, + { + "epoch": 2.11, + "learning_rate": 8.965517241379312e-06, + "loss": 1.6503, + "step": 17 + }, + { + "epoch": 2.23, + "learning_rate": 8.879310344827588e-06, + "loss": 1.6674, + "step": 18 + }, + { + "epoch": 2.36, + "learning_rate": 8.793103448275862e-06, + "loss": 1.5766, + "step": 19 + }, + { + "epoch": 2.48, + "learning_rate": 8.706896551724138e-06, + "loss": 1.4727, + "step": 20 + }, + { + "epoch": 2.6, + "learning_rate": 8.620689655172414e-06, + "loss": 1.5703, + "step": 21 + }, + { + "epoch": 2.73, + "learning_rate": 8.53448275862069e-06, + "loss": 1.5568, + "step": 22 + }, + { + "epoch": 2.85, + "learning_rate": 8.448275862068966e-06, + "loss": 1.4897, + "step": 23 + }, + { + "epoch": 2.98, + "learning_rate": 8.362068965517242e-06, + "loss": 1.5204, + "step": 24 + }, + { + "epoch": 3.1, + "learning_rate": 8.275862068965518e-06, + "loss": 1.3769, + "step": 25 + }, + { + "epoch": 3.22, + "learning_rate": 8.189655172413794e-06, + "loss": 1.3522, + "step": 26 + }, + { + "epoch": 3.35, + "learning_rate": 8.103448275862069e-06, + "loss": 1.3126, + "step": 27 + }, + { + "epoch": 3.47, + "learning_rate": 8.017241379310345e-06, + "loss": 1.2707, + "step": 28 + }, + { + "epoch": 3.6, + "learning_rate": 7.93103448275862e-06, + "loss": 1.2974, + "step": 29 + }, + { + "epoch": 3.72, + "learning_rate": 7.844827586206897e-06, + "loss": 1.3587, + "step": 30 + }, + { + "epoch": 3.84, + "learning_rate": 7.758620689655173e-06, + 
"loss": 1.3098, + "step": 31 + }, + { + "epoch": 3.97, + "learning_rate": 7.672413793103449e-06, + "loss": 1.3593, + "step": 32 + }, + { + "epoch": 4.09, + "learning_rate": 7.586206896551724e-06, + "loss": 1.0844, + "step": 33 + }, + { + "epoch": 4.22, + "learning_rate": 7.500000000000001e-06, + "loss": 1.1842, + "step": 34 + }, + { + "epoch": 4.34, + "learning_rate": 7.413793103448277e-06, + "loss": 1.095, + "step": 35 + }, + { + "epoch": 4.47, + "learning_rate": 7.327586206896552e-06, + "loss": 1.0719, + "step": 36 + }, + { + "epoch": 4.59, + "learning_rate": 7.241379310344828e-06, + "loss": 1.1342, + "step": 37 + }, + { + "epoch": 4.71, + "learning_rate": 7.155172413793104e-06, + "loss": 1.1034, + "step": 38 + }, + { + "epoch": 4.84, + "learning_rate": 7.0689655172413796e-06, + "loss": 1.0181, + "step": 39 + }, + { + "epoch": 4.96, + "learning_rate": 6.982758620689656e-06, + "loss": 1.0376, + "step": 40 + }, + { + "epoch": 5.09, + "learning_rate": 6.896551724137932e-06, + "loss": 0.9732, + "step": 41 + }, + { + "epoch": 5.21, + "learning_rate": 6.810344827586207e-06, + "loss": 0.963, + "step": 42 + }, + { + "epoch": 5.33, + "learning_rate": 6.724137931034484e-06, + "loss": 0.8425, + "step": 43 + }, + { + "epoch": 5.46, + "learning_rate": 6.63793103448276e-06, + "loss": 0.8646, + "step": 44 + }, + { + "epoch": 5.58, + "learning_rate": 6.551724137931035e-06, + "loss": 0.8498, + "step": 45 + }, + { + "epoch": 5.71, + "learning_rate": 6.465517241379311e-06, + "loss": 0.8618, + "step": 46 + }, + { + "epoch": 5.83, + "learning_rate": 6.379310344827587e-06, + "loss": 0.8139, + "step": 47 + }, + { + "epoch": 5.95, + "learning_rate": 6.293103448275862e-06, + "loss": 0.7988, + "step": 48 + }, + { + "epoch": 6.08, + "learning_rate": 6.206896551724138e-06, + "loss": 0.7165, + "step": 49 + }, + { + "epoch": 6.2, + "learning_rate": 6.1206896551724135e-06, + "loss": 0.7268, + "step": 50 + }, + { + "epoch": 6.33, + "learning_rate": 6.03448275862069e-06, + "loss": 0.6033, + 
"step": 51 + }, + { + "epoch": 6.45, + "learning_rate": 5.9482758620689665e-06, + "loss": 0.6308, + "step": 52 + }, + { + "epoch": 6.57, + "learning_rate": 5.862068965517242e-06, + "loss": 0.7198, + "step": 53 + }, + { + "epoch": 6.7, + "learning_rate": 5.775862068965518e-06, + "loss": 0.6517, + "step": 54 + }, + { + "epoch": 6.82, + "learning_rate": 5.689655172413794e-06, + "loss": 0.6819, + "step": 55 + }, + { + "epoch": 6.95, + "learning_rate": 5.603448275862069e-06, + "loss": 0.588, + "step": 56 + }, + { + "epoch": 7.07, + "learning_rate": 5.517241379310345e-06, + "loss": 0.5563, + "step": 57 + }, + { + "epoch": 7.19, + "learning_rate": 5.431034482758621e-06, + "loss": 0.5104, + "step": 58 + }, + { + "epoch": 7.32, + "learning_rate": 5.344827586206896e-06, + "loss": 0.4967, + "step": 59 + }, + { + "epoch": 7.44, + "learning_rate": 5.258620689655173e-06, + "loss": 0.5056, + "step": 60 + }, + { + "epoch": 7.57, + "learning_rate": 5.172413793103449e-06, + "loss": 0.5318, + "step": 61 + }, + { + "epoch": 7.69, + "learning_rate": 5.086206896551724e-06, + "loss": 0.4791, + "step": 62 + }, + { + "epoch": 7.81, + "learning_rate": 5e-06, + "loss": 0.4324, + "step": 63 + }, + { + "epoch": 7.94, + "learning_rate": 4.9137931034482765e-06, + "loss": 0.4274, + "step": 64 + }, + { + "epoch": 8.06, + "learning_rate": 4.8275862068965525e-06, + "loss": 0.3601, + "step": 65 + }, + { + "epoch": 8.19, + "learning_rate": 4.741379310344828e-06, + "loss": 0.3549, + "step": 66 + }, + { + "epoch": 8.31, + "learning_rate": 4.655172413793104e-06, + "loss": 0.375, + "step": 67 + }, + { + "epoch": 8.43, + "learning_rate": 4.56896551724138e-06, + "loss": 0.3297, + "step": 68 + }, + { + "epoch": 8.56, + "learning_rate": 4.482758620689656e-06, + "loss": 0.3315, + "step": 69 + }, + { + "epoch": 8.68, + "learning_rate": 4.396551724137931e-06, + "loss": 0.2843, + "step": 70 + }, + { + "epoch": 8.81, + "learning_rate": 4.310344827586207e-06, + "loss": 0.2527, + "step": 71 + }, + { + "epoch": 8.93, 
+ "learning_rate": 4.224137931034483e-06, + "loss": 0.3456, + "step": 72 + }, + { + "epoch": 9.05, + "learning_rate": 4.137931034482759e-06, + "loss": 0.2933, + "step": 73 + }, + { + "epoch": 9.18, + "learning_rate": 4.051724137931034e-06, + "loss": 0.2468, + "step": 74 + }, + { + "epoch": 9.3, + "learning_rate": 3.96551724137931e-06, + "loss": 0.1867, + "step": 75 + }, + { + "epoch": 9.43, + "learning_rate": 3.8793103448275865e-06, + "loss": 0.2012, + "step": 76 + }, + { + "epoch": 9.55, + "learning_rate": 3.793103448275862e-06, + "loss": 0.2075, + "step": 77 + }, + { + "epoch": 9.67, + "learning_rate": 3.7068965517241385e-06, + "loss": 0.1893, + "step": 78 + }, + { + "epoch": 9.8, + "learning_rate": 3.620689655172414e-06, + "loss": 0.1635, + "step": 79 + }, + { + "epoch": 9.92, + "learning_rate": 3.5344827586206898e-06, + "loss": 0.1839, + "step": 80 + }, + { + "epoch": 10.05, + "learning_rate": 3.448275862068966e-06, + "loss": 0.1912, + "step": 81 + }, + { + "epoch": 10.17, + "learning_rate": 3.362068965517242e-06, + "loss": 0.1388, + "step": 82 + }, + { + "epoch": 10.29, + "learning_rate": 3.2758620689655175e-06, + "loss": 0.1337, + "step": 83 + }, + { + "epoch": 10.42, + "learning_rate": 3.1896551724137935e-06, + "loss": 0.1114, + "step": 84 + }, + { + "epoch": 10.54, + "learning_rate": 3.103448275862069e-06, + "loss": 0.163, + "step": 85 + }, + { + "epoch": 10.67, + "learning_rate": 3.017241379310345e-06, + "loss": 0.0887, + "step": 86 + }, + { + "epoch": 10.79, + "learning_rate": 2.931034482758621e-06, + "loss": 0.1345, + "step": 87 + }, + { + "epoch": 10.91, + "learning_rate": 2.844827586206897e-06, + "loss": 0.1339, + "step": 88 + }, + { + "epoch": 11.04, + "learning_rate": 2.7586206896551725e-06, + "loss": 0.1189, + "step": 89 + }, + { + "epoch": 11.16, + "learning_rate": 2.672413793103448e-06, + "loss": 0.1183, + "step": 90 + }, + { + "epoch": 11.29, + "learning_rate": 2.5862068965517246e-06, + "loss": 0.1001, + "step": 91 + }, + { + "epoch": 11.41, + 
"learning_rate": 2.5e-06, + "loss": 0.0891, + "step": 92 + }, + { + "epoch": 11.53, + "learning_rate": 2.4137931034482762e-06, + "loss": 0.087, + "step": 93 + }, + { + "epoch": 11.66, + "learning_rate": 2.327586206896552e-06, + "loss": 0.0697, + "step": 94 + }, + { + "epoch": 11.78, + "learning_rate": 2.241379310344828e-06, + "loss": 0.0597, + "step": 95 + }, + { + "epoch": 11.91, + "learning_rate": 2.1551724137931035e-06, + "loss": 0.0524, + "step": 96 + }, + { + "epoch": 12.03, + "learning_rate": 2.0689655172413796e-06, + "loss": 0.0555, + "step": 97 + }, + { + "epoch": 12.16, + "learning_rate": 1.982758620689655e-06, + "loss": 0.0559, + "step": 98 + }, + { + "epoch": 12.28, + "learning_rate": 1.896551724137931e-06, + "loss": 0.0588, + "step": 99 + }, + { + "epoch": 12.4, + "learning_rate": 1.810344827586207e-06, + "loss": 0.0519, + "step": 100 + }, + { + "epoch": 12.53, + "learning_rate": 1.724137931034483e-06, + "loss": 0.0494, + "step": 101 + }, + { + "epoch": 12.65, + "learning_rate": 1.6379310344827587e-06, + "loss": 0.0445, + "step": 102 + }, + { + "epoch": 12.78, + "learning_rate": 1.5517241379310346e-06, + "loss": 0.0465, + "step": 103 + }, + { + "epoch": 12.9, + "learning_rate": 1.4655172413793104e-06, + "loss": 0.0459, + "step": 104 + }, + { + "epoch": 13.02, + "learning_rate": 1.3793103448275862e-06, + "loss": 0.0444, + "step": 105 + }, + { + "epoch": 13.15, + "learning_rate": 1.2931034482758623e-06, + "loss": 0.0359, + "step": 106 + }, + { + "epoch": 13.27, + "learning_rate": 1.2068965517241381e-06, + "loss": 0.0385, + "step": 107 + }, + { + "epoch": 13.4, + "learning_rate": 1.120689655172414e-06, + "loss": 0.0337, + "step": 108 + }, + { + "epoch": 13.52, + "learning_rate": 1.0344827586206898e-06, + "loss": 0.0354, + "step": 109 + }, + { + "epoch": 13.64, + "learning_rate": 9.482758620689655e-07, + "loss": 0.0399, + "step": 110 + }, + { + "epoch": 13.77, + "learning_rate": 8.620689655172415e-07, + "loss": 0.0314, + "step": 111 + }, + { + "epoch": 
13.89, + "learning_rate": 7.758620689655173e-07, + "loss": 0.0419, + "step": 112 + }, + { + "epoch": 14.02, + "learning_rate": 6.896551724137931e-07, + "loss": 0.0274, + "step": 113 + }, + { + "epoch": 14.14, + "learning_rate": 6.034482758620691e-07, + "loss": 0.0341, + "step": 114 + }, + { + "epoch": 14.26, + "learning_rate": 5.172413793103449e-07, + "loss": 0.0229, + "step": 115 + }, + { + "epoch": 14.39, + "learning_rate": 4.3103448275862073e-07, + "loss": 0.0275, + "step": 116 + }, + { + "epoch": 14.51, + "learning_rate": 3.4482758620689656e-07, + "loss": 0.0285, + "step": 117 + }, + { + "epoch": 14.64, + "learning_rate": 2.5862068965517245e-07, + "loss": 0.0228, + "step": 118 + }, + { + "epoch": 14.76, + "learning_rate": 1.7241379310344828e-07, + "loss": 0.0233, + "step": 119 + }, + { + "epoch": 14.88, + "learning_rate": 8.620689655172414e-08, + "loss": 0.026, + "step": 120 + }, + { + "epoch": 14.88, + "step": 120, + "total_flos": 27811624222720.0, + "train_loss": 1.2693956012527148, + "train_runtime": 9579.7606, + "train_samples_per_second": 1.613, + "train_steps_per_second": 0.013 + } + ], + "max_steps": 120, + "num_train_epochs": 15, + "total_flos": 27811624222720.0, + "trial_name": null, + "trial_params": null +} diff --git a/training_args.bin b/training_args.bin new file mode 100644 index 0000000..2f9cf95 --- /dev/null +++ b/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5101fa7c888f87ca249770953c92d119dee90cb8aaea4dad9672f30b5b180f60 +size 4923