From e6edc137b00add7056b7dcb67882bc76dc4013fc Mon Sep 17 00:00:00 2001
From: ModelHub XC
Date: Sun, 10 May 2026 16:26:57 +0800
Subject: [PATCH] Initialize the project; model provided by the ModelHub XC
 community
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Model: hfl/chinese-llama-2-13b-16k
Source: Original Platform
---
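The three pytorch_model shards in this patch total 26,508,979,195 bytes
(about 24.7 GiB), which lines up with a roughly 13B-parameter LLaMA-2
checkpoint, including the expanded 55,296-token vocabulary, stored in
float16 at about 2 bytes per parameter. A quick back-of-the-envelope
check (illustrative only; the exact parameter count depends on the
architecture fields in config.json):

```python
# Back-of-the-envelope size check (illustrative; the exact parameter
# count depends on the architecture fields in config.json).
shards = [10_187_279_506, 9_904_165_024, 6_417_534_665]  # bytes, from the LFS pointers
total = sum(shards)
print(f"{total:,} bytes = {total / 2**30:.1f} GiB")      # 26,508,979,195 bytes = 24.7 GiB
print(f"~{total / 2 / 1e9:.2f}B parameters at float16")  # ~13.25B parameters
```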
 .gitattributes                   | 47 ++++++++++++++++++++++++++++++++
 README.md                        | 62 ++++++++++++++++++++++++++++++++++++++++
 SHA256.md                        |  9 ++++++
 config.json                      | 28 +++++++++++++++++++
 configuration.json               |  1 +
 generation_config.json           |  9 ++++++
 pytorch_model-00001-of-00003.bin |  3 ++
 pytorch_model-00002-of-00003.bin |  3 ++
 pytorch_model-00003-of-00003.bin |  3 ++
 pytorch_model.bin.index.json     |  3 ++
 special_tokens_map.json          | 24 ++++++++++++++++
 tokenizer.model                  |  3 ++
 tokenizer_config.json            | 35 ++++++++++++++++++++++++
 13 files changed, 230 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 README.md
 create mode 100644 SHA256.md
 create mode 100644 config.json
 create mode 100644 configuration.json
 create mode 100644 generation_config.json
 create mode 100644 pytorch_model-00001-of-00003.bin
 create mode 100644 pytorch_model-00002-of-00003.bin
 create mode 100644 pytorch_model-00003-of-00003.bin
 create mode 100644 pytorch_model.bin.index.json
 create mode 100644 special_tokens_map.json
 create mode 100644 tokenizer.model
 create mode 100644 tokenizer_config.json

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..53d7257
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,47 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*.tfevents* filter=lfs diff=lfs merge=lfs -text
+*.db* filter=lfs diff=lfs merge=lfs -text
+*.ark* filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.gguf* filter=lfs diff=lfs merge=lfs -text
+*.ggml filter=lfs diff=lfs merge=lfs -text
+*.llamafile* filter=lfs diff=lfs merge=lfs -text
+*.pt2 filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d1ef88c
--- /dev/null
+++ b/README.md
@@ -0,0 +1,62 @@
+---
+license: apache-2.0
+language:
+- zh
+- en
+---
+
+# Chinese-LLaMA-2-13B-16K
+
+**This is the full Chinese-LLaMA-2-13B-16K model (context size 16K), which can be loaded directly for inference and full-parameter training; see the quick-start sketch after the feature list below.**
+
+**Related models👇**
+* Long context base models (16K)
+  * [Chinese-LLaMA-2-7B-16K (full model)](https://huggingface.co/hfl/chinese-llama-2-7b-16k)
+  * [Chinese-LLaMA-2-LoRA-7B-16K (LoRA model)](https://huggingface.co/hfl/chinese-llama-2-lora-7b-16k)
+  * [Chinese-LLaMA-2-13B-16K (full model)](https://huggingface.co/hfl/chinese-llama-2-13b-16k)
+  * [Chinese-LLaMA-2-LoRA-13B-16K (LoRA model)](https://huggingface.co/hfl/chinese-llama-2-lora-13b-16k)
+* Long context Instruction/Chat models
+  * [Chinese-Alpaca-2-7B-16K (full model)](https://huggingface.co/hfl/chinese-alpaca-2-7b-16k)
+  * [Chinese-Alpaca-2-LoRA-7B-16K (LoRA model)](https://huggingface.co/hfl/chinese-alpaca-2-lora-7b-16k)
+  * [Chinese-Alpaca-2-13B-16K (full model)](https://huggingface.co/hfl/chinese-alpaca-2-13b-16k)
+  * [Chinese-Alpaca-2-LoRA-13B-16K (LoRA model)](https://huggingface.co/hfl/chinese-alpaca-2-lora-13b-16k)
+* Base models
+  * [Chinese-LLaMA-2-7B (full model)](https://huggingface.co/hfl/chinese-llama-2-7b)
+  * [Chinese-LLaMA-2-LoRA-7B (LoRA model)](https://huggingface.co/hfl/chinese-llama-2-lora-7b)
+  * [Chinese-LLaMA-2-13B (full model)](https://huggingface.co/hfl/chinese-llama-2-13b)
+  * [Chinese-LLaMA-2-LoRA-13B (LoRA model)](https://huggingface.co/hfl/chinese-llama-2-lora-13b)
+* Instruction/Chat models
+  * [Chinese-Alpaca-2-7B (full model)](https://huggingface.co/hfl/chinese-alpaca-2-7b)
+  * [Chinese-Alpaca-2-LoRA-7B (LoRA model)](https://huggingface.co/hfl/chinese-alpaca-2-lora-7b)
+  * [Chinese-Alpaca-2-13B (full model)](https://huggingface.co/hfl/chinese-alpaca-2-13b)
+  * [Chinese-Alpaca-2-LoRA-13B (LoRA model)](https://huggingface.co/hfl/chinese-alpaca-2-lora-13b)
+
+# Description of Chinese-LLaMA-Alpaca-2
+This project is based on Llama-2, released by Meta, and is the second generation of the Chinese LLaMA & Alpaca LLM project. We open-source Chinese LLaMA-2 (foundation model) and Alpaca-2 (instruction-following model). These models extend the original Llama-2 with an expanded and optimized Chinese vocabulary. We used large-scale Chinese data for incremental pre-training, which further improved fundamental semantic understanding of the Chinese language, resulting in a significant performance improvement over the first-generation models. The relevant models support a 4K context and can be expanded up to 18K+ using the NTK method.
+
+The main contents of this project include:
+
+* 🚀 New extended Chinese vocabulary beyond Llama-2, open-sourcing the Chinese LLaMA-2 and Alpaca-2 LLMs.
+* 🚀 Open-sourced pre-training and instruction fine-tuning (SFT) scripts for further tuning on users' own data
+* 🚀 Quickly deploy and experience the quantized LLMs on the CPU/GPU of a personal PC
+* 🚀 Support for LLaMA ecosystems like 🤗transformers, llama.cpp, text-generation-webui, LangChain, vLLM, etc.
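+
+A minimal inference sketch with 🤗transformers (illustrative only; the dtype and device settings are assumptions to adjust for your hardware):
+
+```python
+# Minimal inference sketch (illustrative; adjust dtype/device for your hardware).
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model_id = "hfl/chinese-llama-2-13b-16k"
+tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id, torch_dtype=torch.float16, device_map="auto"
+)
+
+inputs = tokenizer("你好，", return_tensors="pt").to(model.device)
+outputs = model.generate(**inputs, max_new_tokens=64)
+print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+```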
+
+Please refer to [https://github.com/ymcui/Chinese-LLaMA-Alpaca-2/](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2/) for details.
\ No newline at end of file
diff --git a/SHA256.md b/SHA256.md
new file mode 100644
index 0000000..c4d2808
--- /dev/null
+++ b/SHA256.md
@@ -0,0 +1,9 @@
+a19d75bc8a82a885fbfc7fc952e6c53b943243dd2452f0e21f02f7a5fe50a699 config.json
+f8e961981aa91ce005f79e9d5d58a408c50645f6eb3057240406af7dddf6a0bc generation_config.json
+94e6003f45a904cdcd051cf6f84994eb26be406a1cc5105636ee2251f41baec1 pytorch_model-00001-of-00003.bin
+e27df0d606df1441389ab14ffa370d36d320083e5bec8a76f1c20838d5cb7aef pytorch_model-00002-of-00003.bin
+7f5d8b82ea27e874076454c6f6d1585c70ce27c4d9088c4636fc726bb24c0edd pytorch_model-00003-of-00003.bin
+f80e62cf9b1a31cf4e4619c5568c7cf58fb31df867a36873dd407232b06e74be pytorch_model.bin.index.json
+dfd7f38bbbe1f22c1f6e05db6241ad82176a9765d91b51b3fee3e3835e6ac75f special_tokens_map.json
+a3b8844863b200dfcca971db228e96ce388290dfcf72c15d7a9d2f604bac787c tokenizer.model
+305a57cf5eca7b87705ffe64d9c0ccb23ff09e85342597227ac4207953f7fea6 tokenizer_config.json
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..61aa523
--- /dev/null
+++ b/config.json
@@ -0,0 +1,28 @@
+{
+  "_name_or_path": "meta-llama/Llama-2-13b-hf",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 5120,
+  "initializer_range": 0.02,
+  "intermediate_size": 13824,
+  "max_position_embeddings": 16384,
+  "model_type": "llama",
+  "num_attention_heads": 40,
+  "num_hidden_layers": 40,
+  "num_key_value_heads": 40,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "factor": 4.0,
+    "type": "linear"
+  },
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.31.0",
+  "use_cache": true,
+  "vocab_size": 55296
+}
\ No newline at end of file
diff --git a/configuration.json b/configuration.json
new file mode 100644
index 0000000..bbeeda1
--- /dev/null
+++ b/configuration.json
@@ -0,0 +1 @@
+{"framework": "pytorch", "task": "text-generation", "allow_remote": true}
\ No newline at end of file
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000..471526c
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,9 @@
+{
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "max_length": 4096,
+  "pad_token_id": 0,
+  "temperature": 0.9,
+  "top_p": 0.6,
+  "transformers_version": "4.32.0.dev0"
+}
\ No newline at end of file
diff --git a/pytorch_model-00001-of-00003.bin b/pytorch_model-00001-of-00003.bin
new file mode 100644
index 0000000..51d1d03
--- /dev/null
+++ b/pytorch_model-00001-of-00003.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94e6003f45a904cdcd051cf6f84994eb26be406a1cc5105636ee2251f41baec1
+size 10187279506
diff --git a/pytorch_model-00002-of-00003.bin b/pytorch_model-00002-of-00003.bin
new file mode 100644
index 0000000..cbad4b4
--- /dev/null
+++ b/pytorch_model-00002-of-00003.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e27df0d606df1441389ab14ffa370d36d320083e5bec8a76f1c20838d5cb7aef
+size 9904165024
diff --git a/pytorch_model-00003-of-00003.bin b/pytorch_model-00003-of-00003.bin
new file mode 100644
index 0000000..f944091
--- /dev/null
+++ b/pytorch_model-00003-of-00003.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f5d8b82ea27e874076454c6f6d1585c70ce27c4d9088c4636fc726bb24c0edd
+size 6417534665
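Each LFS pointer stub above carries the sha256 of its real payload, and SHA256.md lists the same digests alongside those of the smaller JSON and tokenizer files. After downloading, integrity can be checked with a short script along these lines (a sketch; it assumes SHA256.md and the listed files sit in the current directory):

```python
# Verify downloaded files against SHA256.md (sketch; paths are assumed
# relative to the current directory).
import hashlib

def sha256sum(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file through sha256 so multi-GB shards need not fit in RAM."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            h.update(block)
    return h.hexdigest()

with open("SHA256.md") as listing:
    for line in listing:
        digest, name = line.split()
        print(name, "OK" if sha256sum(name) == digest else "MISMATCH")
```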
diff --git a/pytorch_model.bin.index.json b/pytorch_model.bin.index.json
new file mode 100644
index 0000000..c40390e
--- /dev/null
+++ b/pytorch_model.bin.index.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f80e62cf9b1a31cf4e4619c5568c7cf58fb31df867a36873dd407232b06e74be
+size 33443
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000..9588650
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diff --git a/tokenizer.model b/tokenizer.model
new file mode 100644
index 0000000..3df664f
--- /dev/null
+++ b/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3b8844863b200dfcca971db228e96ce388290dfcf72c15d7a9d2f604bac787c
+size 844403
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000..5076180
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,35 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "use_fast": false
+}
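
One detail worth noting in config.json: the 16K context comes from linear RoPE position interpolation. max_position_embeddings is 16384 = 4.0 × 4096, i.e. Llama-2's native 4K window stretched by the declared rope_scaling factor of 4.0, which compresses positions back into the range seen during pre-training. A sketch of the arithmetic (illustrative only, not the model's actual attention implementation):

```python
# Linear RoPE scaling as declared in config.json (illustrative sketch,
# not the model's actual attention implementation).
native_context = 4096        # Llama-2's pre-training context window
factor = 4.0                 # config.json: rope_scaling.factor
head_dim = 5120 // 40        # hidden_size / num_attention_heads = 128

assert int(native_context * factor) == 16384  # == max_position_embeddings

def rope_angle(position: int, pair_index: int, base: float = 10000.0) -> float:
    """Rotation angle for one (even, odd) dimension pair. Linear scaling
    divides the position by `factor` before the standard RoPE formula,
    so position 16383 is treated as 4095.75 -- inside the trained range."""
    inv_freq = base ** (-2.0 * pair_index / head_dim)
    return (position / factor) * inv_freq

print(rope_angle(16383, 0))  # 4095.75 for the fastest-rotating pair
```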