commit 3b2de39bd69948266ac38f884255a6ca532bf193
Author: ModelHub XC
Date:   Thu May 7 16:05:27 2026 +0800

    Initialize the project; model provided by the ModelHub XC community

    Model: llm-jp/llm-jp-3-1.8b-instruct
    Source: Original Platform

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..16332c1
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,50 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*.tfevents* filter=lfs diff=lfs merge=lfs -text
+*.db* filter=lfs diff=lfs merge=lfs -text
+*.ark* filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
+**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
+
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.gguf* filter=lfs diff=lfs merge=lfs -text
+*.ggml filter=lfs diff=lfs merge=lfs -text
+*.llamafile* filter=lfs diff=lfs merge=lfs -text
+*.pt2 filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+model.safetensors filter=lfs diff=lfs merge=lfs -text
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..963fcb3
--- /dev/null
+++ b/README.md
@@ -0,0 +1,180 @@
+---
+license: apache-2.0
+language:
+- en
+- ja
+programming_language:
+  - C
+  - C++
+  - C#
+  - Go
+  - Java
+  - JavaScript
+  - Lua
+  - PHP
+  - Python
+  - Ruby
+  - Rust
+  - Scala
+  - TypeScript
+pipeline_tag: text-generation
+library_name: transformers
+inference: false
+---
+
+# llm-jp-3-1.8b-instruct
+
+This repository provides large language models developed by the [Research and Development Center for Large Language Models](https://llmc.nii.ac.jp/) at the [National Institute of Informatics](https://www.nii.ac.jp/en/).
+
+| Model Variants |
+| :--- |
+| [llm-jp-3-1.8b](https://huggingface.co/llm-jp/llm-jp-3-1.8b) |
+| [llm-jp-3-1.8b-instruct](https://huggingface.co/llm-jp/llm-jp-3-1.8b-instruct) |
+| [llm-jp-3-3.7b](https://huggingface.co/llm-jp/llm-jp-3-3.7b) |
+| [llm-jp-3-3.7b-instruct](https://huggingface.co/llm-jp/llm-jp-3-3.7b-instruct) |
+| [llm-jp-3-13b](https://huggingface.co/llm-jp/llm-jp-3-13b) |
+| [llm-jp-3-13b-instruct](https://huggingface.co/llm-jp/llm-jp-3-13b-instruct) |
+| [llm-jp-3-172b-beta1](https://huggingface.co/llm-jp/llm-jp-3-172b-beta1) |
+| [llm-jp-3-172b-beta1-instruct](https://huggingface.co/llm-jp/llm-jp-3-172b-beta1-instruct) |
+
+Checkpoints format: Hugging Face Transformers
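+
+Any of the variants above can be loaded by passing its repository ID to the `from_pretrained` calls shown in the Usage section below. As a minimal sketch (assuming the `huggingface_hub` package, a dependency of `transformers`, is installed), a full snapshot can also be fetched ahead of time:
+
+```python
+from huggingface_hub import snapshot_download
+
+# Pre-fetch every file in the repository (config, tokenizer, safetensors
+# weights) into the local Hugging Face cache and return the snapshot path.
+local_dir = snapshot_download(repo_id="llm-jp/llm-jp-3-1.8b-instruct")
+print(local_dir)
+```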
+
+## Required Libraries and Their Versions
+
+- torch>=2.3.0
+- transformers>=4.40.1
+- tokenizers>=0.19.1
+- accelerate>=0.29.3
+- flash-attn>=2.5.8
+
+## Usage
+
+```python
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+# Load the tokenizer and the bfloat16 weights, spreading layers automatically
+# across the available devices.
+tokenizer = AutoTokenizer.from_pretrained("llm-jp/llm-jp-3-1.8b-instruct")
+model = AutoModelForCausalLM.from_pretrained("llm-jp/llm-jp-3-1.8b-instruct", device_map="auto", torch_dtype=torch.bfloat16)
+
+# Build the conversation. The system message is the fixed Japanese instruction
+# header expected by the chat template ("Below is an instruction that describes
+# a task. Write a response that appropriately fulfills the request."); the user
+# message asks "What is natural language processing?".
+chat = [
+    {"role": "system", "content": "以下は、タスクを説明する指示です。要求を適切に満たす応答を書きなさい。"},
+    {"role": "user", "content": "自然言語処理とは何か"},
+]
+
+# Render the conversation with the model's chat template and tokenize it.
+tokenized_input = tokenizer.apply_chat_template(chat, add_generation_prompt=True, tokenize=True, return_tensors="pt").to(model.device)
+with torch.no_grad():
+    output = model.generate(
+        tokenized_input,
+        max_new_tokens=100,
+        do_sample=True,
+        top_p=0.95,
+        temperature=0.7,
+        repetition_penalty=1.05,
+    )[0]
+print(tokenizer.decode(output))
+```
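+
+The prompt string produced by the chat template (shipped in `tokenizer_config.json`) can be inspected by rendering the same conversation without tokenization. A minimal sketch, reusing `tokenizer` and `chat` from the example above:
+
+```python
+# Render the chat template to a plain string instead of token IDs.
+prompt = tokenizer.apply_chat_template(chat, add_generation_prompt=True, tokenize=False)
+print(prompt)
+# Per the template definition: the BOS token, the system instruction, then
+# "\n\n### 指示:\n" followed by the user message, and a trailing
+# "\n\n### 応答:\n" that cues the model's answer.
+```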
+
+## Model Details
+
+- **Model type:** Transformer-based Language Model
+- **Total seen tokens:** 2.1T
+
+|Params|Layers|Hidden size|Heads|Context length|Embedding parameters|Non-embedding parameters|
+|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
+|1.8b|24|2048|16|4096|407,896,064|1,459,718,144|
+|3.7b|28|3072|24|4096|611,844,096|3,171,068,928|
+|13b|40|5120|40|4096|1,019,740,160|12,688,184,320|
+
+## Tokenizer
+
+The tokenizer of this model is based on the [huggingface/tokenizers](https://github.com/huggingface/tokenizers) Unigram byte-fallback model.
+The vocabulary entries were converted from [`llm-jp-tokenizer v3.0`](https://github.com/llm-jp/llm-jp-tokenizer/releases/tag/v3.0b2).
+Please refer to the [README.md](https://github.com/llm-jp/llm-jp-tokenizer) of `llm-jp-tokenizer` for details on the vocabulary construction procedure (note that the vocabulary cannot be reproduced by pure SentencePiece training).
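+
+For illustration, the byte-fallback behaviour can be observed directly. A minimal sketch (the exact pieces depend on the vocabulary, so inspect the printed output rather than relying on any listing here):
+
+```python
+from transformers import AutoTokenizer
+
+tokenizer = AutoTokenizer.from_pretrained("llm-jp/llm-jp-3-1.8b-instruct")
+
+# Common Japanese text segments into Unigram subword pieces.
+print(tokenizer.tokenize("自然言語処理とは何か"))
+
+# Characters absent from the vocabulary decompose into byte-level pieces
+# (byte fallback) rather than collapsing to the <unk> token.
+print(tokenizer.tokenize("🦙"))
+```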
+
+## Datasets
+
+### Pre-training
+
+The models have been pre-trained using a blend of the following datasets.
+
+| Language | Dataset | Tokens |
+|:---|:---|---:|
+|Japanese|[Wikipedia](https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-corpus-v3)|2.6B|
+||[Common Crawl](https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-corpus-v3)|762.8B|
+||[WARP/PDF](https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-corpus-v3)|237.3B|
+||[WARP/HTML](https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-corpus-v3)|2.7B|
+||[Kaken](https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-corpus-v3)|1.8B|
+|English|[Wikipedia](https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-corpus-v3)|4.7B|
+||[Dolma/CC-head](https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-corpus-v3)|608.5B|
+||[Dolma/C4](https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-corpus-v3)|181.6B|
+||[Dolma/Reddit](https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-corpus-v3)|83.1B|
+||[Dolma/PeS2o](https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-corpus-v3)|62.9B|
+||[Dolma/Gutenberg](https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-corpus-v3)|5.5B|
+||[Dolma/Wiki](https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-corpus-v3)|3.9B|
+|Code|[The Stack](https://huggingface.co/datasets/bigcode/the-stack)|114.1B|
+|Chinese|[Wikipedia](https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-corpus-v3)|0.8B|
+|Korean|[Wikipedia](https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-corpus-v3)|0.3B|
+
+### Instruction tuning
+
+The models have been fine-tuned on the following datasets.
+
+| Language | Dataset | Description |
+|:---|:---|:---|
+|Japanese|[ichikara-instruction-004-002](https://liat-aip.sakura.ne.jp/wp/llm%e3%81%ae%e3%81%9f%e3%82%81%e3%81%ae%e6%97%a5%e6%9c%ac%e8%aa%9e%e3%82%a4%e3%83%b3%e3%82%b9%e3%83%88%e3%83%a9%e3%82%af%e3%82%b7%e3%83%a7%e3%83%b3%e3%83%87%e3%83%bc%e3%82%bf%e4%bd%9c%e6%88%90/llm%e3%81%ae%e3%81%9f%e3%82%81%e3%81%ae%e6%97%a5%e6%9c%ac%e8%aa%9e%e3%82%a4%e3%83%b3%e3%82%b9%e3%83%88%e3%83%a9%e3%82%af%e3%82%b7%e3%83%a7%e3%83%b3%e3%83%87%e3%83%bc%e3%82%bf-%e5%85%ac%e9%96%8b/)| A manually constructed instruction dataset. |
+| |[answer-carefully-002](https://liat-aip.sakura.ne.jp/wp/answercarefully-dataset/)| A manually constructed instruction dataset focusing on LLMs' safety. |
+| |ichikara-instruction-format| A small instruction dataset derived from ichikara-instruction, with some constraints placed on the output format. |
+| |[AutoMultiTurnByCalm3-22B](https://huggingface.co/datasets/kanhatakeyama/AutoMultiTurnByCalm3-22B)| A synthetic instruction dataset. |
+| |[ramdom-to-fixed-multiturn-Calm3](https://huggingface.co/datasets/kanhatakeyama/ramdom-to-fixed-multiturn-Calm3)| A synthetic instruction dataset. |
+| |[wizardlm8x22b-logical-math-coding-sft_additional-ja](https://huggingface.co/datasets/kanhatakeyama/wizardlm8x22b-logical-math-coding-sft_additional-ja)| A synthetic instruction dataset. |
+| |[Synthetic-JP-EN-Coding-Dataset-567k](https://huggingface.co/datasets/Aratako/Synthetic-JP-EN-Coding-Dataset-567k)| A synthetic instruction dataset. We used a sampled subset. |
+|English |[FLAN](https://huggingface.co/datasets/Open-Orca/FLAN) | We used a sampled subset. |
+
+## Evaluation
+
+### llm-jp-eval (v1.3.1)
+
+We evaluated the models using 100 examples from the dev split.
+
+| Model name | average | EL | FA | HE | MC | MR | MT | NLI | QA | RC |
+| :--- | ---: | ---: | ---: | ---: | ---: | ---: | ---: | ---: | ---: | ---: |
+| [llm-jp-3-1.8b](https://huggingface.co/llm-jp/llm-jp-3-1.8b) | 0.3767 | 0.3725 | 0.1948 | 0.2350 | 0.2500 | 0.0900 | 0.7730 | 0.3080 | 0.4629 | 0.7040 |
+| [llm-jp-3-1.8b-instruct](https://huggingface.co/llm-jp/llm-jp-3-1.8b-instruct) | 0.4596 | 0.4280 | 0.1987 | 0.3250 | 0.3300 | 0.4200 | 0.7900 | 0.3520 | 0.4698 | 0.8224 |
+| [llm-jp-3-3.7b](https://huggingface.co/llm-jp/llm-jp-3-3.7b) | 0.4231 | 0.3812 | 0.2440 | 0.2200 | 0.1900 | 0.3600 | 0.7947 | 0.3800 | 0.4688 | 0.7694 |
+| [llm-jp-3-3.7b-instruct](https://huggingface.co/llm-jp/llm-jp-3-3.7b-instruct) | 0.5188 | 0.4191 | 0.2504 | 0.3400 | 0.5000 | 0.5800 | 0.8166 | 0.4500 | 0.4881 | 0.8247 |
+| [llm-jp-3-13b](https://huggingface.co/llm-jp/llm-jp-3-13b) | 0.5802 | 0.5570 | 0.2593 | 0.4600 | 0.7000 | 0.6300 | 0.8292 | 0.3460 | 0.5937 | 0.8469 |
+| [llm-jp-3-13b-instruct](https://huggingface.co/llm-jp/llm-jp-3-13b-instruct) | 0.6168 | 0.5408 | 0.2757 | 0.4950 | 0.9200 | 0.7100 | 0.8317 | 0.4640 | 0.4642 | 0.8500 |
+
+### Japanese MT Bench
+
+We evaluated the models using `gpt-4-0613`. Please see the [code](https://github.com/llm-jp/llm-leaderboard/tree/main) for details.
+
+| Model name | average | coding | extraction | humanities | math | reasoning | roleplay | stem | writing |
+| :--- | ---: | ---: | ---: | ---: | ---: | ---: | ---: | ---: | ---: |
+| [llm-jp-3-1.8b-instruct](https://huggingface.co/llm-jp/llm-jp-3-1.8b-instruct) | 4.93 | 1.50 | 4.70 | 7.80 | 1.55 | 2.60 | 7.80 | 6.10 | 7.40 |
+| [llm-jp-3-3.7b-instruct](https://huggingface.co/llm-jp/llm-jp-3-3.7b-instruct) | 5.50 | 1.95 | 4.05 | 8.25 | 2.25 | 4.00 | 8.80 | 7.25 | 7.45 |
+| [llm-jp-3-13b-instruct](https://huggingface.co/llm-jp/llm-jp-3-13b-instruct) | 6.47 | 3.15 | 7.05 | 9.15 | 3.75 | 5.40 | 8.30 | 7.50 | 7.45 |
+
+## Risks and Limitations
+
+The models released here are in the early stages of our research and development and have not been tuned to ensure outputs align with human intent and safety considerations.
+
+## Send Questions to
+
+llm-jp(at)nii.ac.jp
+
+## License
+
+[Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
+
+## Model Card Authors
+
+*The names are listed in alphabetical order.*
+
+Hirokazu Kiyomaru and Takashi Kodama.
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..8033da4
--- /dev/null
+++ b/config.json
@@ -0,0 +1,28 @@
+{
+  "_name_or_path": "llm-jp/llm-jp-3-1.8b",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 2048,
+  "initializer_range": 0.02,
+  "intermediate_size": 7168,
+  "max_position_embeddings": 4096,
+  "model_type": "llama",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "num_key_value_heads": 16,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 10000,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.40.2",
+  "use_cache": true,
+  "vocab_size": 99584
+}
diff --git a/configuration.json b/configuration.json
new file mode 100644
index 0000000..bbeeda1
--- /dev/null
+++ b/configuration.json
@@ -0,0 +1 @@
+{"framework": "pytorch", "task": "text-generation", "allow_remote": true}
\ No newline at end of file
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000..589fbca
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "transformers_version": "4.40.2"
+}
diff --git a/model.safetensors b/model.safetensors
new file mode 100644
index 0000000..44adc00
--- /dev/null
+++ b/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:300cfd5b4aa095c6e280778e815f049f9a5ebfcae584cc2d9fd6b629dbbd80b7
+size 3735253776
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000..51d5c15
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,51 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "cls_token": {
+    "content": "<CLS>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "<SEP>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diff --git a/tokenizer.json b/tokenizer.json
new file mode 100644
index 0000000..b111208
--- /dev/null
+++ b/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0fcf4e1e7a08e855273824678363335b0cd707937332ec1cc48eee259065219
+size 6409995
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000..ca274f4
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,84 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<mask>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "4": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "5": {
+      "content": "<CLS>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "6": {
+      "content": "<SEP>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "7": {
+      "content": "<EOD>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "chat_template": "{{bos_token}}{% for message in messages %}{% if message['role'] == 'user' %}{{ '\\n\\n### 指示:\\n' + message['content'] }}{% elif message['role'] == 'system' %}{{ '以下は、タスクを説明する指示です。要求を適切に満たす応答を書きなさい。' }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### 応答:\\n' + message['content'] + eos_token }}{% endif %}{% if loop.last and add_generation_prompt %}{{ '\\n\\n### 応答:\\n' }}{% endif %}{% endfor %}",
+  "clean_up_tokenization_spaces": false,
+  "cls_token": "<CLS>",
+  "eod_token": "<EOD>",
+  "eos_token": "</s>",
+  "extra_ids": 0,
+  "mask_token": "<mask>",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "sep_token": "<SEP>",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "unk_token": "<unk>"
+}