Initialize the project; model provided by the ModelHub XC community
Model: BEE-spoke-data/smol_llama-220M-GQA. Source: Original Platform.
.gitattributes (vendored, new file, 35 lines)
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

README.md (new file, 302 lines)
---
language:
- en
license: apache-2.0
tags:
- smol_llama
- llama2
datasets:
- JeanKaddour/minipile
- pszemraj/simple_wikipedia_LM
- mattymchen/refinedweb-3m
- BEE-spoke-data/knowledge-inoc-concat-v1
inference:
  parameters:
    max_new_tokens: 64
    do_sample: true
    temperature: 0.8
    repetition_penalty: 1.05
    no_repeat_ngram_size: 4
    eta_cutoff: 0.0006
    renormalize_logits: true
widget:
- text: My name is El Microondas the Wise, and
  example_title: El Microondas
- text: Kennesaw State University is a public
  example_title: Kennesaw State University
- text: Bungie Studios is an American video game developer. They are most famous for
    developing the award winning Halo series of video games. They also made Destiny.
    The studio was founded
  example_title: Bungie
- text: The Mona Lisa is a world-renowned painting created by
  example_title: Mona Lisa
- text: The Harry Potter series, written by J.K. Rowling, begins with the book titled
  example_title: Harry Potter Series
- text: 'Question: I have cities, but no houses. I have mountains, but no trees. I
    have water, but no fish. What am I?

    Answer:'
  example_title: Riddle
- text: The process of photosynthesis involves the conversion of
  example_title: Photosynthesis
- text: Jane went to the store to buy some groceries. She picked up apples, oranges,
    and a loaf of bread. When she got home, she realized she forgot
  example_title: Story Continuation
- text: 'Problem 2: If a train leaves Station A at 9:00 AM and travels at 60 mph,
    and another train leaves Station B at 10:00 AM and travels at 80 mph, when will
    they meet if the distance between the stations is 300 miles?

    To determine'
  example_title: Math Problem
- text: In the context of computer programming, an algorithm is
  example_title: Algorithm Definition
pipeline_tag: text-generation
model-index:
- name: smol_llama-220M-GQA
  results:
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: AI2 Reasoning Challenge (25-Shot)
      type: ai2_arc
      config: ARC-Challenge
      split: test
      args:
        num_few_shot: 25
    metrics:
    - type: acc_norm
      value: 24.83
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=BEE-spoke-data/smol_llama-220M-GQA
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: HellaSwag (10-Shot)
      type: hellaswag
      split: validation
      args:
        num_few_shot: 10
    metrics:
    - type: acc_norm
      value: 29.76
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=BEE-spoke-data/smol_llama-220M-GQA
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MMLU (5-Shot)
      type: cais/mmlu
      config: all
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 25.85
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=BEE-spoke-data/smol_llama-220M-GQA
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: TruthfulQA (0-shot)
      type: truthful_qa
      config: multiple_choice
      split: validation
      args:
        num_few_shot: 0
    metrics:
    - type: mc2
      value: 44.55
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=BEE-spoke-data/smol_llama-220M-GQA
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: Winogrande (5-shot)
      type: winogrande
      config: winogrande_xl
      split: validation
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 50.99
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=BEE-spoke-data/smol_llama-220M-GQA
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: GSM8k (5-shot)
      type: gsm8k
      config: main
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 0.68
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=BEE-spoke-data/smol_llama-220M-GQA
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: IFEval (0-Shot)
      type: HuggingFaceH4/ifeval
      args:
        num_few_shot: 0
    metrics:
    - type: inst_level_strict_acc and prompt_level_strict_acc
      value: 23.86
      name: strict accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=BEE-spoke-data/smol_llama-220M-GQA
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: BBH (3-Shot)
      type: BBH
      args:
        num_few_shot: 3
    metrics:
    - type: acc_norm
      value: 3.04
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=BEE-spoke-data/smol_llama-220M-GQA
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MATH Lvl 5 (4-Shot)
      type: hendrycks/competition_math
      args:
        num_few_shot: 4
    metrics:
    - type: exact_match
      value: 0.0
      name: exact match
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=BEE-spoke-data/smol_llama-220M-GQA
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: GPQA (0-shot)
      type: Idavidrein/gpqa
      args:
        num_few_shot: 0
    metrics:
    - type: acc_norm
      value: 0.78
      name: acc_norm
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=BEE-spoke-data/smol_llama-220M-GQA
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MuSR (0-shot)
      type: TAUR-Lab/MuSR
      args:
        num_few_shot: 0
    metrics:
    - type: acc_norm
      value: 9.07
      name: acc_norm
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=BEE-spoke-data/smol_llama-220M-GQA
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MMLU-PRO (5-shot)
      type: TIGER-Lab/MMLU-Pro
      config: main
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 1.66
      name: accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=BEE-spoke-data/smol_llama-220M-GQA
      name: Open LLM Leaderboard
---

# smol_llama: 220M GQA

A small 220M-parameter (total) decoder-only model. This is the first version of the model.

- 1024 hidden size, 10 layers
- GQA (32 attention heads, 8 key-value heads), context length 2048
- trained from scratch on one GPU :)
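
A minimal usage sketch (not part of the original card; standard `transformers` loading, with the sampling settings copied from the `inference.parameters` block in the front matter):

```python
# Generation sketch for this checkpoint. The sampling arguments mirror the
# card's inference.parameters; the rest is ordinary transformers usage and
# is not code shipped with this repo.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "BEE-spoke-data/smol_llama-220M-GQA"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.bfloat16)

inputs = tokenizer("My name is El Microondas the Wise, and", return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=64,
    do_sample=True,
    temperature=0.8,
    repetition_penalty=1.05,
    no_repeat_ngram_size=4,
    eta_cutoff=0.0006,
    renormalize_logits=True,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```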

## Links

[Here](https://huggingface.co/collections/BEE-spoke-data/finetuned-smol-220m-65998b080ae723e79c830f83) are some fine-tunes we did, but there are many more possibilities out there!

- instruct
  - openhermes - [link](https://huggingface.co/BEE-spoke-data/smol_llama-220M-openhermes)
  - open-instruct - [link](https://huggingface.co/BEE-spoke-data/smol_llama-220M-open_instruct)
- code
  - python (pypi) - [link](https://huggingface.co/BEE-spoke-data/beecoder-220M-python)
- zephyr DPO tune
  - SFT - [link](https://huggingface.co/BEE-spoke-data/zephyr-220m-sft-full)
  - full DPO - [link](https://huggingface.co/BEE-spoke-data/zephyr-220m-dpo-full)

---

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_BEE-spoke-data__smol_llama-220M-GQA)

| Metric                            | Value |
|-----------------------------------|------:|
| Avg.                              | 29.44 |
| AI2 Reasoning Challenge (25-Shot) | 24.83 |
| HellaSwag (10-Shot)               | 29.76 |
| MMLU (5-Shot)                     | 25.85 |
| TruthfulQA (0-shot)               | 44.55 |
| Winogrande (5-shot)               | 50.99 |
| GSM8k (5-shot)                    |  0.68 |

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_BEE-spoke-data__smol_llama-220M-GQA)

| Metric              | Value |
|---------------------|------:|
| Avg.                |  6.62 |
| IFEval (0-Shot)     | 23.86 |
| BBH (3-Shot)        |  3.04 |
| MATH Lvl 5 (4-Shot) |  0.00 |
| GPQA (0-shot)       |  0.78 |
| MuSR (0-shot)       |  9.07 |
| MMLU-PRO (5-shot)   |  1.66 |

config.json (new file, 28 lines)
{
  "_name_or_path": "BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v17-KI",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "max_position_embeddings": 2048,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 10,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.37.0.dev0",
  "use_cache": true,
  "vocab_size": 32128
}
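
Since `num_key_value_heads` (8) is one quarter of `num_attention_heads` (32), each key/value head is shared by four query heads, which is what makes this a GQA model. A back-of-the-envelope sketch (assuming standard Llama weight shapes; this script is not part of the repo) recovers the "220M" in the model name from these fields:

```python
# Estimate the parameter count implied by config.json, assuming the usual
# Llama layout: untied embeddings + lm_head, q/k/v/o attention projections,
# gate/up/down MLP projections, and two RMSNorm weights per layer.
import json

cfg = json.load(open("config.json"))
h = cfg["hidden_size"]             # 1024
layers = cfg["num_hidden_layers"]  # 10
inter = cfg["intermediate_size"]   # 4096
n_q = cfg["num_attention_heads"]   # 32
n_kv = cfg["num_key_value_heads"]  # 8
head_dim = h // n_q                # 32

attn = 2 * h * h + 2 * h * (n_kv * head_dim)  # q+o full width; k+v shrunk by GQA
mlp = 3 * h * inter                           # gate, up, down
per_layer = attn + mlp + 2 * h                # plus two RMSNorms
emb = cfg["vocab_size"] * h                   # tie_word_embeddings is false, so
total = 2 * emb + layers * per_layer + h      # embeddings and lm_head both count
print(f"{total / 1e6:.1f}M")  # ~217.9M, consistent with the 435,736,840-byte bf16 checkpoint
```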
evals/LOGS-smol_llama-220M-GQA.md (new file, 3996 lines)

File diff suppressed because one or more lines are too long.
evals/json_object_1.json (new file, 56 lines)
{
  "results": {
    "arc_easy": {
      "acc": 0.43813131313131315,
      "acc_stderr": 0.010180937100600052,
      "acc_norm": 0.4019360269360269,
      "acc_norm_stderr": 0.010060521220920566
    },
    "boolq": {
      "acc": 0.617737003058104,
      "acc_stderr": 0.00849914969044927
    },
    "lambada_openai": {
      "ppl": 64.94966274873535,
      "ppl_stderr": 2.5466406639926897,
      "acc": 0.26470017465554047,
      "acc_stderr": 0.006146408462993569
    },
    "openbookqa": {
      "acc": 0.166,
      "acc_stderr": 0.016656616876531142,
      "acc_norm": 0.28,
      "acc_norm_stderr": 0.020099950647503237
    },
    "piqa": {
      "acc": 0.5973884657236126,
      "acc_stderr": 0.011442395233488702,
      "acc_norm": 0.6088139281828074,
      "acc_norm_stderr": 0.0113862156067287
    },
    "winogrande": {
      "acc": 0.5098658247829518,
      "acc_stderr": 0.014049749833367589
    }
  },
  "versions": {
    "arc_easy": 0,
    "boolq": 1,
    "lambada_openai": 0,
    "openbookqa": 0,
    "piqa": 0,
    "winogrande": 0
  },
  "config": {
    "model": "hf-causal-experimental",
    "model_args": "pretrained=BEE-spoke-data/smol_llama-220M-GQA,revision=main,trust_remote_code=True,dtype='bfloat16'",
    "num_fewshot": 0,
    "batch_size": "8",
    "batch_sizes": [],
    "device": "cuda",
    "no_cache": false,
    "limit": null,
    "bootstrap_iters": 100000,
    "description_dict": {}
  }
}
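
These `evals/json_object_*.json` files have the shape of EleutherAI lm-evaluation-harness result dumps (note the `hf-causal-experimental` backend and `model_args` in `config`). A short sketch, assuming these file paths, that tabulates the per-task scores from any of them:

```python
# Print one line per task with whichever accuracy-style metrics are present
# (boolq has only acc, lambada_openai adds ppl, most tasks add acc_norm).
import json

with open("evals/json_object_1.json") as f:
    report = json.load(f)

for task, metrics in report["results"].items():
    shown = {k: v for k, v in metrics.items() if not k.endswith("_stderr")}
    line = "  ".join(f"{k}={v:.4f}" for k, v in shown.items())
    print(f"{task:18s} {line}")
```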
evals/json_object_2.json (new file, 25 lines)
{
  "results": {
    "arc_challenge": {
      "acc": 0.20392491467576793,
      "acc_stderr": 0.01177426247870226,
      "acc_norm": 0.25,
      "acc_norm_stderr": 0.012653835621466646
    }
  },
  "versions": {
    "arc_challenge": 0
  },
  "config": {
    "model": "hf-causal-experimental",
    "model_args": "pretrained=BEE-spoke-data/smol_llama-220M-GQA,revision=main,trust_remote_code=True,dtype='bfloat16'",
    "num_fewshot": 25,
    "batch_size": "8",
    "batch_sizes": [],
    "device": "cuda",
    "no_cache": false,
    "limit": null,
    "bootstrap_iters": 100000,
    "description_dict": {}
  }
}
evals/json_object_3.json (new file, 25 lines)
{
  "results": {
    "hellaswag": {
      "acc": 0.2752988047808765,
      "acc_stderr": 0.008917257773359156,
      "acc_norm": 0.2968127490039841,
      "acc_norm_stderr": 0.009120663626901691
    }
  },
  "versions": {
    "hellaswag": 0
  },
  "config": {
    "model": "hf-causal-experimental",
    "model_args": "pretrained=BEE-spoke-data/smol_llama-220M-GQA,revision=main,trust_remote_code=True,dtype='bfloat16'",
    "num_fewshot": 10,
    "batch_size": "8",
    "batch_sizes": [],
    "device": "cuda",
    "no_cache": false,
    "limit": 0.25,
    "bootstrap_iters": 100000,
    "description_dict": {}
  }
}
evals/json_object_4.json (new file, 25 lines)
{
  "results": {
    "truthfulqa_mc": {
      "mc1": 0.23745410036719705,
      "mc1_stderr": 0.014896277441041836,
      "mc2": 0.4402813457518687,
      "mc2_stderr": 0.015339681556915718
    }
  },
  "versions": {
    "truthfulqa_mc": 1
  },
  "config": {
    "model": "hf-causal-experimental",
    "model_args": "pretrained=BEE-spoke-data/smol_llama-220M-GQA,revision=main,trust_remote_code=True,dtype='bfloat16'",
    "num_fewshot": 0,
    "batch_size": "8",
    "batch_sizes": [],
    "device": "cuda",
    "no_cache": false,
    "limit": null,
    "bootstrap_iters": 100000,
    "description_dict": {}
  }
}
evals/json_object_5.json (new file, 417 lines)
{
  "results": {
    "hendrycksTest-abstract_algebra": {"acc": 0.2, "acc_stderr": 0.08164965809277261, "acc_norm": 0.2, "acc_norm_stderr": 0.08164965809277261},
    "hendrycksTest-anatomy": {"acc": 0.4, "acc_stderr": 0.1, "acc_norm": 0.4, "acc_norm_stderr": 0.1},
    "hendrycksTest-astronomy": {"acc": 0.32, "acc_stderr": 0.09521904571390466, "acc_norm": 0.32, "acc_norm_stderr": 0.09521904571390466},
    "hendrycksTest-business_ethics": {"acc": 0.2, "acc_stderr": 0.08164965809277261, "acc_norm": 0.2, "acc_norm_stderr": 0.08164965809277261},
    "hendrycksTest-clinical_knowledge": {"acc": 0.24, "acc_stderr": 0.08717797887081345, "acc_norm": 0.24, "acc_norm_stderr": 0.08717797887081345},
    "hendrycksTest-college_biology": {"acc": 0.2, "acc_stderr": 0.08164965809277262, "acc_norm": 0.2, "acc_norm_stderr": 0.08164965809277262},
    "hendrycksTest-college_chemistry": {"acc": 0.16, "acc_stderr": 0.0748331477354788, "acc_norm": 0.16, "acc_norm_stderr": 0.0748331477354788},
    "hendrycksTest-college_computer_science": {"acc": 0.32, "acc_stderr": 0.09521904571390466, "acc_norm": 0.32, "acc_norm_stderr": 0.09521904571390466},
    "hendrycksTest-college_mathematics": {"acc": 0.4, "acc_stderr": 0.1, "acc_norm": 0.4, "acc_norm_stderr": 0.1},
    "hendrycksTest-college_medicine": {"acc": 0.2, "acc_stderr": 0.08164965809277261, "acc_norm": 0.2, "acc_norm_stderr": 0.08164965809277261},
    "hendrycksTest-college_physics": {"acc": 0.16, "acc_stderr": 0.0748331477354788, "acc_norm": 0.16, "acc_norm_stderr": 0.0748331477354788},
    "hendrycksTest-computer_security": {"acc": 0.52, "acc_stderr": 0.10198039027185572, "acc_norm": 0.52, "acc_norm_stderr": 0.10198039027185572},
    "hendrycksTest-conceptual_physics": {"acc": 0.24, "acc_stderr": 0.08717797887081347, "acc_norm": 0.24, "acc_norm_stderr": 0.08717797887081347},
    "hendrycksTest-econometrics": {"acc": 0.2, "acc_stderr": 0.08164965809277261, "acc_norm": 0.2, "acc_norm_stderr": 0.08164965809277261},
    "hendrycksTest-electrical_engineering": {"acc": 0.16, "acc_stderr": 0.07483314773547882, "acc_norm": 0.16, "acc_norm_stderr": 0.07483314773547882},
    "hendrycksTest-elementary_mathematics": {"acc": 0.16, "acc_stderr": 0.0748331477354788, "acc_norm": 0.16, "acc_norm_stderr": 0.0748331477354788},
    "hendrycksTest-formal_logic": {"acc": 0.24, "acc_stderr": 0.08717797887081345, "acc_norm": 0.24, "acc_norm_stderr": 0.08717797887081345},
    "hendrycksTest-global_facts": {"acc": 0.4, "acc_stderr": 0.1, "acc_norm": 0.4, "acc_norm_stderr": 0.1},
    "hendrycksTest-high_school_biology": {"acc": 0.32, "acc_stderr": 0.09521904571390465, "acc_norm": 0.32, "acc_norm_stderr": 0.09521904571390465},
    "hendrycksTest-high_school_chemistry": {"acc": 0.2, "acc_stderr": 0.08164965809277261, "acc_norm": 0.2, "acc_norm_stderr": 0.08164965809277261},
    "hendrycksTest-high_school_computer_science": {"acc": 0.2, "acc_stderr": 0.08164965809277261, "acc_norm": 0.2, "acc_norm_stderr": 0.08164965809277261},
    "hendrycksTest-high_school_european_history": {"acc": 0.16, "acc_stderr": 0.0748331477354788, "acc_norm": 0.16, "acc_norm_stderr": 0.0748331477354788},
    "hendrycksTest-high_school_geography": {"acc": 0.32, "acc_stderr": 0.09521904571390466, "acc_norm": 0.32, "acc_norm_stderr": 0.09521904571390466},
    "hendrycksTest-high_school_government_and_politics": {"acc": 0.28, "acc_stderr": 0.09165151389911677, "acc_norm": 0.28, "acc_norm_stderr": 0.09165151389911677},
    "hendrycksTest-high_school_macroeconomics": {"acc": 0.28, "acc_stderr": 0.09165151389911681, "acc_norm": 0.28, "acc_norm_stderr": 0.09165151389911681},
    "hendrycksTest-high_school_mathematics": {"acc": 0.32, "acc_stderr": 0.09521904571390467, "acc_norm": 0.32, "acc_norm_stderr": 0.09521904571390467},
    "hendrycksTest-high_school_microeconomics": {"acc": 0.28, "acc_stderr": 0.09165151389911678, "acc_norm": 0.28, "acc_norm_stderr": 0.09165151389911678},
    "hendrycksTest-high_school_physics": {"acc": 0.16, "acc_stderr": 0.0748331477354788, "acc_norm": 0.16, "acc_norm_stderr": 0.0748331477354788},
    "hendrycksTest-high_school_psychology": {"acc": 0.28, "acc_stderr": 0.09165151389911678, "acc_norm": 0.28, "acc_norm_stderr": 0.09165151389911678},
    "hendrycksTest-high_school_statistics": {"acc": 0.4, "acc_stderr": 0.10000000000000002, "acc_norm": 0.4, "acc_norm_stderr": 0.10000000000000002},
    "hendrycksTest-high_school_us_history": {"acc": 0.28, "acc_stderr": 0.0916515138991168, "acc_norm": 0.28, "acc_norm_stderr": 0.0916515138991168},
    "hendrycksTest-high_school_world_history": {"acc": 0.28, "acc_stderr": 0.0916515138991168, "acc_norm": 0.28, "acc_norm_stderr": 0.0916515138991168},
    "hendrycksTest-human_aging": {"acc": 0.2, "acc_stderr": 0.08164965809277261, "acc_norm": 0.2, "acc_norm_stderr": 0.08164965809277261},
    "hendrycksTest-human_sexuality": {"acc": 0.44, "acc_stderr": 0.10132456102380442, "acc_norm": 0.44, "acc_norm_stderr": 0.10132456102380442},
    "hendrycksTest-international_law": {"acc": 0.32, "acc_stderr": 0.09521904571390466, "acc_norm": 0.32, "acc_norm_stderr": 0.09521904571390466},
    "hendrycksTest-jurisprudence": {"acc": 0.36, "acc_stderr": 0.09797958971132713, "acc_norm": 0.36, "acc_norm_stderr": 0.09797958971132713},
    "hendrycksTest-logical_fallacies": {"acc": 0.24, "acc_stderr": 0.08717797887081345, "acc_norm": 0.24, "acc_norm_stderr": 0.08717797887081345},
    "hendrycksTest-machine_learning": {"acc": 0.16, "acc_stderr": 0.0748331477354788, "acc_norm": 0.16, "acc_norm_stderr": 0.0748331477354788},
    "hendrycksTest-management": {"acc": 0.32, "acc_stderr": 0.09521904571390465, "acc_norm": 0.32, "acc_norm_stderr": 0.09521904571390465},
    "hendrycksTest-marketing": {"acc": 0.08, "acc_stderr": 0.05537749241945382, "acc_norm": 0.08, "acc_norm_stderr": 0.05537749241945382},
    "hendrycksTest-medical_genetics": {"acc": 0.28, "acc_stderr": 0.09165151389911678, "acc_norm": 0.28, "acc_norm_stderr": 0.09165151389911678},
    "hendrycksTest-miscellaneous": {"acc": 0.08, "acc_stderr": 0.05537749241945382, "acc_norm": 0.08, "acc_norm_stderr": 0.05537749241945382},
    "hendrycksTest-moral_disputes": {"acc": 0.12, "acc_stderr": 0.066332495807108, "acc_norm": 0.12, "acc_norm_stderr": 0.066332495807108},
    "hendrycksTest-moral_scenarios": {"acc": 0.2, "acc_stderr": 0.08164965809277261, "acc_norm": 0.2, "acc_norm_stderr": 0.08164965809277261},
    "hendrycksTest-nutrition": {"acc": 0.52, "acc_stderr": 0.10198039027185572, "acc_norm": 0.52, "acc_norm_stderr": 0.10198039027185572},
    "hendrycksTest-philosophy": {"acc": 0.2, "acc_stderr": 0.08164965809277261, "acc_norm": 0.2, "acc_norm_stderr": 0.08164965809277261},
    "hendrycksTest-prehistory": {"acc": 0.16, "acc_stderr": 0.07483314773547882, "acc_norm": 0.16, "acc_norm_stderr": 0.07483314773547882},
    "hendrycksTest-professional_accounting": {"acc": 0.32, "acc_stderr": 0.09521904571390465, "acc_norm": 0.32, "acc_norm_stderr": 0.09521904571390465},
    "hendrycksTest-professional_law": {"acc": 0.16, "acc_stderr": 0.07483314773547882, "acc_norm": 0.16, "acc_norm_stderr": 0.07483314773547882},
    "hendrycksTest-professional_medicine": {"acc": 0.4, "acc_stderr": 0.10000000000000002, "acc_norm": 0.4, "acc_norm_stderr": 0.10000000000000002},
    "hendrycksTest-professional_psychology": {"acc": 0.24, "acc_stderr": 0.08717797887081345, "acc_norm": 0.24, "acc_norm_stderr": 0.08717797887081345},
    "hendrycksTest-public_relations": {"acc": 0.2, "acc_stderr": 0.08164965809277261, "acc_norm": 0.2, "acc_norm_stderr": 0.08164965809277261},
    "hendrycksTest-security_studies": {"acc": 0.32, "acc_stderr": 0.09521904571390466, "acc_norm": 0.32, "acc_norm_stderr": 0.09521904571390466},
    "hendrycksTest-sociology": {"acc": 0.28, "acc_stderr": 0.09165151389911678, "acc_norm": 0.28, "acc_norm_stderr": 0.09165151389911678},
    "hendrycksTest-us_foreign_policy": {"acc": 0.24, "acc_stderr": 0.08717797887081345, "acc_norm": 0.24, "acc_norm_stderr": 0.08717797887081345},
    "hendrycksTest-virology": {"acc": 0.12, "acc_stderr": 0.06633249580710801, "acc_norm": 0.12, "acc_norm_stderr": 0.06633249580710801},
    "hendrycksTest-world_religions": {"acc": 0.28, "acc_stderr": 0.09165151389911678, "acc_norm": 0.28, "acc_norm_stderr": 0.09165151389911678}
  },
  "versions": {
    "hendrycksTest-abstract_algebra": 1,
    "hendrycksTest-anatomy": 1,
    "hendrycksTest-astronomy": 1,
    "hendrycksTest-business_ethics": 1,
    "hendrycksTest-clinical_knowledge": 1,
    "hendrycksTest-college_biology": 1,
    "hendrycksTest-college_chemistry": 1,
    "hendrycksTest-college_computer_science": 1,
    "hendrycksTest-college_mathematics": 1,
    "hendrycksTest-college_medicine": 1,
    "hendrycksTest-college_physics": 1,
    "hendrycksTest-computer_security": 1,
    "hendrycksTest-conceptual_physics": 1,
    "hendrycksTest-econometrics": 1,
    "hendrycksTest-electrical_engineering": 1,
    "hendrycksTest-elementary_mathematics": 1,
    "hendrycksTest-formal_logic": 1,
    "hendrycksTest-global_facts": 1,
    "hendrycksTest-high_school_biology": 1,
    "hendrycksTest-high_school_chemistry": 1,
    "hendrycksTest-high_school_computer_science": 1,
    "hendrycksTest-high_school_european_history": 1,
    "hendrycksTest-high_school_geography": 1,
    "hendrycksTest-high_school_government_and_politics": 1,
    "hendrycksTest-high_school_macroeconomics": 1,
    "hendrycksTest-high_school_mathematics": 1,
    "hendrycksTest-high_school_microeconomics": 1,
    "hendrycksTest-high_school_physics": 1,
    "hendrycksTest-high_school_psychology": 1,
    "hendrycksTest-high_school_statistics": 1,
    "hendrycksTest-high_school_us_history": 1,
    "hendrycksTest-high_school_world_history": 1,
    "hendrycksTest-human_aging": 1,
    "hendrycksTest-human_sexuality": 1,
    "hendrycksTest-international_law": 1,
    "hendrycksTest-jurisprudence": 1,
    "hendrycksTest-logical_fallacies": 1,
    "hendrycksTest-machine_learning": 1,
    "hendrycksTest-management": 1,
    "hendrycksTest-marketing": 1,
    "hendrycksTest-medical_genetics": 1,
    "hendrycksTest-miscellaneous": 1,
    "hendrycksTest-moral_disputes": 1,
    "hendrycksTest-moral_scenarios": 1,
    "hendrycksTest-nutrition": 1,
    "hendrycksTest-philosophy": 1,
    "hendrycksTest-prehistory": 1,
    "hendrycksTest-professional_accounting": 1,
    "hendrycksTest-professional_law": 1,
    "hendrycksTest-professional_medicine": 1,
    "hendrycksTest-professional_psychology": 1,
    "hendrycksTest-public_relations": 1,
    "hendrycksTest-security_studies": 1,
    "hendrycksTest-sociology": 1,
    "hendrycksTest-us_foreign_policy": 1,
    "hendrycksTest-virology": 1,
    "hendrycksTest-world_religions": 1
  },
  "config": {
    "model": "hf-causal-experimental",
    "model_args": "pretrained=BEE-spoke-data/smol_llama-220M-GQA,revision=main,trust_remote_code=True,dtype='bfloat16'",
    "num_fewshot": 5,
    "batch_size": "8",
    "batch_sizes": [],
    "device": "cuda",
    "no_cache": false,
    "limit": 0.25,
    "bootstrap_iters": 100000,
    "description_dict": {}
  }
}
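
This run used `"limit": 0.25`, so each MMLU subject was scored on only a quarter of its examples, which is why the per-subject standard errors are so large (roughly 0.06 to 0.10). A sketch (unweighted macro-average over subjects; filename assumed) for collapsing the 57 subjects into one number:

```python
# Macro-average the hendrycksTest-* accuracies from json_object_5.json.
# Each subject counts equally regardless of its size, which is how MMLU
# aggregates are commonly quoted.
import json

report = json.load(open("evals/json_object_5.json"))
accs = [m["acc"] for task, m in report["results"].items()
        if task.startswith("hendrycksTest-")]
print(f"{len(accs)} subjects, mean acc = {sum(accs) / len(accs):.4f}")
```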
generation_config.json (new file, 7 lines)
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "transformers_version": "4.37.0.dev0",
  "use_cache": false
}
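
In recent `transformers` versions, `generate()` takes its defaults from this file rather than from `config.json`. A quick sketch (standard API, not code from this repo) to inspect them:

```python
# Load the repo's default generation settings. Note that use_cache is
# false here even though config.json sets it to true.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("BEE-spoke-data/smol_llama-220M-GQA")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.use_cache)  # 1 2 False
```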
model.safetensors (new file, LFS pointer, 3 lines)
version https://git-lfs.github.com/spec/v1
oid sha256:5d2ae6c955eed0973977b5d70c24238afd09756acaaa5e81c8e5bf8483dd1413
size 435736840
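
Git LFS keeps only this three-line pointer in the repository; the ~436 MB weight blob is fetched from LFS storage on checkout. A minimal sketch for verifying an already-downloaded copy against the pointer's `oid`:

```python
# Hash the downloaded weights and compare with the sha256 recorded in the
# LFS pointer above.
import hashlib

EXPECTED = "5d2ae6c955eed0973977b5d70c24238afd09756acaaa5e81c8e5bf8483dd1413"

sha = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED, "checksum mismatch"
print("model.safetensors verified (expected size: 435,736,840 bytes)")
```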
special_tokens_map.json (new file, 30 lines)
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.model (new file, LFS pointer, 3 lines)
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
tokenizer_config.json (new file, 44 lines)
{
  "add_bos_token": true,
  "add_eos_token": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": false,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "</s>",
  "padding_side": "right",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "trust_remote_code": false,
  "unk_token": "<unk>",
  "use_default_system_prompt": true,
  "use_fast": true
}
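
A quick sketch (standard `transformers` API, not repo code) showing what these settings mean in practice: `add_bos_token: true` prepends `<s>` (id 1) to every encoding, `add_eos_token: false` leaves `</s>` off, and padding reuses the EOS token:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("BEE-spoke-data/smol_llama-220M-GQA")

ids = tok("hello world").input_ids
print(ids[0] == tok.bos_token_id)      # True  (add_bos_token)
print(ids[-1] == tok.eos_token_id)     # False (add_eos_token is false)
print(tok.pad_token == tok.eos_token)  # True: "</s>" serves as the pad token
```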