{
"results": {
"mmlu_pro": {
"exact_match,custom-extract": 0.23296210106382978,
"exact_match_stderr,custom-extract": 0.0037777214037287895,
"alias": "mmlu_pro"
},
"mmlu_pro_biology": {
"alias": " - biology",
"exact_match,custom-extract": 0.42677824267782427,
"exact_match_stderr,custom-extract": 0.01848442550876763
},
"mmlu_pro_business": {
"alias": " - business",
"exact_match,custom-extract": 0.24841571609632446,
"exact_match_stderr,custom-extract": 0.01539271648961898
},
"mmlu_pro_chemistry": {
"alias": " - chemistry",
"exact_match,custom-extract": 0.1068904593639576,
"exact_match_stderr,custom-extract": 0.009187355756744654
},
"mmlu_pro_computer_science": {
"alias": " - computer_science",
"exact_match,custom-extract": 0.23658536585365852,
"exact_match_stderr,custom-extract": 0.021014183737081388
},
"mmlu_pro_economics": {
"alias": " - economics",
"exact_match,custom-extract": 0.3175355450236967,
"exact_match_stderr,custom-extract": 0.016033281025390467
},
"mmlu_pro_engineering": {
"alias": " - engineering",
"exact_match,custom-extract": 0.14447884416924664,
"exact_match_stderr,custom-extract": 0.011300036008717563
},
"mmlu_pro_health": {
"alias": " - health",
"exact_match,custom-extract": 0.26894865525672373,
"exact_match_stderr,custom-extract": 0.015513064581043463
},
"mmlu_pro_history": {
"alias": " - history",
"exact_match,custom-extract": 0.2782152230971129,
"exact_match_stderr,custom-extract": 0.022988069716710875
},
"mmlu_pro_law": {
"alias": " - law",
"exact_match,custom-extract": 0.16621253405994552,
"exact_match_stderr,custom-extract": 0.011224402295539303
},
"mmlu_pro_math": {
"alias": " - math",
"exact_match,custom-extract": 0.23538119911176905,
"exact_match_stderr,custom-extract": 0.011546264113347198
},
"mmlu_pro_other": {
"alias": " - other",
"exact_match,custom-extract": 0.2694805194805195,
"exact_match_stderr,custom-extract": 0.014604232497008566
},
"mmlu_pro_philosophy": {
"alias": " - philosophy",
"exact_match,custom-extract": 0.20040080160320642,
"exact_match_stderr,custom-extract": 0.017937884810811502
},
"mmlu_pro_physics": {
"alias": " - physics",
"exact_match,custom-extract": 0.16320246343341033,
"exact_match_stderr,custom-extract": 0.010257374338618742
},
"mmlu_pro_psychology": {
"alias": " - psychology",
"exact_match,custom-extract": 0.35964912280701755,
"exact_match_stderr,custom-extract": 0.016998842357482922
}
},
"groups": {
|
|
"mmlu_pro": {
|
|
"exact_match,custom-extract": 0.23296210106382978,
|
|
"exact_match_stderr,custom-extract": 0.0037777214037287895,
|
|
"alias": "mmlu_pro"
|
|
}
|
|
},
|
|
"group_subtasks": {
|
|
"mmlu_pro": [
|
|
"mmlu_pro_biology",
|
|
"mmlu_pro_business",
|
|
"mmlu_pro_chemistry",
|
|
"mmlu_pro_computer_science",
|
|
"mmlu_pro_economics",
|
|
"mmlu_pro_engineering",
|
|
"mmlu_pro_health",
|
|
"mmlu_pro_history",
|
|
"mmlu_pro_law",
|
|
"mmlu_pro_math",
|
|
"mmlu_pro_other",
|
|
"mmlu_pro_philosophy",
|
|
"mmlu_pro_physics",
|
|
"mmlu_pro_psychology"
|
|
]
|
|
},
|
|
"configs": {
|
|
"mmlu_pro_biology": {
|
|
"task": "mmlu_pro_biology",
|
|
"task_alias": "biology",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x15260c16ec20>, subject='biology')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x15260c16c3a0>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about biology. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x15260c16e8c0>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_business": {
|
|
"task": "mmlu_pro_business",
|
|
"task_alias": "business",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x15260c16c8b0>, subject='business')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x15260c16e830>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about business. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x15260c16dc60>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_chemistry": {
|
|
"task": "mmlu_pro_chemistry",
|
|
"task_alias": "chemistry",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x15260c16edd0>, subject='chemistry')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x15260c16d900>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about chemistry. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x15260c16df30>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_computer_science": {
|
|
"task": "mmlu_pro_computer_science",
|
|
"task_alias": "computer_science",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x15260c16dbd0>, subject='computer science')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x15260c16f1c0>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about computer science. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x15260c16ea70>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_economics": {
|
|
"task": "mmlu_pro_economics",
|
|
"task_alias": "economics",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x15260c16c310>, subject='economics')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x15260c16d510>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about economics. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x15260c16f400>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_engineering": {
|
|
"task": "mmlu_pro_engineering",
|
|
"task_alias": "engineering",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x152620046440>, subject='engineering')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x1526200469e0>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about engineering. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x15260c16eb00>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_health": {
|
|
"task": "mmlu_pro_health",
|
|
"task_alias": "health",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x1526200470a0>, subject='health')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x152620047010>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about health. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x152620047e20>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_history": {
|
|
"task": "mmlu_pro_history",
|
|
"task_alias": "history",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x152620046dd0>, subject='history')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x152620046710>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about history. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x1526200471c0>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_law": {
|
|
"task": "mmlu_pro_law",
|
|
"task_alias": "law",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x152620046cb0>, subject='law')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x152620047d00>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about law. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x152620046c20>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_math": {
|
|
"task": "mmlu_pro_math",
|
|
"task_alias": "math",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x15260c16c1f0>, subject='math')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x15260c16c0d0>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about math. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x15260c16feb0>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_other": {
|
|
"task": "mmlu_pro_other",
|
|
"task_alias": "other",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x1526200a5d80>, subject='other')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x1526200a7370>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about other. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x152620047a30>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_philosophy": {
|
|
"task": "mmlu_pro_philosophy",
|
|
"task_alias": "philosophy",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x1526200465f0>, subject='philosophy')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x1526200464d0>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about philosophy. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x152620046560>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_physics": {
|
|
"task": "mmlu_pro_physics",
|
|
"task_alias": "physics",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x152620046290>, subject='physics')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x152620046170>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about physics. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x152620045fc0>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_psychology": {
|
|
"task": "mmlu_pro_psychology",
|
|
"task_alias": "psychology",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x152621286950>, subject='psychology')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x1526212869e0>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about psychology. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x152621286b00>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
}
|
|
},
|
|
"versions": {
|
|
"mmlu_pro": 2.0,
|
|
"mmlu_pro_biology": 1.0,
|
|
"mmlu_pro_business": 1.0,
|
|
"mmlu_pro_chemistry": 1.0,
|
|
"mmlu_pro_computer_science": 1.0,
|
|
"mmlu_pro_economics": 1.0,
|
|
"mmlu_pro_engineering": 1.0,
|
|
"mmlu_pro_health": 1.0,
|
|
"mmlu_pro_history": 1.0,
|
|
"mmlu_pro_law": 1.0,
|
|
"mmlu_pro_math": 1.0,
|
|
"mmlu_pro_other": 1.0,
|
|
"mmlu_pro_philosophy": 1.0,
|
|
"mmlu_pro_physics": 1.0,
|
|
"mmlu_pro_psychology": 1.0
|
|
},
|
|
"n-shot": {
|
|
"mmlu_pro_biology": 5,
|
|
"mmlu_pro_business": 5,
|
|
"mmlu_pro_chemistry": 5,
|
|
"mmlu_pro_computer_science": 5,
|
|
"mmlu_pro_economics": 5,
|
|
"mmlu_pro_engineering": 5,
|
|
"mmlu_pro_health": 5,
|
|
"mmlu_pro_history": 5,
|
|
"mmlu_pro_law": 5,
|
|
"mmlu_pro_math": 5,
|
|
"mmlu_pro_other": 5,
|
|
"mmlu_pro_philosophy": 5,
|
|
"mmlu_pro_physics": 5,
|
|
"mmlu_pro_psychology": 5
|
|
},
|
|
"higher_is_better": {
|
|
"mmlu_pro": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_biology": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_business": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_chemistry": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_computer_science": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_economics": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_engineering": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_health": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_history": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_law": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_math": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_other": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_philosophy": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_physics": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_psychology": {
|
|
"exact_match": true
|
|
}
|
|
},
|
|
"n-samples": {
|
|
"mmlu_pro_biology": {
|
|
"original": 717,
|
|
"effective": 717
|
|
},
|
|
"mmlu_pro_business": {
|
|
"original": 789,
|
|
"effective": 789
|
|
},
|
|
"mmlu_pro_chemistry": {
|
|
"original": 1132,
|
|
"effective": 1132
|
|
},
|
|
"mmlu_pro_computer_science": {
|
|
"original": 410,
|
|
"effective": 410
|
|
},
|
|
"mmlu_pro_economics": {
|
|
"original": 844,
|
|
"effective": 844
|
|
},
|
|
"mmlu_pro_engineering": {
|
|
"original": 969,
|
|
"effective": 969
|
|
},
|
|
"mmlu_pro_health": {
|
|
"original": 818,
|
|
"effective": 818
|
|
},
|
|
"mmlu_pro_history": {
|
|
"original": 381,
|
|
"effective": 381
|
|
},
|
|
"mmlu_pro_law": {
|
|
"original": 1101,
|
|
"effective": 1101
|
|
},
|
|
"mmlu_pro_math": {
|
|
"original": 1351,
|
|
"effective": 1351
|
|
},
|
|
"mmlu_pro_other": {
|
|
"original": 924,
|
|
"effective": 924
|
|
},
|
|
"mmlu_pro_philosophy": {
|
|
"original": 499,
|
|
"effective": 499
|
|
},
|
|
"mmlu_pro_physics": {
|
|
"original": 1299,
|
|
"effective": 1299
|
|
},
|
|
"mmlu_pro_psychology": {
|
|
"original": 798,
|
|
"effective": 798
|
|
}
|
|
},
|
|
"config": {
|
|
"model": "hf",
|
|
"model_args": "parallelize=False,pretrained=inceptionai/jais-family-6p7b-chat,trust_remote_code=True,mm=False",
|
|
"model_num_parameters": 6794562592,
|
|
"model_dtype": "torch.float32",
|
|
"model_revision": "main",
|
|
"model_sha": "683805efe6126c6536feb4aa23317e70222ac94c",
|
|
"batch_size": 1,
|
|
"batch_sizes": [],
|
|
"device": null,
|
|
"use_cache": null,
|
|
"limit": null,
|
|
"bootstrap_iters": 100000,
|
|
"gen_kwargs": null,
|
|
"random_seed": 0,
|
|
"numpy_seed": 1234,
|
|
"torch_seed": 1234,
|
|
"fewshot_seed": 1234
|
|
},
|
|
"git_hash": "3127d82f",
|
|
"date": 1731252010.0078447,
|
|
"pretty_env_info": "PyTorch version: 2.1.0a0+29c30b1\nIs debug build: False\nCUDA used to build PyTorch: 12.2\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.3 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: version 3.27.1\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-5.15.0-1064-azure-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.2.128\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA A100-SXM4-80GB\nGPU 1: NVIDIA A100-SXM4-80GB\nGPU 2: NVIDIA A100-SXM4-80GB\nGPU 3: NVIDIA A100-SXM4-80GB\nGPU 4: NVIDIA A100-SXM4-80GB\nGPU 5: NVIDIA A100-SXM4-80GB\nGPU 6: NVIDIA A100-SXM4-80GB\nGPU 7: NVIDIA A100-SXM4-80GB\n\nNvidia driver version: 535.161.08\ncuDNN version: Probably one of the following:\n/usr/lib/x86_64-linux-gnu/libcudnn.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.9.4\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 48 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 96\nOn-line CPU(s) list: 0-95\nVendor ID: AuthenticAMD\nModel name: AMD EPYC 7V12 64-Core Processor\nCPU family: 23\nModel: 49\nThread(s) per core: 1\nCore(s) per socket: 48\nSocket(s): 2\nStepping: 0\nBogoMIPS: 4890.87\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl tsc_reliable nonstop_tsc cpuid extd_apicid aperfmperf pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext perfctr_core ssbd vmmcall fsgsbase bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr rdpru arat umip rdpid\nHypervisor vendor: Microsoft\nVirtualization type: full\nL1d cache: 3 MiB (96 instances)\nL1i cache: 3 MiB (96 instances)\nL2 cache: 48 MiB (96 instances)\nL3 cache: 384 MiB (24 instances)\nNUMA node(s): 4\nNUMA node0 CPU(s): 0-23\nNUMA node1 CPU(s): 24-47\nNUMA node2 CPU(s): 48-71\nNUMA node3 CPU(s): 72-95\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Not affected\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Not affected\nVulnerability Retbleed: Mitigation; untrained return thunk; SMT disabled\nVulnerability Spec rstack overflow: Mitigation; safe RET, no microcode\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Retpolines; STIBP disabled; RSB filling; PBRSB-eIBRS Not affected; BHI Not affected\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Not affected\n\nVersions of relevant libraries:\n[pip3] numpy==1.22.2\n[pip3] pytorch-lightning==2.0.7\n[pip3] pytorch-quantization==2.1.2\n[pip3] 
torch==2.1.0a0+29c30b1\n[pip3] torch-tensorrt==2.0.0.dev0\n[pip3] torchaudio==2.1.0\n[pip3] torchdata==0.7.0a0\n[pip3] torchmetrics==1.2.0\n[pip3] torchvision==0.16.0a0\n[pip3] triton==2.0.0.dev20221202\n[conda] Could not collect",
|
|
"transformers_version": "4.38.2",
|
|
"upper_git_hash": null,
|
|
"tokenizer_pad_token": [
|
|
"<|endoftext|>",
|
|
"0"
|
|
],
|
|
"tokenizer_eos_token": [
|
|
"<|endoftext|>",
|
|
"0"
|
|
],
|
|
"tokenizer_bos_token": [
|
|
"<|endoftext|>",
|
|
"0"
|
|
],
|
|
"eot_token_id": 0,
|
|
"max_length": 2048,
|
|
"task_hashes": {},
|
|
"model_source": "hf",
|
|
"model_name": "inceptionai/jais-family-6p7b-chat",
|
|
"model_name_sanitized": "inceptionai__jais-family-6p7b-chat",
|
|
"system_instruction": null,
|
|
"system_instruction_sha": null,
|
|
"fewshot_as_multiturn": false,
|
|
"chat_template": null,
|
|
"chat_template_sha": null,
|
|
"start_time": 146328.049742312,
|
|
"end_time": 195242.496724594,
|
|
"total_evaluation_time_seconds": "48914.44698228201"
|
|
} |