{
  "results": {
    "mmlu_pro": {
      "exact_match,custom-extract": 0.44921875,
      "exact_match_stderr,custom-extract": 0.004329079184586284,
      "alias": "mmlu_pro"
    },
    "mmlu_pro_biology": {"alias": " - biology", "exact_match,custom-extract": 0.6889818688981869, "exact_match_stderr,custom-extract": 0.0172997664121759},
    "mmlu_pro_business": {"alias": " - business", "exact_match,custom-extract": 0.5031685678073511, "exact_match_stderr,custom-extract": 0.017811404839538456},
    "mmlu_pro_chemistry": {"alias": " - chemistry", "exact_match,custom-extract": 0.20759717314487633, "exact_match_stderr,custom-extract": 0.01206014205513508},
    "mmlu_pro_computer_science": {"alias": " - computer_science", "exact_match,custom-extract": 0.4878048780487805, "exact_match_stderr,custom-extract": 0.024716053947583156},
    "mmlu_pro_economics": {"alias": " - economics", "exact_match,custom-extract": 0.6504739336492891, "exact_match_stderr,custom-extract": 0.01642256336675628},
    "mmlu_pro_engineering": {"alias": " - engineering", "exact_match,custom-extract": 0.22910216718266255, "exact_match_stderr,custom-extract": 0.013507511079119967},
    "mmlu_pro_health": {"alias": " - health", "exact_match,custom-extract": 0.5488997555012225, "exact_match_stderr,custom-extract": 0.017408927699949964},
    "mmlu_pro_history": {"alias": " - history", "exact_match,custom-extract": 0.49868766404199477, "exact_match_stderr,custom-extract": 0.025649370453664066},
    "mmlu_pro_law": {"alias": " - law", "exact_match,custom-extract": 0.3315168029064487, "exact_match_stderr,custom-extract": 0.014193897930164855},
    "mmlu_pro_math": {"alias": " - math", "exact_match,custom-extract": 0.43671354552183567, "exact_match_stderr,custom-extract": 0.013498829158543524},
    "mmlu_pro_other": {"alias": " - other", "exact_match,custom-extract": 0.5281385281385281, "exact_match_stderr,custom-extract": 0.016431618149469095},
    "mmlu_pro_philosophy": {"alias": " - philosophy", "exact_match,custom-extract": 0.49298597194388777, "exact_match_stderr,custom-extract": 0.022403331087051327},
    "mmlu_pro_physics": {"alias": " - physics", "exact_match,custom-extract": 0.35411855273287146, "exact_match_stderr,custom-extract": 0.013274354114304878},
    "mmlu_pro_psychology": {"alias": " - psychology", "exact_match,custom-extract": 0.6516290726817042, "exact_match_stderr,custom-extract": 0.016876874376786855}
  },
  "groups": {
    "mmlu_pro": {
      "exact_match,custom-extract": 0.44921875,
      "exact_match_stderr,custom-extract": 0.004329079184586284,
      "alias": "mmlu_pro"
    }
  },
  "group_subtasks": {
    "mmlu_pro": [
      "mmlu_pro_biology",
      "mmlu_pro_business",
      "mmlu_pro_chemistry",
      "mmlu_pro_computer_science",
      "mmlu_pro_economics",
      "mmlu_pro_engineering",
      "mmlu_pro_health",
      "mmlu_pro_history",
      "mmlu_pro_law",
      "mmlu_pro_math",
      "mmlu_pro_other",
      "mmlu_pro_philosophy",
      "mmlu_pro_physics",
      "mmlu_pro_psychology"
    ]
  },
"configs": {
|
|
"mmlu_pro_biology": {
|
|
"task": "mmlu_pro_biology",
|
|
"task_alias": "biology",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x149f726b9fc0>, subject='biology')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726ba830>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about biology. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726b9a20>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_business": {
|
|
"task": "mmlu_pro_business",
|
|
"task_alias": "business",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x149f72603760>, subject='business')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726ba0e0>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about business. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726b9630>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_chemistry": {
|
|
"task": "mmlu_pro_chemistry",
|
|
"task_alias": "chemistry",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x149f72603c70>, subject='chemistry')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726b9c60>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about chemistry. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726bb130>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_computer_science": {
|
|
"task": "mmlu_pro_computer_science",
|
|
"task_alias": "computer_science",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x149f72603b50>, subject='computer science')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f72602b00>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about computer science. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726b8700>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_economics": {
|
|
"task": "mmlu_pro_economics",
|
|
"task_alias": "economics",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x149f726b9480>, subject='economics')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726b9510>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about economics. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726b8af0>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_engineering": {
|
|
"task": "mmlu_pro_engineering",
|
|
"task_alias": "engineering",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x149f726bb6d0>, subject='engineering')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726b88b0>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about engineering. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726b8790>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_health": {
|
|
"task": "mmlu_pro_health",
|
|
"task_alias": "health",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x149f726028c0>, subject='health')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f72602560>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about health. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f72602710>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_history": {
|
|
"task": "mmlu_pro_history",
|
|
"task_alias": "history",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x149f72603be0>, subject='history')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f72603f40>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about history. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726039a0>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_law": {
|
|
"task": "mmlu_pro_law",
|
|
"task_alias": "law",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x149f726023b0>, subject='law')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726037f0>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about law. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f72602a70>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_math": {
|
|
"task": "mmlu_pro_math",
|
|
"task_alias": "math",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x149f726b9990>, subject='math')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726ba5f0>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about math. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726ba4d0>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_other": {
|
|
"task": "mmlu_pro_other",
|
|
"task_alias": "other",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x149f726b80d0>, subject='other')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726b8040>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about other. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726b81f0>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_philosophy": {
|
|
"task": "mmlu_pro_philosophy",
|
|
"task_alias": "philosophy",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x149f72603400>, subject='philosophy')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f72603520>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about philosophy. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f72602d40>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_physics": {
|
|
"task": "mmlu_pro_physics",
|
|
"task_alias": "physics",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x149f72603250>, subject='physics')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f72603d00>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about physics. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f726032e0>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
},
|
|
"mmlu_pro_psychology": {
|
|
"task": "mmlu_pro_psychology",
|
|
"task_alias": "psychology",
|
|
"dataset_path": "TIGER-Lab/MMLU-Pro",
|
|
"test_split": "test",
|
|
"fewshot_split": "validation",
|
|
"process_docs": "functools.partial(<function process_docs at 0x149f729165f0>, subject='psychology')",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x14a252dda710>, including_answer=False)",
|
|
"doc_to_target": "answer",
|
|
"description": "The following are multiple choice questions (with answers) about psychology. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.\n",
|
|
"target_delimiter": " ",
|
|
"fewshot_delimiter": "\n\n",
|
|
"fewshot_config": {
|
|
"sampler": "first_n",
|
|
"doc_to_text": "functools.partial(<function format_cot_example at 0x149f72914430>, including_answer=True)",
|
|
"doc_to_target": ""
|
|
},
|
|
"num_fewshot": 5,
|
|
"metric_list": [
|
|
{
|
|
"metric": "exact_match",
|
|
"aggregation": "mean",
|
|
"higher_is_better": true,
|
|
"ignore_case": true,
|
|
"ignore_punctuation": true
|
|
}
|
|
],
|
|
"output_type": "generate_until",
|
|
"generation_kwargs": {
|
|
"until": [
|
|
"</s>",
|
|
"Q:",
|
|
"<|im_end|>"
|
|
],
|
|
"do_sample": false,
|
|
"temperature": 0.0
|
|
},
|
|
"repeats": 1,
|
|
"filter_list": [
|
|
{
|
|
"name": "custom-extract",
|
|
"filter": [
|
|
{
|
|
"function": "regex",
|
|
"regex_pattern": "answer is \\(?([ABCDEFGHIJ])\\)?"
|
|
},
|
|
{
|
|
"function": "take_first"
|
|
}
|
|
]
|
|
}
|
|
],
|
|
"should_decontaminate": false,
|
|
"metadata": {
|
|
"version": 1.0
|
|
}
|
|
}
|
|
},
|
|
"versions": {
|
|
"mmlu_pro": 2.0,
|
|
"mmlu_pro_biology": 1.0,
|
|
"mmlu_pro_business": 1.0,
|
|
"mmlu_pro_chemistry": 1.0,
|
|
"mmlu_pro_computer_science": 1.0,
|
|
"mmlu_pro_economics": 1.0,
|
|
"mmlu_pro_engineering": 1.0,
|
|
"mmlu_pro_health": 1.0,
|
|
"mmlu_pro_history": 1.0,
|
|
"mmlu_pro_law": 1.0,
|
|
"mmlu_pro_math": 1.0,
|
|
"mmlu_pro_other": 1.0,
|
|
"mmlu_pro_philosophy": 1.0,
|
|
"mmlu_pro_physics": 1.0,
|
|
"mmlu_pro_psychology": 1.0
|
|
},
|
|
"n-shot": {
|
|
"mmlu_pro_biology": 5,
|
|
"mmlu_pro_business": 5,
|
|
"mmlu_pro_chemistry": 5,
|
|
"mmlu_pro_computer_science": 5,
|
|
"mmlu_pro_economics": 5,
|
|
"mmlu_pro_engineering": 5,
|
|
"mmlu_pro_health": 5,
|
|
"mmlu_pro_history": 5,
|
|
"mmlu_pro_law": 5,
|
|
"mmlu_pro_math": 5,
|
|
"mmlu_pro_other": 5,
|
|
"mmlu_pro_philosophy": 5,
|
|
"mmlu_pro_physics": 5,
|
|
"mmlu_pro_psychology": 5
|
|
},
|
|
"higher_is_better": {
|
|
"mmlu_pro": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_biology": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_business": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_chemistry": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_computer_science": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_economics": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_engineering": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_health": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_history": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_law": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_math": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_other": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_philosophy": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_physics": {
|
|
"exact_match": true
|
|
},
|
|
"mmlu_pro_psychology": {
|
|
"exact_match": true
|
|
}
|
|
},
|
|
"n-samples": {
|
|
"mmlu_pro_biology": {
|
|
"original": 717,
|
|
"effective": 717
|
|
},
|
|
"mmlu_pro_business": {
|
|
"original": 789,
|
|
"effective": 789
|
|
},
|
|
"mmlu_pro_chemistry": {
|
|
"original": 1132,
|
|
"effective": 1132
|
|
},
|
|
"mmlu_pro_computer_science": {
|
|
"original": 410,
|
|
"effective": 410
|
|
},
|
|
"mmlu_pro_economics": {
|
|
"original": 844,
|
|
"effective": 844
|
|
},
|
|
"mmlu_pro_engineering": {
|
|
"original": 969,
|
|
"effective": 969
|
|
},
|
|
"mmlu_pro_health": {
|
|
"original": 818,
|
|
"effective": 818
|
|
},
|
|
"mmlu_pro_history": {
|
|
"original": 381,
|
|
"effective": 381
|
|
},
|
|
"mmlu_pro_law": {
|
|
"original": 1101,
|
|
"effective": 1101
|
|
},
|
|
"mmlu_pro_math": {
|
|
"original": 1351,
|
|
"effective": 1351
|
|
},
|
|
"mmlu_pro_other": {
|
|
"original": 924,
|
|
"effective": 924
|
|
},
|
|
"mmlu_pro_philosophy": {
|
|
"original": 499,
|
|
"effective": 499
|
|
},
|
|
"mmlu_pro_physics": {
|
|
"original": 1299,
|
|
"effective": 1299
|
|
},
|
|
"mmlu_pro_psychology": {
|
|
"original": 798,
|
|
"effective": 798
|
|
}
|
|
},
|
|
"config": {
|
|
"model": "vllm",
|
|
"model_args": "pretrained=Qwen/Qwen2.5-7B-Instruct,tensor_parallel_size=2,data_parallel_size=4,download_dir=/tmp",
|
|
"batch_size": 1,
|
|
"batch_sizes": [],
|
|
"device": null,
|
|
"use_cache": null,
|
|
"limit": null,
|
|
"bootstrap_iters": 100000,
|
|
"gen_kwargs": null,
|
|
"random_seed": 0,
|
|
"numpy_seed": 1234,
|
|
"torch_seed": 1234,
|
|
"fewshot_seed": 1234
|
|
},
|
|
"git_hash": "788a3672",
|
|
"date": 1738827469.6751115,
|
|
"pretty_env_info": "PyTorch version: 2.4.0+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.3 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: version 3.27.1\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-5.15.0-1064-azure-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.2.128\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA H100 80GB HBM3\nGPU 1: NVIDIA H100 80GB HBM3\nGPU 2: NVIDIA H100 80GB HBM3\nGPU 3: NVIDIA H100 80GB HBM3\nGPU 4: NVIDIA H100 80GB HBM3\nGPU 5: NVIDIA H100 80GB HBM3\nGPU 6: NVIDIA H100 80GB HBM3\nGPU 7: NVIDIA H100 80GB HBM3\n\nNvidia driver version: 535.161.08\ncuDNN version: Probably one of the following:\n/usr/lib/x86_64-linux-gnu/libcudnn.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.9.4\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.9.4\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 57 bits virtual\nByte Order: Little Endian\nCPU(s): 96\nOn-line CPU(s) list: 0-95\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) Platinum 8480C\nCPU family: 6\nModel: 143\nThread(s) per core: 1\nCore(s) per socket: 48\nSocket(s): 2\nStepping: 8\nBogoMIPS: 4000.00\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid aperfmperf pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx_vnni avx512_bf16 avx512vbmi umip waitpkg avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq la57 rdpid cldemote movdiri movdir64b fsrm serialize amx_bf16 avx512_fp16 amx_tile amx_int8 arch_capabilities\nHypervisor vendor: Microsoft\nVirtualization type: full\nL1d cache: 4.5 MiB (96 instances)\nL1i cache: 3 MiB (96 instances)\nL2 cache: 192 MiB (96 instances)\nL3 cache: 210 MiB (2 instances)\nNUMA node(s): 2\nNUMA node0 CPU(s): 0-47\nNUMA node1 CPU(s): 48-95\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Not affected\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Unknown: No mitigations\nVulnerability Retbleed: Vulnerable\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Vulnerable\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Retpolines; STIBP disabled; RSB filling; PBRSB-eIBRS Not affected; BHI Retpoline\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Not affected\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] 
onnx==1.14.0\n[pip3] pytorch-lightning==2.0.7\n[pip3] pytorch-quantization==2.1.2\n[pip3] torch==2.4.0\n[pip3] torch-tensorrt==2.0.0.dev0\n[pip3] torchaudio==2.1.0\n[pip3] torchdata==0.7.0a0\n[pip3] torchmetrics==1.2.0\n[pip3] torchvision==0.19.0\n[pip3] triton==3.0.0\n[conda] Could not collect",
|
|
"transformers_version": "4.48.2",
|
|
"upper_git_hash": null,
|
|
"tokenizer_pad_token": [
|
|
"<|endoftext|>",
|
|
"151643"
|
|
],
|
|
"tokenizer_eos_token": [
|
|
"<|im_end|>",
|
|
"151645"
|
|
],
|
|
"tokenizer_bos_token": [
|
|
null,
|
|
"None"
|
|
],
|
|
"eot_token_id": 151645,
|
|
"max_length": 32768,
|
|
"task_hashes": {
|
|
"mmlu_pro_biology": "78a27f3d4ea386dd0f7b5045f25bf654ba560ee9feac7b22eab763c73b4c37b9",
|
|
"mmlu_pro_business": "9d10f8702f23d8d5aa9546ebf453e9333a6998a272450bc468b8f74bca8a1824",
|
|
"mmlu_pro_chemistry": "0e3a8823fed7bd895e42f5053851f12b125f62edfcb36809e4c0aebec80f4506",
|
|
"mmlu_pro_computer_science": "26e8d9026807a7552684e4ddd1a373873449548e0f0ac8abeada18f32cc5f685",
|
|
"mmlu_pro_economics": "427580d476e69dc8f095f487f3081cbff1dbfdd3a05a4c13c024ae5bd6907262",
|
|
"mmlu_pro_engineering": "66bc34b22bf2c19eab04a753e65e8aea2e6834544b27516a6aa2769a9be0b9e5",
|
|
"mmlu_pro_health": "62edd914028ea5b83013192e458af0d22b843d25ce0ac6e280244d819615cdc4",
|
|
"mmlu_pro_history": "8295796e4901f2a6b42a2bd8b6e888f2e64ae24ce451f8ecef70db6351f3583d",
|
|
"mmlu_pro_law": "6969a0ecb6ac565ee29e658094231ddcf1016237aff3d903f5d219dd68a2e5dd",
|
|
"mmlu_pro_math": "eb48989afd83cb45e2dfd8c769fbe986927de9eb06ac775a7237e939150f20ec",
|
|
"mmlu_pro_other": "82e12fde3ce84ca4d478ce4623e9dd3877b8bd46c7fc1346c3d9e534df9cbba3",
|
|
"mmlu_pro_philosophy": "1cd86d5d342a6029560af9a2d51e397df4f537d81d4e6249a0917267c91073e1",
|
|
"mmlu_pro_physics": "dce786711af6f503b9b1463ca9e245de515859363f4ee7f0aa94656c3357a288",
|
|
"mmlu_pro_psychology": "526f25dba79a26df39f911b7d6010990c8e21d7c473c89a94e4298566d7cdeda"
|
|
},
|
|
"model_source": "vllm",
|
|
"model_name": "Qwen/Qwen2.5-7B-Instruct",
|
|
"model_name_sanitized": "Qwen__Qwen2.5-7B-Instruct",
|
|
"system_instruction": null,
|
|
"system_instruction_sha": null,
|
|
"fewshot_as_multiturn": false,
|
|
"chat_template": null,
|
|
"chat_template_sha": null,
|
|
"start_time": 628198.351009288,
|
|
"end_time": 628560.3023318,
|
|
"total_evaluation_time_seconds": "361.95132251200266"
|
|
} |