[Feature] Add recommendation configs for datasets (#1937)

* feat datasetrefine drop

* fix datasets in fullbench_int3

* fix

* fix

* back

* fix

* fix and doc

* feat

* fix hook

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* doc

* fix

* fix

* Update dataset-index.yml
This commit is contained in:
Myhs_phz 2025-03-25 14:54:13 +08:00 committed by GitHub
parent 07930b854a
commit 6118596362
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
41 changed files with 813 additions and 252 deletions

View File

@ -286,6 +286,8 @@ We have supported a statistical list of all datasets that can be used on this pl
You can quickly find the dataset you need from the list through sorting, filtering, and searching functions.
In addition, we provide a recommended configuration for each dataset, and some datasets also support LLM Judge-based configurations.
Please refer to the dataset statistics chapter of [official document](https://opencompass.org.cn/doc) for details.
<p align="right"><a href="#top">🔝Back to top</a></p>

View File

@ -281,6 +281,8 @@ OpenCompass 是面向大模型评测的一站式平台。其主要特点如下
您可以通过排序、筛选和搜索等功能从列表中快速找到您需要的数据集。
另外,我们为每个数据集都提供了一种推荐配置,部分数据集还支持了基于LLM Judge的配置。
详情请参阅 [官方文档](https://opencompass.org.cn/doc) 的数据集统计章节。
<p align="right"><a href="#top">🔝返回顶部</a></p>

File diff suppressed because it is too large Load Diff

View File

@ -14,6 +14,12 @@ On this page, we have listed all the datasets supported by OpenCompass.
You can use sorting and search functions to find the dataset you need.
We provide recommended running configurations for each dataset,
and in some datasets also offer recommended configurations based on LLM Judge.
You can quickly start evaluation tasks based on the recommended configurations.
However, please note that these configurations may be updated over time.
"""
with open('dataset_statistics.md', 'w') as f:
@ -24,7 +30,7 @@ load_path = str(OC_ROOT / 'dataset-index.yml')
with open(load_path, 'r') as f2:
data_list = yaml.load(f2, Loader=yaml.FullLoader)
HEADER = ['name', 'category', 'paper', 'configpath']
HEADER = ['name', 'category', 'paper', 'configpath', 'configpath_llmjudge']
def table_format(data_list):
@ -35,6 +41,13 @@ def table_format(data_list):
for index in HEADER:
if index == 'paper':
table_format_list_sub.append('[link](' + i[j][index] + ')')
elif index == 'configpath_llmjudge':
if i[j][index] == '':
table_format_list_sub.append(i[j][index])
else:
table_format_list_sub.append('[link](' +
GITHUB_PREFIX +
i[j][index] + ')')
elif index == 'configpath':
if isinstance(i[j][index], list):
sub_list_text = ''
@ -61,7 +74,10 @@ def generate_table(data_list, title=None):
if title is not None:
f.write(f'\n{title}')
f.write("""\n```{table}\n:class: dataset\n""")
header = ['Name', 'Category', 'Paper or Repository', 'Config File']
header = [
'Name', 'Category', 'Paper or Repository', 'Recommended Config',
'Recommended Config (LLM Judge)'
]
table_cfg = dict(tablefmt='pipe',
floatfmt='.2f',
numalign='right',

View File

@ -14,6 +14,10 @@ DATASETZOO_TEMPLATE = """\
你可以使用排序和搜索功能找到需要的数据集
我们对每一个数据集都给出了推荐的运行配置部分数据集中还提供了基于LLM Judge的推荐配置
你可以基于推荐配置快速启动评测但请注意推荐配置可能随时间推移被更新
"""
with open('dataset_statistics.md', 'w') as f:
@ -24,7 +28,7 @@ load_path = str(OC_ROOT / 'dataset-index.yml')
with open(load_path, 'r') as f2:
data_list = yaml.load(f2, Loader=yaml.FullLoader)
HEADER = ['name', 'category', 'paper', 'configpath']
HEADER = ['name', 'category', 'paper', 'configpath', 'configpath_llmjudge']
def table_format(data_list):
@ -35,6 +39,12 @@ def table_format(data_list):
for index in HEADER:
if index == 'paper':
table_format_list_sub.append('[链接](' + i[j][index] + ')')
elif index == 'configpath_llmjudge':
if i[j][index] == '':
table_format_list_sub.append(i[j][index])
else:
table_format_list_sub.append('[链接](' + GITHUB_PREFIX +
i[j][index] + ')')
elif index == 'configpath':
if isinstance(i[j][index], list):
sub_list_text = ''
@ -60,7 +70,7 @@ def generate_table(data_list, title=None):
if title is not None:
f.write(f'\n{title}')
f.write("""\n```{table}\n:class: dataset\n""")
header = ['数据集名称', '数据集类型', '原文或资源地址', '配置文件链接']
header = ['数据集名称', '数据集类型', '原文或资源地址', '推荐配置', '推荐配置(基于LLM评估)']
table_cfg = dict(tablefmt='pipe',
floatfmt='.2f',
numalign='right',

View File

@ -1,4 +1,4 @@
from mmengine.config import read_base
with read_base():
from .IFEval_gen_3321a3 import ifeval_datasets # noqa: F401, F403
from .IFEval_gen_353ae7 import ifeval_datasets # noqa: F401, F403

View File

@ -1,4 +1,4 @@
from mmengine.config import read_base
with read_base():
from .aime2024_gen_6e39a4 import aime2024_datasets # noqa: F401, F403
from .aime2024_gen_17d799 import aime2024_datasets # noqa: F401, F403

View File

@ -0,0 +1,40 @@
# AIME 2024 evaluation config: zero-shot chain-of-thought generation, scored
# by rule-based extraction of the final boxed answer (MATHEvaluator).
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import MATHEvaluator
from opencompass.datasets import Aime2024Dataset

# Reader: each sample exposes a 'question' column; 'answer' is the gold target.
aime2024_reader_cfg = dict(
    input_columns=['question'],
    output_column='answer'
)

# Inference: zero-shot prompt asking the model to reason step by step and wrap
# its final answer in \boxed{} so the evaluator can extract it. The doubled
# braces \\boxed{} keep the literal braces out of the template's {field} syntax.
aime2024_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='{question}\nPlease reason step by step, and put your final answer within \\boxed{}.'),
            ],
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer)
)

# Evaluation: MATHEvaluator compares the extracted prediction with 'answer'.
aime2024_eval_cfg = dict(
    evaluator=dict(type=MATHEvaluator)
)

aime2024_datasets = [
    dict(
        abbr='aime2024',
        type=Aime2024Dataset,
        path='opencompass/aime2024',  # dataset key resolved by OpenCompass' loader
        reader_cfg=aime2024_reader_cfg,
        infer_cfg=aime2024_infer_cfg,
        eval_cfg=aime2024_eval_cfg,
    )
]

View File

@ -0,0 +1,4 @@
# Default AIME 2024 LLM-judge config: alias re-exporting the pinned variant.
from mmengine.config import read_base

with read_base():
    from .aime2024_llmjudge_gen_5e9f4f import aime2024_datasets  # noqa: F401, F403

View File

@ -1,4 +1,4 @@
from mmengine.config import read_base
with read_base():
from .bbh_gen_5b92b0 import bbh_datasets # noqa: F401, F403
from .bbh_gen_ee62e9 import bbh_datasets # noqa: F401, F403

View File

@ -0,0 +1,99 @@
# BBH (BIG-Bench Hard) evaluation config: few-shot CoT prompts loaded from
# per-task files in lib_prompt/, with multiple-choice subtasks scored by
# BBHEvaluator_mcq and free-form subtasks by BBHEvaluator.
import os
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
# NOTE(review): AccEvaluator is imported but unused in this file.
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import BBHDataset, BBHEvaluator, bbh_mcq_postprocess, BBHEvaluator_mcq

# Reader: 'input' holds the question text, 'target' the gold answer.
bbh_reader_cfg = dict(input_columns=['input'], output_column='target')

# Subtasks answered by selecting an option (scored as multiple choice).
bbh_multiple_choice_sets = [
    'temporal_sequences',
    'disambiguation_qa',
    'date_understanding',
    'tracking_shuffled_objects_three_objects',
    'penguins_in_a_table',
    'geometric_shapes',
    'snarks',
    'ruin_names',
    'tracking_shuffled_objects_seven_objects',
    'tracking_shuffled_objects_five_objects',
    'logical_deduction_three_objects',
    'hyperbaton',
    'logical_deduction_five_objects',
    'logical_deduction_seven_objects',
    'movie_recommendation',
    'salient_translation_error_detection',
    'reasoning_about_colored_objects',
]

# Subtasks answered with free-form text.
bbh_free_form_sets = [
    'multistep_arithmetic_two',
    'navigate',
    'dyck_languages',
    'word_sorting',
    'sports_understanding',
    'boolean_expressions',
    'object_counting',
    'formal_fallacies',
    'causal_judgement',
    'web_of_lies',
]

bbh_datasets = []
for _name in bbh_multiple_choice_sets:
    # Few-shot exemplars for this subtask live in lib_prompt/<task>.txt.
    with open(os.path.join(os.path.dirname(__file__), 'lib_prompt', f'{_name}.txt'), 'r') as f:
        _hint = f.read()
    bbh_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt=
                    f"Follow the given examples and answer the question.\n{_hint}\n\nQ: {{input}}\nA: Let's think step by step."
                )
            ])),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer))
    # MCQ scoring: both prediction and gold pass through bbh_mcq_postprocess
    # so option formats are normalized before comparison.
    bbh_eval_cfg = dict(
        evaluator=dict(type=BBHEvaluator_mcq),
        pred_role='BOT',
        pred_postprocessor=dict(type=bbh_mcq_postprocess),
        dataset_postprocessor=dict(type=bbh_mcq_postprocess))
    # .copy() gives each dataset entry its own cfg dicts.
    bbh_datasets.append(
        dict(
            type=BBHDataset,
            path='opencompass/bbh',
            name=_name,
            abbr='bbh-' + _name,
            reader_cfg=bbh_reader_cfg,
            infer_cfg=bbh_infer_cfg.copy(),
            eval_cfg=bbh_eval_cfg.copy()))

for _name in bbh_free_form_sets:
    # Same prompt construction as above, but free-form answers are scored
    # directly by BBHEvaluator (no MCQ postprocessing).
    with open(os.path.join(os.path.dirname(__file__), 'lib_prompt', f'{_name}.txt'), 'r') as f:
        _hint = f.read()
    bbh_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt=
                    f"Follow the given examples and answer the question.\n{_hint}\n\nQ: {{input}}\nA: Let's think step by step."
                )
            ])),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer))
    bbh_eval_cfg = dict(evaluator=dict(type=BBHEvaluator), pred_role='BOT')
    bbh_datasets.append(
        dict(
            type=BBHDataset,
            path='opencompass/bbh',
            name=_name,
            abbr='bbh-' + _name,
            reader_cfg=bbh_reader_cfg,
            infer_cfg=bbh_infer_cfg.copy(),
            eval_cfg=bbh_eval_cfg.copy()))

View File

@ -0,0 +1,4 @@
# Default BBH LLM-judge config: alias re-exporting the pinned variant.
from mmengine.config import read_base

with read_base():
    from .bbh_llmjudge_gen_b5bdf1 import bbh_datasets  # noqa: F401, F403

View File

@ -0,0 +1,7 @@
# Aggregate config for BigCodeBench-Hard: pulls in the instruct and complete
# split configs and exposes their union as one flat dataset list.
from mmengine.config import read_base

with read_base():
    from .bigcodebench_hard_instruct_gen import bigcodebench_hard_instruct_datasets
    from .bigcodebench_hard_complete_gen import bigcodebench_hard_complete_datasets

# Collect every imported `*_datasets` list into a single list.
# Fix: the original filtered on endswith('_ds'), which matches neither of the
# imported names (both end in '_datasets'), so the aggregate was always empty.
bigcodebench_hard_datasets = sum(
    (v for k, v in locals().items() if k.endswith('_datasets')), [])

View File

@ -1,4 +1,4 @@
from mmengine.config import read_base
with read_base():
from .bigcodebench_hard_instruct_gen_8815eb import bigcodebench_hard_instruct_datasets # noqa: F401, F403
from .bigcodebench_hard_instruct_gen_c3d5ad import bigcodebench_hard_instruct_datasets # noqa: F401, F403

View File

@ -15,8 +15,9 @@ bigcodebench_hard_infer_cfg = dict(prompt_template=dict(
round=[
dict(role='HUMAN', prompt='{instruct_prompt}'),
])),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer))
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer)
)
bigcodebench_hard_eval_cfg = dict(
evaluator=dict(

View File

@ -1,4 +1,4 @@
from mmengine.config import read_base
with read_base():
from .cmmlu_gen_c13365 import cmmlu_datasets # noqa: F401, F403
from .cmmlu_0shot_cot_gen_305931 import cmmlu_datasets # noqa: F401, F403

View File

@ -0,0 +1,4 @@
# Default CMMLU LLM-judge config: alias re-exporting the pinned variant.
from mmengine.config import read_base

with read_base():
    from .cmmlu_llmjudge_gen_e1cd9a import cmmlu_datasets  # noqa: F401, F403

View File

@ -1,4 +1,4 @@
from mmengine.config import read_base
with read_base():
from .drop_openai_simple_evals_gen_3857b0 import drop_datasets
from .drop_openai_simple_evals_gen_3857b0 import drop_datasets # noqa: F401, F403

View File

@ -0,0 +1,4 @@
# Default DROP LLM-judge config: alias re-exporting the pinned variant.
from mmengine.config import read_base

with read_base():
    from .drop_llmjudge_gen_3857b0 import drop_datasets  # noqa: F401, F403

View File

@ -1,4 +1,4 @@
from mmengine.config import read_base
with read_base():
from .gpqa_openai_simple_evals_gen_5aeece import gpqa_datasets
from .gpqa_openai_simple_evals_gen_5aeece import gpqa_datasets # noqa: F401, F403

View File

@ -0,0 +1,4 @@
# Default GPQA LLM-judge config: alias re-exporting the pinned variant.
from mmengine.config import read_base

with read_base():
    from .gpqa_0shot_nocot_genericllmeval_gen_772ea0 import gpqa_datasets  # noqa: F401, F403

View File

@ -1,4 +1,4 @@
from mmengine.config import read_base
with read_base():
from .hellaswag_gen_6faab5 import hellaswag_datasets # noqa: F401, F403
from .hellaswag_10shot_gen_e42710 import hellaswag_datasets # noqa: F401, F403

View File

@ -0,0 +1,4 @@
# Default HellaSwag LLM-judge config: alias re-exporting the pinned variant.
from mmengine.config import read_base

with read_base():
    from .hellaswag_llmjudge_gen_809ef1 import hellaswag_datasets  # noqa: F401, F403

View File

@ -1,4 +1,4 @@
from mmengine.config import read_base
with read_base():
from .humaneval_gen_8e312c import humaneval_datasets # noqa: F401, F403
from .humaneval_openai_sample_evals_gen_dcae0e import humaneval_datasets # noqa: F401, F403

View File

@ -0,0 +1,4 @@
# Default KOR-Bench (0-shot, single) config: alias to the generation variant.
# NOTE(review): module name uses '0_shot' while the exported symbol uses
# '0shot' — confirm the target file exists under this exact name.
from mmengine.config import read_base

with read_base():
    from .korbench_single_0_shot_gen import korbench_0shot_single_datasets  # noqa: F401, F403

View File

@ -0,0 +1,4 @@
# Default KOR-Bench LLM-judge config: alias re-exporting the pinned variant.
from mmengine.config import read_base

with read_base():
    from .korbench_single_0shot_genericllmeval_gen_56cf43 import korbench_0shot_single_datasets  # noqa: F401, F403

View File

@ -7,10 +7,9 @@ from opencompass.datasets import generic_llmjudge_postprocess
categories = ['cipher', 'counterfactual', 'logic', 'operation', 'puzzle']
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
@ -25,11 +24,10 @@ GRADER_TEMPLATE = """
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{prompt}\n<Original Question End>\n\n
<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
@ -75,18 +73,18 @@ for category in categories:
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
],
round=[
dict(
role='HUMAN',
prompt = GRADER_TEMPLATE
),
]),
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
],
round=[
dict(
role='HUMAN',
prompt = GRADER_TEMPLATE
),
]),
),
dataset_cfg=dict(
type=korbenchDataset,

View File

@ -37,7 +37,7 @@ for category in categories:
infer_cfg = dict(
prompt_template=prompt_template,
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=1024),
inferencer=dict(type=GenInferencer),
)
# Evaluation configuration

View File

@ -7,10 +7,9 @@ from opencompass.datasets import generic_llmjudge_postprocess
categories = ['cipher', 'counterfactual', 'logic', 'operation', 'puzzle']
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
@ -29,7 +28,7 @@ GRADER_TEMPLATE = """
<Original Question Begin>: \n{prompt}\n<Original Question End>\n\n
<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
@ -49,7 +48,7 @@ for category in categories:
round=[
dict(
role='HUMAN',
prompt='{prompt}' # f-string
prompt='{prompt}' # f-string
)
]
)
@ -75,18 +74,18 @@ for category in categories:
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
],
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
],
round=[
dict(
role='HUMAN',
prompt = GRADER_TEMPLATE
),
]),
dict(
role='HUMAN',
prompt=GRADER_TEMPLATE
),
]),
),
dataset_cfg=dict(
type=korbenchDataset,
@ -114,4 +113,4 @@ for category in categories:
mode='singlescore',
)
korbench_0shot_single_datasets.append(korbench_dataset)
korbench_0shot_single_datasets.append(korbench_dataset)

View File

@ -1,4 +1,4 @@
from mmengine.config import read_base
with read_base():
from .livecodebench_gen_6966bc import LCB_datasets # noqa: F401, F403
from .livecodebench_gen_a4f90b import LCB_datasets # noqa: F401, F403

View File

@ -1,9 +1,9 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import MATHEvaluator
from opencompass.datasets import (
MATHDataset,
MATHEvaluator,
math_postprocess_v2,
normalize_final_answer,
)
@ -28,8 +28,7 @@ math_infer_cfg = dict(
# postprocess v2
math_eval_cfg = dict(
evaluator=dict(type=MATHEvaluator, version='v2'),
pred_postprocessor=dict(type=math_postprocess_v2),
evaluator=dict(type=MATHEvaluator)
)
math_datasets = [

View File

@ -0,0 +1,96 @@
# MATH (prm800k 500 split) evaluation config scored by an LLM judge:
# the model answers zero-shot, then GenericLLMEvaluator asks a judge model
# to grade each prediction against the gold solution as CORRECT/INCORRECT.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess
from opencompass.datasets import MATHDataset

# ----------------------------- Detailed Config -----------------------------

# Reader: 'problem' is the prompt field, 'solution' the gold target.
math_reader_cfg = dict(input_columns=['problem'], output_column='solution')

# Inference: zero-shot, answer expected inside \boxed{} (doubled braces keep
# the literal braces out of the template's {field} substitution).
math_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='{problem}\nRemember to put your final answer within \\boxed{}.'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

# Judge prompt: {problem}/{solution}/{prediction} are filled per sample.
# Runtime string — do not edit wording without re-validating judge parsing.
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{problem}\n<Original Question End>\n\n
<Gold Target Begin>: \n{solution}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()

# Evaluation configuration
math_eval_cfg = dict(
    evaluator=dict(
        type=GenericLLMEvaluator,
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                begin=[
                    dict(
                        role='SYSTEM',
                        fallback_role='HUMAN',
                        prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
                ],
                round=[
                    dict(
                        role='HUMAN',
                        prompt = GRADER_TEMPLATE
                    ),
                ]),
        ),
        # The judge re-loads the dataset to pair predictions with gold targets.
        dataset_cfg=dict(
            type=MATHDataset,
            path='opencompass/math',
            file_name = 'test_prm800k_500.json',
            reader_cfg=math_reader_cfg,
        ),
        # Empty judge_cfg: judge model is supplied by the run configuration.
        judge_cfg=dict(),
        dict_postprocessor=dict(type=generic_llmjudge_postprocess),
    ),
    pred_role='BOT',
)

math_datasets = [
    dict(
        type=MATHDataset,
        abbr='math_prm800k_500-llmjudge',
        path='opencompass/math',
        file_name = 'test_prm800k_500.json',
        reader_cfg=math_reader_cfg,
        infer_cfg=math_infer_cfg,
        eval_cfg=math_eval_cfg,
        mode='singlescore',
    )
]

View File

@ -1,36 +1,4 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import MATHDataset, MATHEvaluator, math_postprocess_v2, normalize_final_answer
from mmengine.config import read_base
math_reader_cfg = dict(input_columns=['problem'], output_column='solution')
math_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt='{problem}\nPlease reason step by step, and put your final answer within \\boxed{}.'),
]
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=1024),
)
# postprocess v2
math_eval_cfg = dict(
evaluator=dict(type=MATHEvaluator, version='v2'), pred_postprocessor=dict(type=math_postprocess_v2),
)
math_datasets = [
dict(
type=MATHDataset,
abbr='math_prm800k_500',
path='opencompass/math',
file_name = 'test_prm800k_500.json',
reader_cfg=math_reader_cfg,
infer_cfg=math_infer_cfg,
eval_cfg=math_eval_cfg,
)
]
with read_base():
from .math_prm800k_500_0shot_cot_gen import math_datasets # noqa: F401, F403

View File

@ -0,0 +1,36 @@
# MATH (prm800k 500 split) evaluation config: zero-shot CoT generation scored
# by MATHEvaluator v2 with the v2 answer postprocessor.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
# NOTE(review): normalize_final_answer is imported but unused in this file.
from opencompass.datasets import MATHDataset, MATHEvaluator, math_postprocess_v2, normalize_final_answer

# Reader: 'problem' is the prompt field, 'solution' the gold target.
math_reader_cfg = dict(input_columns=['problem'], output_column='solution')

# Inference: zero-shot, final answer requested inside \boxed{} (doubled braces
# escape the template's {field} syntax); generation capped at 1024 tokens.
math_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='{problem}\nPlease reason step by step, and put your final answer within \\boxed{}.'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024),
)

# postprocess v2
math_eval_cfg = dict(
    evaluator=dict(type=MATHEvaluator, version='v2'), pred_postprocessor=dict(type=math_postprocess_v2),
)

math_datasets = [
    dict(
        type=MATHDataset,
        abbr='math_prm800k_500',
        path='opencompass/math',
        file_name = 'test_prm800k_500.json',
        reader_cfg=math_reader_cfg,
        infer_cfg=math_infer_cfg,
        eval_cfg=math_eval_cfg,
    )
]

View File

@ -0,0 +1,4 @@
# Default MATH LLM-judge config: alias re-exporting the pinned variant.
from mmengine.config import read_base

with read_base():
    from .math_prm800k_500_0shot_nocot_genericllmeval_gen_6ff468 import math_datasets  # noqa: F401, F403

View File

@ -1,4 +1,4 @@
from mmengine.config import read_base
with read_base():
from .mmlu_gen_4d595a import mmlu_datasets # noqa: F401, F403
from .mmlu_openai_simple_evals_gen_b618ea import mmlu_datasets # noqa: F401, F403

View File

@ -0,0 +1,4 @@
# Default MMLU LLM-judge config: alias re-exporting the pinned variant.
from mmengine.config import read_base

with read_base():
    from .mmlu_llmjudge_gen_f4336b import mmlu_datasets  # noqa: F401, F403

View File

@ -0,0 +1,4 @@
# Default MMLU-Pro config: alias re-exporting the pinned 0-shot CoT variant.
from mmengine.config import read_base

with read_base():
    from .mmlu_pro_0shot_cot_gen_08c1de import mmlu_pro_datasets  # noqa: F401, F403

View File

@ -0,0 +1,4 @@
# Default MMLU-Pro LLM-judge config: alias re-exporting the pinned variant.
from mmengine.config import read_base

with read_base():
    from .mmlu_pro_0shot_nocot_genericllmeval_gen_08c1de import mmlu_pro_datasets  # noqa: F401, F403

View File

@ -0,0 +1,4 @@
# Default MuSR LLM-judge config: alias re-exporting the pinned variant.
from mmengine.config import read_base

with read_base():
    from .musr_llmjudge_gen_b47fd3 import musr_datasets  # noqa: F401, F403

View File

@ -94,21 +94,21 @@ def check_and_rename(filepath):
return None, None
def update_imports(data):
python_file, name_pairs = data
for filepath, new_file in name_pairs:
old_name = os.path.basename(filepath)[:-3]
new_name = os.path.basename(new_file)[:-3]
if not os.path.exists(python_file):
return
with open(python_file, 'r') as file:
filedata = file.read()
# Replace the old name with new name
new_data = filedata.replace(old_name, new_name)
if filedata != new_data:
with open(python_file, 'w') as file:
file.write(new_data)
# print(f"Updated imports in {python_file}")
# def update_imports(data):
# python_file, name_pairs = data
# for filepath, new_file in name_pairs:
# old_name = os.path.basename(filepath)[:-3]
# new_name = os.path.basename(new_file)[:-3]
# if not os.path.exists(python_file):
# return
# with open(python_file, 'r') as file:
# filedata = file.read()
# # Replace the old name with new name
# new_data = filedata.replace(old_name, new_name)
# if filedata != new_data:
# with open(python_file, 'w') as file:
# file.write(new_data)
# # print(f"Updated imports in {python_file}")
def main():
@ -134,11 +134,11 @@ def main():
return
with Pool(16) as p:
p.starmap(os.rename, name_pairs)
root_folder = 'configs'
python_files = glob.glob(f'{root_folder}/**/*.py', recursive=True)
update_data = [(python_file, name_pairs) for python_file in python_files]
with Pool(16) as p:
p.map(update_imports, update_data)
# root_folder = 'configs'
# python_files = glob.glob(f'{root_folder}/**/*.py', recursive=True)
# update_data = [(python_file, name_pairs) for python_file in python_files]
# with Pool(16) as p:
# p.map(update_imports, update_data)
if __name__ == '__main__':