From 3dbba11945463a3ca28e5610d23b38225f8357d4 Mon Sep 17 00:00:00 2001
From: Xu Song
Date: Tue, 14 May 2024 15:03:28 +0800
Subject: [PATCH] [Feat] Support dataset_suffix check for mixed configs (#973)

* [Feat] Support dataset_suffix check for mixed configs

* update mixed suffix

* update suffix

---------

Co-authored-by: Leymore
---
 .../datasets/GaokaoBench/GaokaoBench_mixed.py |   2 +-
 ..._f2038e.py => GaokaoBench_mixed_9af5ee.py} |   0
 ...mathbench_2024_wocircular_mixed_dc0207.py} |   0
 .../datasets/agieval/agieval_mixed_0fa998.py  |   7 +
 .../datasets/agieval/agieval_mixed_2f14ad.py  | 218 ------------------
 configs/datasets/collections/base_medium.py   |   4 +-
 .../datasets/collections/base_medium_llama.py |   4 +-
 .../datasets/collections/leaderboard/qwen.py  |   4 +-
 .../collections/leaderboard/qwen_chat.py      |   2 +-
 configs/eval_hf_llama2.py                     |   2 +-
 configs/eval_internlm2_keyset.py              |   2 +-
 tools/update_dataset_suffix.py                |   2 +-
 12 files changed, 18 insertions(+), 229 deletions(-)
 rename configs/datasets/GaokaoBench/{GaokaoBench_mixed_f2038e.py => GaokaoBench_mixed_9af5ee.py} (100%)
 rename configs/datasets/MathBench/{mathbench_2024_wocircular_mixed_649349.py => mathbench_2024_wocircular_mixed_dc0207.py} (100%)
 delete mode 100644 configs/datasets/agieval/agieval_mixed_2f14ad.py

diff --git a/configs/datasets/GaokaoBench/GaokaoBench_mixed.py b/configs/datasets/GaokaoBench/GaokaoBench_mixed.py
index baeccf45..a186eece 100644
--- a/configs/datasets/GaokaoBench/GaokaoBench_mixed.py
+++ b/configs/datasets/GaokaoBench/GaokaoBench_mixed.py
@@ -1,4 +1,4 @@
 from mmengine.config import read_base
 
 with read_base():
-    from .GaokaoBench_mixed_f2038e import GaokaoBench_datasets  # noqa: F401, F403
+    from .GaokaoBench_mixed_9af5ee import GaokaoBench_datasets  # noqa: F401, F403
diff --git a/configs/datasets/GaokaoBench/GaokaoBench_mixed_f2038e.py b/configs/datasets/GaokaoBench/GaokaoBench_mixed_9af5ee.py
similarity index 100%
rename from configs/datasets/GaokaoBench/GaokaoBench_mixed_f2038e.py
rename to configs/datasets/GaokaoBench/GaokaoBench_mixed_9af5ee.py
diff --git a/configs/datasets/MathBench/mathbench_2024_wocircular_mixed_649349.py b/configs/datasets/MathBench/mathbench_2024_wocircular_mixed_dc0207.py
similarity index 100%
rename from configs/datasets/MathBench/mathbench_2024_wocircular_mixed_649349.py
rename to configs/datasets/MathBench/mathbench_2024_wocircular_mixed_dc0207.py
diff --git a/configs/datasets/agieval/agieval_mixed_0fa998.py b/configs/datasets/agieval/agieval_mixed_0fa998.py
index 5c5b40b8..552a1bad 100644
--- a/configs/datasets/agieval/agieval_mixed_0fa998.py
+++ b/configs/datasets/agieval/agieval_mixed_0fa998.py
@@ -75,6 +75,13 @@ for _name in agieval_single_choice_sets:
         _hint = '答案是:'
     else:
         _hint = 'The answer is '
+    agieval_infer_cfg = dict(
+        prompt_template=dict(
+            type=PromptTemplate,
+            template={
+                label: dict(round=[
+                    dict(role='HUMAN', prompt='{question}\n{options}'),
+                    dict(role='BOT', prompt=f'{_hint}{label}')
                 ])
                 for label in _options
             }),
diff --git a/configs/datasets/agieval/agieval_mixed_2f14ad.py b/configs/datasets/agieval/agieval_mixed_2f14ad.py
deleted file mode 100644
index 169f8fe9..00000000
--- a/configs/datasets/agieval/agieval_mixed_2f14ad.py
+++ /dev/null
@@ -1,218 +0,0 @@
-from opencompass.openicl.icl_prompt_template import PromptTemplate
-from opencompass.openicl.icl_retriever import ZeroRetriever
-from opencompass.openicl.icl_inferencer import PPLInferencer, GenInferencer
-from opencompass.openicl.icl_evaluator import AccEvaluator
-from opencompass.datasets import AGIEvalDataset_v2, AGIEvalEvaluator, AGIEvalEvaluator_mcq
-from opencompass.utils.text_postprocessors import first_capital_postprocess_multi
-
-agieval_single_choice_sets = [
-    'gaokao-chinese',
-    'gaokao-english',
-    'gaokao-geography',
-    'gaokao-history',
-    'gaokao-biology',
-    'gaokao-chemistry',
-    'gaokao-mathqa',
-    'logiqa-zh',
-    'lsat-ar',
-    'lsat-lr',
-    'lsat-rc',
-    'logiqa-en',
-    'sat-math',
-    'sat-en',
-    'sat-en-without-passage',
-    'aqua-rat',
-]
-agieval_multiple_choices_sets = [
-    'gaokao-physics',
-    'jec-qa-kd',
-    'jec-qa-ca',
-]
-agieval_cloze_sets = ['gaokao-mathcloze', 'math']
-agieval_chinese_sets = [
-    'gaokao-chinese',
-    'gaokao-english',
-    'gaokao-geography',
-    'gaokao-history',
-    'gaokao-biology',
-    'gaokao-chemistry',
-    'gaokao-physics',
-    'gaokao-mathqa',
-    'logiqa-zh',
-    'gaokao-mathcloze',
-]
-agieval_english_sets = [
-    'lsat-ar',
-    'lsat-lr',
-    'lsat-rc',
-    'logiqa-en',
-    'sat-math',
-    'sat-en',
-    'sat-en-without-passage',
-    'aqua-rat',
-    'math',
-]
-agieval_gaokao_sets = [
-    'gaokao-chinese',
-    'gaokao-english',
-    'gaokao-geography',
-    'gaokao-history',
-    'gaokao-biology',
-    'gaokao-chemistry',
-    'gaokao-physics',
-    'gaokao-mathqa',
-]
-
-agieval_datasets = []
-for _name in agieval_single_choice_sets:
-    if _name in ['lsat-ar', 'lsat-lr', 'lsat-rc', 'aqua-rat']:
-        _options = ['A', 'B', 'C', 'D', 'E']
-    else:
-        _options = ['A', 'B', 'C', 'D']
-    if _name in agieval_chinese_sets:
-        _hint = '答案是:'
-    else:
-        _hint = 'The answer is '
-    agieval_infer_cfg = dict(
-        prompt_template=dict(
-            type=PromptTemplate,
-            template={
-                label: dict(round=[
-                    dict(role='HUMAN', prompt='{question}\n{options}'),
-                    dict(role='BOT', prompt=f'{_hint}{label}')
-                ])
-                for label in _options
-            }),
-        retriever=dict(type=ZeroRetriever),
-        inferencer=dict(type=PPLInferencer, labels=_options))
-
-    agieval_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
-
-    agieval_datasets.append(
-        dict(
-            type=AGIEvalDataset_v2,
-            path='./data/AGIEval/data/v1/',
-            name=_name,
-            abbr='agieval-' + _name,
-            setting_name='zero-shot',
-            reader_cfg=dict(
-                input_columns=['question', 'options'] + _options,
-                output_column='label'),
-            infer_cfg=agieval_infer_cfg.copy(),
-            eval_cfg=agieval_eval_cfg.copy()))
-
-for _name in agieval_multiple_choices_sets:
-    if _name in agieval_chinese_sets:
-        _hint = '答案是: '
-    else:
-        _hint = 'The answer is '
-    agieval_infer_cfg = dict(
-        prompt_template=dict(
-            type=PromptTemplate,
-            template=dict(round=[
-                dict(role='HUMAN', prompt=f'{{question}}\n{{options}}\n{_hint}')
-            ])),
-        retriever=dict(type=ZeroRetriever),
-        inferencer=dict(type=GenInferencer, max_out_len=1024))
-
-    agieval_eval_cfg = dict(
-        evaluator=dict(type=AGIEvalEvaluator_mcq),
-        pred_postprocessor=dict(type=first_capital_postprocess_multi))
-
-    agieval_datasets.append(
-        dict(
-            type=AGIEvalDataset_v2,
-            path='./data/AGIEval/data/v1/',
-            name=_name,
-            abbr='agieval-' + _name,
-            setting_name='zero-shot',
-            reader_cfg=dict(
-                input_columns=['question', 'options'], output_column='label'),
-            infer_cfg=agieval_infer_cfg.copy(),
-            eval_cfg=agieval_eval_cfg.copy()))
-
-for _name in agieval_cloze_sets:
-    if _name in agieval_chinese_sets:
-        _hint = '答案是:'
-    else:
-        _hint = 'The answer is '
-    agieval_infer_cfg = dict(
-        prompt_template=dict(
-            type=PromptTemplate,
-            template=dict(
-                round=[dict(role='HUMAN', prompt=f'{{question}}{_hint}')])),
-        retriever=dict(type=ZeroRetriever),
-        inferencer=dict(type=GenInferencer, max_out_len=1024))
-
-    agieval_eval_cfg = dict(evaluator=dict(type=AGIEvalEvaluator))
-
-    agieval_datasets.append(
-        dict(
-            type=AGIEvalDataset_v2,
-            path='./data/AGIEval/data/v1/',
-            name=_name,
-            abbr='agieval-' + _name,
-            setting_name='zero-shot',
-            reader_cfg=dict(
-                input_columns=['question', 'options'], output_column='label'),
-            infer_cfg=agieval_infer_cfg.copy(),
-            eval_cfg=agieval_eval_cfg.copy()))
-
-for _item in agieval_datasets:
-    _name = _item['name']
-    _intro = {
-        'gaokao-chinese':
-        '以下是一道中国高考语文选择题,请选择正确的答案。',
-        'gaokao-english':
-        '以下是一道中国高考英语选择题,请选择正确的答案。',
-        'gaokao-geography':
-        '以下是一道中国高考地理选择题,请选择正确的答案。',
-        'gaokao-history':
-        '以下是一道中国高考历史选择题,请选择正确的答案。',
-        'gaokao-biology':
-        '以下是一道中国高考生物选择题,请选择正确的答案。',
-        'gaokao-chemistry':
-        '以下是一道中国高考化学选择题,请选择正确的答案。',
-        'gaokao-physics':
-        '以下是一道中国高考物理选择题,请选择正确的答案。',
-        'gaokao-mathqa':
-        '以下是一道中国高考数学选择题,请选择正确的答案。',
-        'logiqa-zh':
-        '以下是一道中国公务员考试题,请选择正确的答案。',
-        'lsat-ar':
-        'The following is a LSAT Analytical Reasoning question. Please select the correct answer.',
-        'lsat-lr':
-        'The following is a LSAT Logical Reasoning question. Please select the correct answer.',
-        'lsat-rc':
-        'The following is a LSAT Reading Comprehension question. Please select the correct answer.',
-        'logiqa-en':
-        'The following is a Logic Reasoning question. Please select the correct answer.',
-        'sat-math':
-        'The following is a SAT Math question. Please select the correct answer.',
-        'sat-en':
-        'The following is a SAT English question. Please select the correct answer.',
-        'sat-en-without-passage':
-        'The following is a SAT English question. Please select the correct answer.',
-        'aqua-rat':
-        'The following is a AQUA-RAT question. Please select the correct answer.',
-        'jec-qa-kd':
-        '以下是一道中国司法考试基础知识题,请选择正确的答案。',
-        'jec-qa-ca':
-        '以下是一道中国司法考试案例分析题,请选择正确的答案。',
-        'gaokao-mathcloze':
-        '以下是一道中国高考数学填空题,请填入正确的答案。',
-        'math':
-        'The following is a Math question. Please select the correct answer.',
-    }[_name]
-    _templates = _item['infer_cfg']['prompt_template']['template']
-
-    if _item['infer_cfg']['inferencer']['type'] == PPLInferencer:
-        for _label in _templates:
-            _templates[_label]['round'][0][
-                'prompt'] = _intro + '\n' + _templates[_label]['round'][0][
-                    'prompt']
-    else:
-        _templates['round'][0][
-            'prompt'] = _intro + '\n' + _templates['round'][0]['prompt']
-
-del _item, _intro, _templates, _label, _name, _options, _hint, agieval_infer_cfg, agieval_eval_cfg
diff --git a/configs/datasets/collections/base_medium.py b/configs/datasets/collections/base_medium.py
index 3b61cae0..1af23918 100644
--- a/configs/datasets/collections/base_medium.py
+++ b/configs/datasets/collections/base_medium.py
@@ -3,8 +3,8 @@ from mmengine.config import read_base
 with read_base():
     from ..mmlu.mmlu_ppl_ac766d import mmlu_datasets
     from ..ceval.ceval_ppl_578f8d import ceval_datasets
-    from ..agieval.agieval_mixed_2f14ad import agieval_datasets
-    from ..GaokaoBench.GaokaoBench_mixed_f2038e import GaokaoBench_datasets
+    from ..agieval.agieval_mixed_713d14 import agieval_datasets
+    from ..GaokaoBench.GaokaoBench_mixed_9af5ee import GaokaoBench_datasets
     from ..bbh.bbh_gen_5b92b0 import bbh_datasets
     from ..humaneval.humaneval_gen_8e312c import humaneval_datasets
     from ..mbpp.deprecated_mbpp_gen_1e1056 import mbpp_datasets
diff --git a/configs/datasets/collections/base_medium_llama.py b/configs/datasets/collections/base_medium_llama.py
index 968e3bfd..e36a51f5 100644
--- a/configs/datasets/collections/base_medium_llama.py
+++ b/configs/datasets/collections/base_medium_llama.py
@@ -3,8 +3,8 @@ from mmengine.config import read_base
 with read_base():
     from ..mmlu.mmlu_ppl_ac766d import mmlu_datasets
     from ..ceval.ceval_ppl_578f8d import ceval_datasets
-    from ..agieval.agieval_mixed_2f14ad import agieval_datasets
-    from ..GaokaoBench.GaokaoBench_mixed_f2038e import GaokaoBench_datasets
+    from ..agieval.agieval_mixed_713d14 import agieval_datasets
+    from ..GaokaoBench.GaokaoBench_mixed_9af5ee import GaokaoBench_datasets
     from ..bbh.bbh_gen_5b92b0 import bbh_datasets
     from ..humaneval.humaneval_gen_a82cae import humaneval_datasets
     from ..mbpp.deprecated_mbpp_gen_1e1056 import mbpp_datasets
diff --git a/configs/datasets/collections/leaderboard/qwen.py b/configs/datasets/collections/leaderboard/qwen.py
index 6e0c8cab..fbaf660d 100644
--- a/configs/datasets/collections/leaderboard/qwen.py
+++ b/configs/datasets/collections/leaderboard/qwen.py
@@ -2,10 +2,10 @@ from mmengine.config import read_base
 
 with read_base():
     from ...ceval.ceval_ppl_578f8d import ceval_datasets
-    from ...agieval.agieval_mixed_2f14ad import agieval_datasets
+    from ...agieval.agieval_mixed_713d14 import agieval_datasets
     from ...mmlu.mmlu_ppl_ac766d import mmlu_datasets
     from ...cmmlu.cmmlu_ppl_8b9c76 import cmmlu_datasets
-    from ...GaokaoBench.GaokaoBench_mixed_f2038e import GaokaoBench_datasets
+    from ...GaokaoBench.GaokaoBench_mixed_9af5ee import GaokaoBench_datasets
     from ...ARC_c.ARC_c_gen_1e0de5 import ARC_c_datasets
     from ...ARC_e.ARC_e_gen_1e0de5 import ARC_e_datasets
diff --git a/configs/datasets/collections/leaderboard/qwen_chat.py b/configs/datasets/collections/leaderboard/qwen_chat.py
index d1c4d851..918a8a05 100644
--- a/configs/datasets/collections/leaderboard/qwen_chat.py
+++ b/configs/datasets/collections/leaderboard/qwen_chat.py
@@ -2,7 +2,7 @@ from mmengine.config import read_base
 
 with read_base():
     from ...ceval.ceval_gen_5f30c7 import ceval_datasets
-    from ...agieval.agieval_mixed_2f14ad import agieval_datasets
+    from ...agieval.agieval_mixed_713d14 import agieval_datasets
     from ...mmlu.mmlu_gen_4d595a import mmlu_datasets
     from ...cmmlu.cmmlu_gen_c13365 import cmmlu_datasets
     from ...GaokaoBench.GaokaoBench_gen_5cfe9e import GaokaoBench_datasets
diff --git a/configs/eval_hf_llama2.py b/configs/eval_hf_llama2.py
index bec70c16..b595d404 100644
--- a/configs/eval_hf_llama2.py
+++ b/configs/eval_hf_llama2.py
@@ -6,7 +6,7 @@ with read_base():
     from .datasets.nq.nq_open_gen_e93f8a import nq_datasets
     from .datasets.gsm8k.gsm8k_gen_3309bd import gsm8k_datasets
     from .datasets.humaneval.humaneval_gen_a82cae import humaneval_datasets
-    from .datasets.agieval.agieval_mixed_2f14ad import agieval_datasets
+    from .datasets.agieval.agieval_mixed_713d14 import agieval_datasets
     from .datasets.SuperGLUE_BoolQ.SuperGLUE_BoolQ_ppl_314797 import BoolQ_datasets
     from .datasets.hellaswag.hellaswag_ppl_a6e128 import hellaswag_datasets
     from .datasets.obqa.obqa_ppl_6aac9e import obqa_datasets
diff --git a/configs/eval_internlm2_keyset.py b/configs/eval_internlm2_keyset.py
index 497b11d1..a1ce376c 100644
--- a/configs/eval_internlm2_keyset.py
+++ b/configs/eval_internlm2_keyset.py
@@ -2,7 +2,7 @@ from mmengine.config import read_base
 
 with read_base():
     from .datasets.mmlu.mmlu_ppl_ac766d import mmlu_datasets
-    from .datasets.agieval.agieval_mixed_2f14ad import agieval_datasets
+    from .datasets.agieval.agieval_mixed_713d14 import agieval_datasets
     from .datasets.bbh.bbh_gen_5b92b0 import bbh_datasets
     from .datasets.gsm8k.gsm8k_gen_1d7fe4 import gsm8k_datasets
     from .datasets.math.math_gen_265cce import math_datasets
diff --git a/tools/update_dataset_suffix.py b/tools/update_dataset_suffix.py
index 8738dc26..a49f37eb 100755
--- a/tools/update_dataset_suffix.py
+++ b/tools/update_dataset_suffix.py
@@ -75,7 +75,7 @@ def get_hash(path):
 
 def check_and_rename(filepath):
     base_name = os.path.basename(filepath)
-    match = re.match(r'(.*)_(gen|ppl|ll)_(.*).py', base_name)
+    match = re.match(r'(.*)_(gen|ppl|ll|mixed)_(.*).py', base_name)
     if match:
         dataset, mode, old_hash = match.groups()
         new_hash = get_hash(filepath)
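
Note on the tooling change: tools/update_dataset_suffix.py derives the hash suffix in a dataset config's filename from the file's content and renames the file when the two disagree, but its filename pattern previously matched only _gen_, _ppl_, and _ll_ configs, so *_mixed_*.py files were never checked. The sketch below is a simplified illustration of that check using the extended pattern from this patch; it is not the repository's exact code, the helper name propose_rename is made up, and get_hash stands in for the tool's own content-hash helper (assumed to return the short hex hash embedded in config file names).

# Illustrative sketch, not the actual tools/update_dataset_suffix.py implementation.
import os
import re

# Pattern after this patch: "mixed" joins gen/ppl/ll as a recognized config mode.
SUFFIX_RE = re.compile(r'(.*)_(gen|ppl|ll|mixed)_(.*)\.py')


def propose_rename(filepath, get_hash):
    """Return (old_path, new_path) when the filename hash is stale, else None."""
    base_name = os.path.basename(filepath)
    match = SUFFIX_RE.match(base_name)
    if not match:
        return None  # not a hash-suffixed dataset config
    dataset, mode, old_hash = match.groups()
    new_hash = get_hash(filepath)  # recompute the hash from the file content
    if old_hash == new_hash:
        return None  # suffix already matches the content hash
    new_name = f'{dataset}_{mode}_{new_hash}.py'
    return filepath, os.path.join(os.path.dirname(filepath), new_name)

Run over the mixed configs, a check of this kind is what yields the renames applied above, e.g. GaokaoBench_mixed_f2038e.py becoming GaokaoBench_mixed_9af5ee.py.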