From ff621ddb39baf133899da1a99d8f085cfe5cc788 Mon Sep 17 00:00:00 2001
From: zhangsongyang
Date: Wed, 26 Feb 2025 15:29:19 +0000
Subject: [PATCH] [Update] Support OlympiadBench-Math/OmniMath/LiveMathBench-Hard with LLM Verify

---
 ...iadBenchMath_0shot_llmverify_gen_9c22f2.py | 105 ++++++++++++++++++
 .../OlympiadBench/OlympiadBench_categories.py |  11 ++
 ...hbench_hard_custom_llmverify_gen_9e7505.py |  99 +++++++++++++++++
 ...rm800k_500_llmverify_repeat4_gen_97b203.py |   2 +-
 .../omni_math_llmverify_gen_ccf9c0.py         |  89 +++++++++++++++
 .../summarizers/groups/OlympiadBench.py       |  11 ++
 opencompass/datasets/generic.py               |  10 +-
 .../datasets/livemathbench/livemathbench.py   |  25 +++--
 8 files changed, 338 insertions(+), 14 deletions(-)
 create mode 100644 opencompass/configs/datasets/OlympiadBench/OlympiadBenchMath_0shot_llmverify_gen_9c22f2.py
 create mode 100644 opencompass/configs/datasets/livemathbench/livemathbench_hard_custom_llmverify_gen_9e7505.py
 create mode 100644 opencompass/configs/datasets/omni_math/omni_math_llmverify_gen_ccf9c0.py

diff --git a/opencompass/configs/datasets/OlympiadBench/OlympiadBenchMath_0shot_llmverify_gen_9c22f2.py b/opencompass/configs/datasets/OlympiadBench/OlympiadBenchMath_0shot_llmverify_gen_9c22f2.py
new file mode 100644
index 00000000..03881387
--- /dev/null
+++ b/opencompass/configs/datasets/OlympiadBench/OlympiadBenchMath_0shot_llmverify_gen_9c22f2.py
@@ -0,0 +1,105 @@
+from mmengine.config import read_base
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.datasets import OlympiadBenchDataset, OlympiadBenchEvaluator, olympiadbench_postprocess_v2
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.evaluator import GenericLLMEvaluator
+from opencompass.datasets import generic_llmjudge_postprocess
+
+with read_base():
+    from .OlympiadBench_categories import math_categories as categories
+
+# Create prompter instance for problems
+olympiadbench_prompter_cfg = dict(
+    type='OlympiadBenchPrompter'
+)
+
+olympiadbench_reader_cfg = dict(
+    input_columns=[
+        'problem', 'language', 'subject', 'question_type',
+        'answer_type', 'is_multiple_answer', 'unit', 'questions'
+    ],
+    output_column='solution'
+)
+
+GRADER_TEMPLATE = """
+    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
+
+    Here are some evaluation criteria:
+    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
+    2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
+    3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
+    4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
+    5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.
+
+    Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
+    A: CORRECT
+    B: INCORRECT
+    Just return the letters "A" or "B", with no text around it.
+
+    Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+
+
+    <Original Question Begin>: \n{problem}\n<Original Question End>\n\n
+    <Gold Target Begin>: \n{solution}\n<Gold Target End>\n\n
+    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
+
+    Judging the correctness of candidates' answers:
+""".strip()
+
+
+olympiadbenchMath_datasets = []
+for _name in categories:
+    olympiadbench_infer_cfg = dict(
+        prompt_template=dict(
+            type='OlympiadBenchTemplate'
+        ),
+        retriever=dict(type=ZeroRetriever),
+        inferencer=dict(type=GenInferencer),
+    )
+
+    # Evaluation configuration
+    olympiadbench_eval_cfg = dict(
+        evaluator=dict(
+            type=GenericLLMEvaluator,
+            prompt_template=dict(
+                type=PromptTemplate,
+                template=dict(
+                    begin=[
+                        dict(
+                            role='SYSTEM',
+                            fallback_role='HUMAN',
+                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
+                    ],
+                    round=[
+                        dict(
+                            role='HUMAN',
+                            prompt=GRADER_TEMPLATE
+                        ),
+                    ]),
+            ),
+            dataset_cfg=dict(
+                type=OlympiadBenchDataset,
+                path='opencompass/OlympiadBench',
+                name=_name,
+                reader_cfg=olympiadbench_reader_cfg,
+            ),
+            judge_cfg=dict(),
+            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
+        ),
+        pred_role='BOT',
+    )
+
+    olympiadbenchMath_datasets.append(
+        dict(
+            type=OlympiadBenchDataset,
+            abbr=f'OlympiadBench_{_name}',
+            path='opencompass/OlympiadBench',
+            name=_name,
+            reader_cfg=olympiadbench_reader_cfg,
+            infer_cfg=olympiadbench_infer_cfg,
+            eval_cfg=olympiadbench_eval_cfg,
+        )
+    )
+
+del _name
diff --git a/opencompass/configs/datasets/OlympiadBench/OlympiadBench_categories.py b/opencompass/configs/datasets/OlympiadBench/OlympiadBench_categories.py
index 818e5293..fdd4a56f 100644
--- a/opencompass/configs/datasets/OlympiadBench/OlympiadBench_categories.py
+++ b/opencompass/configs/datasets/OlympiadBench/OlympiadBench_categories.py
@@ -5,3 +5,14 @@ categories = [
     'OE_TO_physics_en_COMP',    # OpenEnded - TextOnly - physics - COMP
     'OE_TO_physics_zh_CEE'      # OpenEnded - TextOnly - physics - CEE
 ]
+
+math_categories = [
+    'OE_TO_maths_en_COMP',    # OpenEnded - TextOnly - maths - COMP
+    'OE_TO_maths_zh_COMP',    # OpenEnded - TextOnly - maths - COMP
+    'OE_TO_maths_zh_CEE',     # OpenEnded - TextOnly - maths - CEE
+]
+
+physics_categories = [
+    'OE_TO_physics_en_COMP',    # OpenEnded - TextOnly - physics - COMP
+    'OE_TO_physics_zh_CEE'      # OpenEnded - TextOnly - physics - CEE
+]
diff --git a/opencompass/configs/datasets/livemathbench/livemathbench_hard_custom_llmverify_gen_9e7505.py b/opencompass/configs/datasets/livemathbench/livemathbench_hard_custom_llmverify_gen_9e7505.py
new file mode 100644
index 00000000..cc787456
--- /dev/null
+++ b/opencompass/configs/datasets/livemathbench/livemathbench_hard_custom_llmverify_gen_9e7505.py
@@ -0,0 +1,99 @@
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.evaluator import GenericLLMEvaluator
+from opencompass.datasets import CustomDataset
+from opencompass.datasets import generic_llmjudge_postprocess
+from itertools import product
+
+# from opencompass.datasets.livemathbench import LiveMathBenchDataset, LiveMathBenchEvaluator
+
+
+livemathbench_reader_cfg = dict(input_columns=['prompt'], output_column='answer')
+
+
+# Inference configuration
+livemathbench_infer_cfg = dict(
+    prompt_template=dict(
+        type=PromptTemplate,
+        template=dict(
+            round=[
+                dict(
+                    role='HUMAN',
+                    prompt='{prompt}\nRemember to put your final answer within \\boxed{}.',
+                ),
+            ]
+        ),
+    ),
+    retriever=dict(type=ZeroRetriever),
+    inferencer=dict(type=GenInferencer),
+)
+
+
+# Template for the LLM judge
+GRADER_TEMPLATE = """
+    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
+
+    Here are some evaluation criteria:
+    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
+    2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
+    3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
+    4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
+    5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.
+    Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
+    A: CORRECT
+    B: INCORRECT
+    Just return the letters "A" or "B", with no text around it.
+    Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+    <Original Question Begin>: \n{prompt}\n<Original Question End>\n\n
+    <Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
+    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
+
+    Judging the correctness of candidates' answers:
+""".strip()
+
+
+splits = ['hard_cn', 'hard_en']
+# Dataset configuration
+livemathbench_datasets = [
+    dict(
+        type=CustomDataset,
+        abbr=f'livemathbench_hard_custom_{split}_run{run_idx}',
+        path='data/LiveMathBench',
+        local_mode=True,
+        file_name=f'202412/{split}.jsonl',
+        reader_cfg=livemathbench_reader_cfg,
+        infer_cfg=livemathbench_infer_cfg,
+        eval_cfg=dict(
+            # Evaluation configuration using LLM as judge
+            evaluator=dict(
+                type=GenericLLMEvaluator,
+                prompt_template=dict(
+                    type=PromptTemplate,
+                    template=dict(
+                        begin=[
+                            dict(
+                                role='SYSTEM',
+                                fallback_role='HUMAN',
+                                prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
+                            )
+                        ],
+                        round=[
+                            dict(role='HUMAN', prompt=GRADER_TEMPLATE),
+                        ],
+                    ),
+                ),
+                dataset_cfg=dict(
+                    type=CustomDataset,
+                    path='data/LiveMathBench',
+                    local_mode=True,
+                    file_name=f'202412/{split}.jsonl',
+                    reader_cfg=livemathbench_reader_cfg,
+                ),
+                judge_cfg={},
+                dict_postprocessor=dict(type=generic_llmjudge_postprocess),
+            ),
+        ),
+    ) for split, run_idx in product(splits, range(8))
+]
diff --git a/opencompass/configs/datasets/math/math_prm800k_500_llmverify_repeat4_gen_97b203.py b/opencompass/configs/datasets/math/math_prm800k_500_llmverify_repeat4_gen_97b203.py
index a7e373e9..1ac43b7c 100644
--- a/opencompass/configs/datasets/math/math_prm800k_500_llmverify_repeat4_gen_97b203.py
+++ b/opencompass/configs/datasets/math/math_prm800k_500_llmverify_repeat4_gen_97b203.py
@@ -88,7 +88,7 @@ math_eval_cfg = dict(
 math_datasets = [
     dict(
         type=MATHDataset,
-        abbr=f'math_prm800k_500-llmjudge-run{idx}',
+        abbr=f'math_prm800k_500-llmverify-run{idx}',
         path='opencompass/math',
         file_name = 'test_prm800k_500.json',
         reader_cfg=math_reader_cfg,
diff --git a/opencompass/configs/datasets/omni_math/omni_math_llmverify_gen_ccf9c0.py b/opencompass/configs/datasets/omni_math/omni_math_llmverify_gen_ccf9c0.py
new file mode 100644
index 00000000..2a4320b8
--- /dev/null
+++ b/opencompass/configs/datasets/omni_math/omni_math_llmverify_gen_ccf9c0.py
@@ -0,0 +1,89 @@
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.evaluator import GenericLLMEvaluator
+from opencompass.datasets import generic_llmjudge_postprocess
+from opencompass.datasets.omni_math import OmniMathDataset
+
+
+omnimath_reader_cfg = dict(
+    input_columns=['problem'],
+    output_column='answer'
+)
+
+omnimath_infer_cfg = dict(
+    prompt_template=dict(
+        type=PromptTemplate,
+        template=dict(
+            round=[
+                dict(role='HUMAN', prompt='please answer the following mathematical question, put your final answer in \\boxed{}.\n\n{problem}'),
+            ]
+        )
+    ),
+    retriever=dict(type=ZeroRetriever),
+    inferencer=dict(type=GenInferencer)
+)
+
+
+
+GRADER_TEMPLATE = """
+    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
+
+    Here are some evaluation criteria:
+    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
+    2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
+    3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
+    4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
+    5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.
+
+    Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
+    A: CORRECT
+    B: INCORRECT
+    Just return the letters "A" or "B", with no text around it.
+
+    Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+
+
+    <Original Question Begin>: \n{problem}\n<Original Question End>\n\n
+    <Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
+    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
+
+    Judging the correctness of candidates' answers:
+""".strip()
+
+omnimath_eval_cfg = dict(
+    evaluator=dict(
+        type=GenericLLMEvaluator,
+        prompt_template=dict(
+            type=PromptTemplate,
+            template=dict(
+                begin=[
+                    dict(
+                        role='SYSTEM',
+                        fallback_role='HUMAN',
+                        prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
+                ],
+                round=[
+                    dict(
+                        role='HUMAN',
+                        prompt=GRADER_TEMPLATE
+                    ),
+                ]),
+        ),
+        dataset_cfg=dict(
+            type=OmniMathDataset,
+            reader_cfg=omnimath_reader_cfg,
+        ),
+        judge_cfg=dict(),
+        dict_postprocessor=dict(type=generic_llmjudge_postprocess),
+    ),
+)
+omnimath_datasets = [
+    dict(
+        type=OmniMathDataset,
+        abbr='OmniMath',
+        reader_cfg=omnimath_reader_cfg,
+        infer_cfg=omnimath_infer_cfg,
+        eval_cfg=omnimath_eval_cfg
+    )
+]
\ No newline at end of file
diff --git a/opencompass/configs/summarizers/groups/OlympiadBench.py b/opencompass/configs/summarizers/groups/OlympiadBench.py
index 12fb5807..fc57f603 100644
--- a/opencompass/configs/summarizers/groups/OlympiadBench.py
+++ b/opencompass/configs/summarizers/groups/OlympiadBench.py
@@ -9,3 +9,14 @@ categories = [
 OlympiadBench_summary_groups = [
     {'name': 'OlympiadBench', 'subsets': ['OlympiadBench_' + c.replace(' ', '_') for c in categories]},
 ]
+
+math_categories = [
+    'OE_TO_maths_en_COMP',    # OpenEnded - TextOnly - maths - COMP
+    'OE_TO_maths_zh_COMP',    # OpenEnded - TextOnly - maths - COMP
+    'OE_TO_maths_zh_CEE',     # OpenEnded - TextOnly - maths - CEE
+]
+
+
+OlympiadBenchMath_summary_groups = [
+    {'name': 'OlympiadBenchMath', 'subsets': ['OlympiadBench_' + c.replace(' ', '_') for c in math_categories]},
+]
diff --git a/opencompass/datasets/generic.py b/opencompass/datasets/generic.py
index 28a37a02..dc783167 100644
--- a/opencompass/datasets/generic.py
+++ b/opencompass/datasets/generic.py
@@ -1,5 +1,7 @@
 import re
 
+from opencompass.utils import get_logger
+
 
 def get_final_results(judged_answers,
                       references,
@@ -68,7 +70,13 @@ def generic_llmjudge_postprocess(
         processed_judge = _generic_llmjudge_postprocess(v['prediction'])
         if processed_judge is not None:
             judged_answers.append(processed_judge)
-            references.append(v['gold'])
+            try:
+                references.append(v['gold'])
+
+            except KeyError:
+                get_logger().warning(
+                    f'No gold answer for {k}, use empty string as reference!')
+                references.append('')
     results = get_final_results(judged_answers, references, origial_responses)
     results['details'] = output
     return results
diff --git a/opencompass/datasets/livemathbench/livemathbench.py b/opencompass/datasets/livemathbench/livemathbench.py
index 13abf3aa..f01d4915 100644
--- a/opencompass/datasets/livemathbench/livemathbench.py
+++ b/opencompass/datasets/livemathbench/livemathbench.py
@@ -45,9 +45,8 @@ class LiveMathBenchDataset(BaseDataset):
         dataset = []
         dataset_info = {}
 
-        if path != '':
-            path = get_data_path(path)
-            path = os.path.join(path, version)
+        # Use dataset mapping to generate path
+        data_dir = get_data_path(path)
 
         for split, language in product(dataset_splits, dataset_languages):
             dataset_info[f'{split}_{language}'] = {
@@ -63,8 +62,17 @@
                 '问答': 'problem-solving'
             }
 
-            if path != '':
-                file_path = os.path.join(path, f'{split}_{language}.jsonl')
+            examples = []
+            if data_dir.startswith('opencompass/'):
+                # Using HF Dataset
+                hf_dataset = load_dataset(
+                    data_dir, f'v{version}_{split}_{language}')['test']
+                for example in hf_dataset:
+                    examples.append(example)
+            else:
+                file_path = os.path.join(data_dir, version,
+                                         f'{split}_{language}.jsonl')
+
                 if not os.path.exists(file_path):
                     raise FileNotFoundError(
                         f'File {file_path} does not exist, please check the '
@@ -73,13 +81,6 @@
                 with jsonlines.open(file_path, 'r') as file:
                     for example in file:
                         examples.append(example)
-            else:
-                hf_dataset = load_dataset(
-                    'opencompass/LiveMathBench',
-                    f'v{version}_{split}_{language}')['test']
-                examples = []
-                for example in hf_dataset:
-                    examples.append(example)
 
             for example_idx, example in enumerate(examples):
                 dataset_info[f'{split}_{language}'][
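
Usage note (editor's addition, not part of the patch): all three new *_llmverify configs leave judge_cfg empty, so a judge model has to be supplied by the top-level evaluation config. The sketch below shows one plausible way to wire the new OlympiadBench-Math config together with the OlympiadBenchMath summary group. The judge backend (HuggingFacewithChatTemplate), the Qwen2.5-72B-Instruct path, the generation/runtime parameters, and the exact read_base() import style are illustrative assumptions, not something this patch prescribes; adapt them to your own setup.

    # eval_olympiadbench_math_llmverify.py -- hypothetical top-level config
    from mmengine.config import read_base
    from opencompass.models import HuggingFacewithChatTemplate  # assumed judge backend

    with read_base():
        # dataset list added by this patch
        from opencompass.configs.datasets.OlympiadBench.OlympiadBenchMath_0shot_llmverify_gen_9c22f2 import \
            olympiadbenchMath_datasets
        # summary group added by this patch
        from opencompass.configs.summarizers.groups.OlympiadBench import \
            OlympiadBenchMath_summary_groups

    datasets = [*olympiadbenchMath_datasets]

    # models = [...]  # the models under evaluation go here, as in any other config

    # Judge model passed to GenericLLMEvaluator; path and sizes are placeholders.
    judge_model = dict(
        type=HuggingFacewithChatTemplate,
        abbr='qwen2.5-72b-instruct-judge',
        path='Qwen/Qwen2.5-72B-Instruct',
        max_out_len=1024,
        batch_size=8,
        run_cfg=dict(num_gpus=4),
    )

    # Every dataset in this patch ships with an empty judge_cfg, so fill it in here.
    for d in datasets:
        d['eval_cfg']['evaluator']['judge_cfg'] = judge_model

    summarizer = dict(
        dataset_abbrs=['OlympiadBenchMath'],
        summary_groups=OlympiadBenchMath_summary_groups,
    )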