From 22a33d8759b5b5980faaa1afbf710a78d2dfce48 Mon Sep 17 00:00:00 2001 From: Junnan Liu Date: Tue, 25 Feb 2025 17:24:36 +0800 Subject: [PATCH 1/2] [Update] Update LiveMathBench Hard Configs (#1826) * support G-Pass@k and livemathbench * fix bugs * fix comments of GPassKEvaluator * update saved details of GPassKEvaluator * update saved details of GPassKEvaluator * fix eval api configs & update openai_api for ease of debugging * update huggingface path * fix method name of G-Pass@k * fix default value of eval_model_name * refactor G-Pass@k evaluator * log generation params for each backend * fix evaluation resume * add notimplementerror * update livemathbench-hard configs * remove max_out_len from livemathbench_hard_greedy_gen_9befbf.py * remove max_out_len from livemathbench_hard_gen_9befbf.py * rename livemathbench_hard_gen_9befbf.py to livemathbench_hard_gen_353ae7.py * rename livemathbench_hard_greedy_gen_9befbf.py to livemathbench_hard_greedy_gen_353ae7.py * update livemathbench_gen_9befbf.py * remove whitespace * upload livemathbench hard configs --- .../livemathbench/livemathbench_gen_9befbf.py | 2 +- .../livemathbench/livemathbench_gen_f1c095.py | 49 ------------------ .../livemathbench/livemathbench_greedy_gen.py | 2 +- .../livemathbench_hard_gen_353ae7.py | 50 +++++++++++++++++++ .../livemathbench_hard_greedy_gen_353ae7.py | 50 +++++++++++++++++++ .../datasets/livemathbench/livemathbench.py | 2 +- .../models/turbomind_with_tf_above_v4_33.py | 2 - 7 files changed, 103 insertions(+), 54 deletions(-) delete mode 100644 opencompass/configs/datasets/livemathbench/livemathbench_gen_f1c095.py create mode 100644 opencompass/configs/datasets/livemathbench/livemathbench_hard_gen_353ae7.py create mode 100644 opencompass/configs/datasets/livemathbench/livemathbench_hard_greedy_gen_353ae7.py diff --git a/opencompass/configs/datasets/livemathbench/livemathbench_gen_9befbf.py b/opencompass/configs/datasets/livemathbench/livemathbench_gen_9befbf.py index 3748c022..454e7d3e 100644 --- a/opencompass/configs/datasets/livemathbench/livemathbench_gen_9befbf.py +++ b/opencompass/configs/datasets/livemathbench/livemathbench_gen_9befbf.py @@ -48,4 +48,4 @@ livemathbench_dataset = dict( ) ) ) -livemathbench_datasets = [livemathbench_dataset] \ No newline at end of file +livemathbench_datasets = [livemathbench_dataset] diff --git a/opencompass/configs/datasets/livemathbench/livemathbench_gen_f1c095.py b/opencompass/configs/datasets/livemathbench/livemathbench_gen_f1c095.py deleted file mode 100644 index 56161095..00000000 --- a/opencompass/configs/datasets/livemathbench/livemathbench_gen_f1c095.py +++ /dev/null @@ -1,49 +0,0 @@ -from opencompass.openicl.icl_prompt_template import PromptTemplate -from opencompass.openicl.icl_retriever import ZeroRetriever -from opencompass.openicl.icl_inferencer import GenInferencer - -from opencompass.datasets.livemathbench import LiveMathBenchDataset, LiveMathBenchEvaluator - - -livemathbench_reader_cfg = dict( - input_columns=['prompt'], - output_column='answer' -) - -livemathbench_infer_cfg = dict( - prompt_template=dict( - type=PromptTemplate, - template=dict( - round=[ - dict(role='HUMAN', prompt='{prompt}'), - ] - ) - ), - retriever=dict(type=ZeroRetriever), - inferencer=dict( - type=GenInferencer, - max_out_len=8192, - temperature=1.0 - ) -) - -livemathbench_eval_cfg = dict( - evaluator=dict( - type=LiveMathBenchEvaluator, - model_name='Qwen/Qwen2.5-72B-Instruct', - url=['http://172.30.40.154:23333/v1/'] #'https://api.openai.com/v1/' - ) -) - -livemathbench_datasets 
= [ - dict( - type=LiveMathBenchDataset, - abbr='LiveMathBench-k1-n1', - path='opencompass/LiveMathBench202412', - k=1, # K@Pass - n=1, # Run times - reader_cfg=livemathbench_reader_cfg, - infer_cfg=livemathbench_infer_cfg, - eval_cfg=livemathbench_eval_cfg - ) -] \ No newline at end of file diff --git a/opencompass/configs/datasets/livemathbench/livemathbench_greedy_gen.py b/opencompass/configs/datasets/livemathbench/livemathbench_greedy_gen.py index d311eeaf..c1d72d15 100644 --- a/opencompass/configs/datasets/livemathbench/livemathbench_greedy_gen.py +++ b/opencompass/configs/datasets/livemathbench/livemathbench_greedy_gen.py @@ -1,4 +1,4 @@ from mmengine.config import read_base with read_base(): - from .livemathbench_greedy_gen_efb20d import livemathbench_datasets # noqa: F401, F403 \ No newline at end of file + from .livemathbench_greedy_gen_9befbf import livemathbench_datasets # noqa: F401, F403 \ No newline at end of file diff --git a/opencompass/configs/datasets/livemathbench/livemathbench_hard_gen_353ae7.py b/opencompass/configs/datasets/livemathbench/livemathbench_hard_gen_353ae7.py new file mode 100644 index 00000000..e932d3c3 --- /dev/null +++ b/opencompass/configs/datasets/livemathbench/livemathbench_hard_gen_353ae7.py @@ -0,0 +1,50 @@ +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer + +from opencompass.datasets.livemathbench import LiveMathBenchDataset, LiveMathBenchEvaluator + + +livemathbench_dataset = dict( + type=LiveMathBenchDataset, + path='', + k=16, + replication=3, + dataset_splits=['hard'], + dataset_languages=['cn', 'en'], + cot=True, + version='202412', + abbr='LiveMathBench-v202412-Hard', + reader_cfg=dict( + input_columns=['prompt'], + output_column='answer' + ), + infer_cfg=dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt='{prompt}'), + ] + ) + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict( + type=GenInferencer + ), + ), + eval_cfg=dict( + evaluator=dict( + type=LiveMathBenchEvaluator, + model_name='', + url=[], + use_extract_model=False, + extract_url=[], + extract_model_name='', + k=[4, 8, 16], + replication=3, + thresholds=[0.0, 0.25, 0.5, 0.75, 1.0] + ) + ) +) +livemathbench_datasets = [livemathbench_dataset] \ No newline at end of file diff --git a/opencompass/configs/datasets/livemathbench/livemathbench_hard_greedy_gen_353ae7.py b/opencompass/configs/datasets/livemathbench/livemathbench_hard_greedy_gen_353ae7.py new file mode 100644 index 00000000..830e55af --- /dev/null +++ b/opencompass/configs/datasets/livemathbench/livemathbench_hard_greedy_gen_353ae7.py @@ -0,0 +1,50 @@ +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer + +from opencompass.datasets.livemathbench import LiveMathBenchDataset, LiveMathBenchEvaluator + + +livemathbench_dataset = dict( + type=LiveMathBenchDataset, + path='', + k=1, + replication=1, + dataset_splits=['hard'], + dataset_languages=['cn', 'en'], + cot=True, + version='202412', + abbr='LiveMathBench-v202412-Hard', + reader_cfg=dict( + input_columns=['prompt'], + output_column='answer' + ), + infer_cfg=dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt='{prompt}'), + ] + ) + ), + retriever=dict(type=ZeroRetriever), + 
inferencer=dict( + type=GenInferencer + ), + ), + eval_cfg=dict( + evaluator=dict( + type=LiveMathBenchEvaluator, + model_name='', + url=[], + use_extract_model=False, + extract_url=[], + extract_model_name='', + k=[1], + replication=1, + thresholds=[0.0] + ) + ) +) +livemathbench_datasets = [livemathbench_dataset] \ No newline at end of file diff --git a/opencompass/datasets/livemathbench/livemathbench.py b/opencompass/datasets/livemathbench/livemathbench.py index d2b4b93b..13abf3aa 100644 --- a/opencompass/datasets/livemathbench/livemathbench.py +++ b/opencompass/datasets/livemathbench/livemathbench.py @@ -48,6 +48,7 @@ class LiveMathBenchDataset(BaseDataset): if path != '': path = get_data_path(path) path = os.path.join(path, version) + for split, language in product(dataset_splits, dataset_languages): dataset_info[f'{split}_{language}'] = { 'single-choice': 0, @@ -64,7 +65,6 @@ class LiveMathBenchDataset(BaseDataset): if path != '': file_path = os.path.join(path, f'{split}_{language}.jsonl') - if not os.path.exists(file_path): raise FileNotFoundError( f'File {file_path} does not exist, please check the ' diff --git a/opencompass/models/turbomind_with_tf_above_v4_33.py b/opencompass/models/turbomind_with_tf_above_v4_33.py index 88b605f9..7138974d 100644 --- a/opencompass/models/turbomind_with_tf_above_v4_33.py +++ b/opencompass/models/turbomind_with_tf_above_v4_33.py @@ -164,8 +164,6 @@ class TurboMindModelwithChatTemplate(BaseModel): self.logger.info('Generation Config of LMdeploy: ') self.logger.info(gen_config) - - results = [] outputs = self.pipe(messages, gen_config=gen_config, do_preprocess=False) for output in outputs: From fd6fbf01a244663e866369bbedb4a974dd0bd37e Mon Sep 17 00:00:00 2001 From: Songyang Zhang Date: Tue, 25 Feb 2025 20:34:41 +0800 Subject: [PATCH 2/2] [Update] Support AIME-24 Evaluation for DeepSeek-R1 series (#1888) * Update * Update * Update * Update --- examples/eval_simpleqa.py | 4 +- opencompass/cli/main.py | 2 - ...lympiadBench_0shot_llmverify_gen_be8b13.py | 109 ++++++++++++++++++ .../aime2024_llmverify_repeat16_gen_bf7475.py | 96 +++++++++++++++ .../aime2024_llmverify_repeat8_gen_e8fcee.py | 96 +++++++++++++++ .../math_prm800k_500_llmverify_gen_6ff468.py | 99 ++++++++++++++++ ...rm800k_500_llmverify_repeat4_gen_97b203.py | 100 ++++++++++++++++ opencompass/datasets/base.py | 3 +- .../icl_retriever/icl_topk_retriever.py | 3 +- requirements/runtime.txt | 2 +- tools/list_configs.py | 8 +- 11 files changed, 512 insertions(+), 10 deletions(-) create mode 100644 opencompass/configs/datasets/OlympiadBench/OlympiadBench_0shot_llmverify_gen_be8b13.py create mode 100644 opencompass/configs/datasets/aime2024/aime2024_llmverify_repeat16_gen_bf7475.py create mode 100644 opencompass/configs/datasets/aime2024/aime2024_llmverify_repeat8_gen_e8fcee.py create mode 100644 opencompass/configs/datasets/math/math_prm800k_500_llmverify_gen_6ff468.py create mode 100644 opencompass/configs/datasets/math/math_prm800k_500_llmverify_repeat4_gen_97b203.py diff --git a/examples/eval_simpleqa.py b/examples/eval_simpleqa.py index 193c63b8..ade38d38 100644 --- a/examples/eval_simpleqa.py +++ b/examples/eval_simpleqa.py @@ -36,8 +36,8 @@ infer = dict( eval = dict( partitioner=dict( type=SubjectiveNaivePartitioner, - models=[gpt_4o_2024_05_13_model], - judge_models=[gpt_4o_2024_05_13_model], + models=models, + judge_models=judge_models, ), runner=dict(type=LocalRunner, max_num_workers=256, diff --git a/opencompass/cli/main.py b/opencompass/cli/main.py index 63377371..21308e10 100644 --- 
a/opencompass/cli/main.py +++ b/opencompass/cli/main.py @@ -177,8 +177,6 @@ def parse_dlc_args(dlc_parser): type=str) - - def parse_hf_args(hf_parser): """These args are all for the quick construction of HuggingFace models.""" hf_parser.add_argument('--hf-type', type=str, choices=['base', 'chat'], default='chat', help='The type of the HuggingFace model, base or chat') diff --git a/opencompass/configs/datasets/OlympiadBench/OlympiadBench_0shot_llmverify_gen_be8b13.py b/opencompass/configs/datasets/OlympiadBench/OlympiadBench_0shot_llmverify_gen_be8b13.py new file mode 100644 index 00000000..e0e59a33 --- /dev/null +++ b/opencompass/configs/datasets/OlympiadBench/OlympiadBench_0shot_llmverify_gen_be8b13.py @@ -0,0 +1,109 @@ +from mmengine.config import read_base +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.datasets import OlympiadBenchDataset, OlympiadBenchEvaluator, olympiadbench_postprocess_v2 +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import generic_llmjudge_postprocess + +with read_base(): + from .OlympiadBench_categories import categories + +# Create prompter instance for problems +olympiadbench_prompter_cfg = dict( + type='OlympiadBenchPrompter' +) + +olympiadbench_reader_cfg = dict( + input_columns=[ + 'problem', 'language', 'subject', 'question_type', + 'answer_type', 'is_multiple_answer', 'unit', 'questions' + ], + output_column='solution' +) + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + 5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. 
Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + + + : \n{problem}\n\n\n + : \n{solution}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + + +olympiadbench_datasets = [] +for _name in categories: + olympiadbench_infer_cfg = dict( + prompt_template=dict( + type='OlympiadBenchTemplate' + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), + ) + + # olympiadbench_eval_cfg = dict( + # evaluator=dict(type=OlympiadBenchEvaluator, version='v2'), + # pred_postprocessor=dict(type=olympiadbench_postprocess_v2), + # ) + # Evaluation configuration + olympiadbench_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.") + ], + round=[ + dict( + role='HUMAN', + prompt = GRADER_TEMPLATE + ), + ]), + ), + dataset_cfg=dict( + type=OlympiadBenchDataset, + path='opencompass/OlympiadBench', + name=_name, + reader_cfg=olympiadbench_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + ), + pred_role='BOT', + ) + + olympiadbench_datasets.append( + dict( + type=OlympiadBenchDataset, + abbr=f'OlympiadBench_{_name}', + path='opencompass/OlympiadBench', + name=_name, + reader_cfg=olympiadbench_reader_cfg, + infer_cfg=olympiadbench_infer_cfg, + eval_cfg=olympiadbench_eval_cfg, + ) + ) + +del _name diff --git a/opencompass/configs/datasets/aime2024/aime2024_llmverify_repeat16_gen_bf7475.py b/opencompass/configs/datasets/aime2024/aime2024_llmverify_repeat16_gen_bf7475.py new file mode 100644 index 00000000..070a63bc --- /dev/null +++ b/opencompass/configs/datasets/aime2024/aime2024_llmverify_repeat16_gen_bf7475.py @@ -0,0 +1,96 @@ +# CoT: No CoT +# K-Shot: 0-Shot +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.datasets import Aime2024Dataset, MATHEvaluator, math_postprocess_v2 +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import generic_llmjudge_postprocess + +aime2024_reader_cfg = dict( + input_columns=['question'], + output_column='answer' +) + + +aime2024_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt='{question}\nRemember to put your final answer within \\boxed{}.'), + ], + ) + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer) +) + + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. 
You can assume that the standard answer is definitely correct. + 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + 5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + + + : \n{question}\n\n\n + : \n{answer}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + +aime2024_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.") + ], + round=[ + dict( + role='HUMAN', + prompt = GRADER_TEMPLATE + ), + ]), + ), + dataset_cfg=dict( + type=Aime2024Dataset, + path='opencompass/aime2024', + reader_cfg=aime2024_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + ), + pred_role='BOT', +) + +aime2024_datasets = [ + dict( + abbr=f'aime2024-run{idx}', + type=Aime2024Dataset, + path='opencompass/aime2024', + reader_cfg=aime2024_reader_cfg, + infer_cfg=aime2024_infer_cfg, + eval_cfg=aime2024_eval_cfg, + mode='singlescore', + ) + for idx in range(16) +] \ No newline at end of file diff --git a/opencompass/configs/datasets/aime2024/aime2024_llmverify_repeat8_gen_e8fcee.py b/opencompass/configs/datasets/aime2024/aime2024_llmverify_repeat8_gen_e8fcee.py new file mode 100644 index 00000000..07fa6d1b --- /dev/null +++ b/opencompass/configs/datasets/aime2024/aime2024_llmverify_repeat8_gen_e8fcee.py @@ -0,0 +1,96 @@ +# CoT: No CoT +# K-Shot: 0-Shot +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.datasets import Aime2024Dataset, MATHEvaluator, math_postprocess_v2 +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import generic_llmjudge_postprocess + +aime2024_reader_cfg = dict( + input_columns=['question'], + output_column='answer' +) + + 
+aime2024_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt='{question}\nRemember to put your final answer within \\boxed{}.'), + ], + ) + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer) +) + + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + 5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. 
+ + + : \n{question}\n\n\n + : \n{answer}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + +aime2024_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.") + ], + round=[ + dict( + role='HUMAN', + prompt = GRADER_TEMPLATE + ), + ]), + ), + dataset_cfg=dict( + type=Aime2024Dataset, + path='opencompass/aime2024', + reader_cfg=aime2024_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + ), + pred_role='BOT', +) + +aime2024_datasets = [ + dict( + abbr=f'aime2024-run{idx}', + type=Aime2024Dataset, + path='opencompass/aime2024', + reader_cfg=aime2024_reader_cfg, + infer_cfg=aime2024_infer_cfg, + eval_cfg=aime2024_eval_cfg, + mode='singlescore', + ) + for idx in range(8) +] \ No newline at end of file diff --git a/opencompass/configs/datasets/math/math_prm800k_500_llmverify_gen_6ff468.py b/opencompass/configs/datasets/math/math_prm800k_500_llmverify_gen_6ff468.py new file mode 100644 index 00000000..78e66452 --- /dev/null +++ b/opencompass/configs/datasets/math/math_prm800k_500_llmverify_gen_6ff468.py @@ -0,0 +1,99 @@ +# CoT: No CoT +# K-Shot: 0-Shot +# Verify: LLM Verify +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import generic_llmjudge_postprocess +from opencompass.datasets import MATHDataset + + +# ----------------------------- Detailed Config ----------------------------- + +math_reader_cfg = dict(input_columns=['problem'], output_column='solution') + +math_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt='{problem}\nRemember to put your final answer within \\boxed{}.'), + ] + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), +) + + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. 
For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + 5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + + + : \n{problem}\n\n\n + : \n{solution}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + +# Evaluation configuration +math_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.") + ], + round=[ + dict( + role='HUMAN', + prompt = GRADER_TEMPLATE + ), + ]), + ), + dataset_cfg=dict( + type=MATHDataset, + path='opencompass/math', + file_name = 'test_prm800k_500.json', + reader_cfg=math_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + ), + pred_role='BOT', +) + + +math_datasets = [ + dict( + type=MATHDataset, + abbr='math_prm800k_500-llmjudge', + path='opencompass/math', + file_name = 'test_prm800k_500.json', + reader_cfg=math_reader_cfg, + infer_cfg=math_infer_cfg, + eval_cfg=math_eval_cfg, + mode='singlescore', + ) +] diff --git a/opencompass/configs/datasets/math/math_prm800k_500_llmverify_repeat4_gen_97b203.py b/opencompass/configs/datasets/math/math_prm800k_500_llmverify_repeat4_gen_97b203.py new file mode 100644 index 00000000..a7e373e9 --- /dev/null +++ b/opencompass/configs/datasets/math/math_prm800k_500_llmverify_repeat4_gen_97b203.py @@ -0,0 +1,100 @@ +# CoT: No CoT +# K-Shot: 0-Shot +# Verify: LLM Verify +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import generic_llmjudge_postprocess +from opencompass.datasets import MATHDataset + + +# ----------------------------- Detailed Config ----------------------------- + +math_reader_cfg = dict(input_columns=['problem'], output_column='solution') + +math_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt='{problem}\nRemember to put your final answer within \\boxed{}.'), + ] + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), +) + + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. 
+ + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + 5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. 
+ + + : \n{problem}\n\n\n + : \n{solution}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + +# Evaluation configuration +math_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.") + ], + round=[ + dict( + role='HUMAN', + prompt = GRADER_TEMPLATE + ), + ]), + ), + dataset_cfg=dict( + type=MATHDataset, + path='opencompass/math', + file_name = 'test_prm800k_500.json', + reader_cfg=math_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + ), + pred_role='BOT', +) + + +math_datasets = [ + dict( + type=MATHDataset, + abbr=f'math_prm800k_500-llmjudge-run{idx}', + path='opencompass/math', + file_name = 'test_prm800k_500.json', + reader_cfg=math_reader_cfg, + infer_cfg=math_infer_cfg, + eval_cfg=math_eval_cfg, + mode='singlescore', + ) + for idx in range(4) +] diff --git a/opencompass/datasets/base.py b/opencompass/datasets/base.py index 5412ef4c..5dc0f073 100644 --- a/opencompass/datasets/base.py +++ b/opencompass/datasets/base.py @@ -1,4 +1,3 @@ -from abc import abstractstaticmethod from typing import Dict, Optional, Union from datasets import Dataset, DatasetDict @@ -23,6 +22,6 @@ class BaseDataset: def test(self): return self.reader.dataset['test'] - @abstractstaticmethod + @staticmethod def load(**kwargs) -> Union[Dataset, DatasetDict]: pass diff --git a/opencompass/openicl/icl_retriever/icl_topk_retriever.py b/opencompass/openicl/icl_retriever/icl_topk_retriever.py index c9ac8f81..9703a621 100644 --- a/opencompass/openicl/icl_retriever/icl_topk_retriever.py +++ b/opencompass/openicl/icl_retriever/icl_topk_retriever.py @@ -7,7 +7,6 @@ from typing import Any, Dict, List, Optional, Union import numpy as np import torch import tqdm -from sentence_transformers import SentenceTransformer from torch.utils.data import DataLoader from transformers import AutoTokenizer, BatchEncoding, PreTrainedTokenizerBase from transformers.file_utils import PaddingStrategy @@ -56,6 +55,8 @@ class TopkRetriever(BaseRetriever): tokenizer_name: Optional[str] = 'gpt2-xl', batch_size: Optional[int] = 1) -> None: super().__init__(dataset, ice_separator, ice_eos_token, ice_num) + from sentence_transformers import SentenceTransformer + self.device = 'cuda' if torch.cuda.is_available() else 'cpu' self.batch_size = batch_size self.tokenizer_name = tokenizer_name diff --git a/requirements/runtime.txt b/requirements/runtime.txt index 47133f21..348df85d 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -9,7 +9,7 @@ fuzzywuzzy gradio-client h5py httpx==0.27.2 -huggingface_hub<=0.24.7 +huggingface_hub immutabledict importlib-metadata jieba diff --git a/tools/list_configs.py b/tools/list_configs.py index cc778aeb..9aa6cc49 100644 --- a/tools/list_configs.py +++ b/tools/list_configs.py @@ -19,11 +19,15 @@ def parse_args(): def main(): args = parse_args() - models = match_files('configs/models/', args.pattern, fuzzy=True) + models = match_files('opencompass/configs/models/', + args.pattern, + fuzzy=True) if models: table = [['Model', 'Config Path'], *models] print(tabulate.tabulate(table, headers='firstrow', tablefmt='psql')) - datasets = match_files('configs/datasets/', args.pattern, fuzzy=True) + datasets = match_files('opencompass/configs/datasets/', + args.pattern, + fuzzy=True) if 
datasets: table = [['Dataset', 'Config Path'], *datasets] print(tabulate.tabulate(table, headers='firstrow', tablefmt='psql'))
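
Editor's note on the metric behind the first commit: the LiveMathBench-Hard configs above request `k=[4, 8, 16]`, `replication=3`, and a list of `thresholds`, which correspond to the G-Pass@k_tau metric the commit message mentions. The sketch below is an illustrative, standalone re-implementation of the per-problem estimator (a hypergeometric tail over `c` correct answers among `n` generations), not the evaluator code shipped in this patch; the function name and the treatment of `tau == 0.0` as plain pass@k are assumptions made for the example.

```python
# Minimal sketch of G-Pass@k_tau for a single problem: the probability that,
# drawing k generations without replacement from n total generations of which
# c are correct, at least ceil(tau * k) of the drawn ones are correct.
from math import ceil, comb


def g_pass_at_k(n: int, c: int, k: int, tau: float) -> float:
    """Hypergeometric estimate of G-Pass@k_tau for one problem.

    n   -- total generations for the problem (e.g. k=16 * replication=3 = 48)
    c   -- number of those generations judged correct
    k   -- the k in G-Pass@k
    tau -- stability threshold in [0, 1]; tau == 0.0 is treated here as
           "at least one correct", which recovers classic pass@k (assumption)
    """
    if k > n:
        raise ValueError('k must not exceed the number of generations n')
    m = max(1, ceil(tau * k))  # required number of correct draws
    total = comb(n, k)
    hits = sum(comb(c, j) * comb(n - c, k - j)
               for j in range(m, min(c, k) + 1))
    return hits / total


# Example with the numbers used by the hard config: 16 samples replicated
# 3 times gives n = 48 generations per problem; suppose 30 were correct.
if __name__ == '__main__':
    for tau in (0.0, 0.25, 0.5, 0.75, 1.0):
        print(f'G-Pass@16_{tau}: {g_pass_at_k(48, 30, 16, tau):.4f}')
```

The dataset-level score is then the mean of this quantity over all problems; the thresholds listed in the config simply select which values of tau are reported.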
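
For completeness, a minimal usage sketch (not part of this patch) showing how the new dataset configs might be pulled into an evaluation config via `read_base()`. The import paths follow the files added above; the model list and the way a judge model is supplied to `GenericLLMEvaluator` (whose `judge_cfg` is left empty in these configs) are placeholders that depend on your own setup and OpenCompass version.

```python
from mmengine.config import read_base

with read_base():
    # dataset configs added by these two patches
    from opencompass.configs.datasets.livemathbench.livemathbench_hard_gen_353ae7 import \
        livemathbench_datasets
    from opencompass.configs.datasets.aime2024.aime2024_llmverify_repeat8_gen_e8fcee import \
        aime2024_datasets

datasets = [*livemathbench_datasets, *aime2024_datasets]
models = []  # models under evaluation go here

# The llmverify configs leave judge_cfg empty, and the LiveMathBench evaluator's
# model_name/url are likewise blank in the hard configs. One possible way to fill
# them in before launch (assumption: exact wiring depends on your OpenCompass
# version) is to patch the evaluator settings in the eval script:
# judge_cfg = dict(...)  # an OpenAI-compatible or local judge model config
# for d in aime2024_datasets:
#     d['eval_cfg']['evaluator']['judge_cfg'] = judge_cfg
```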