From 20660ab5071ab66215345d5fe8c4344df8bfcca6 Mon Sep 17 00:00:00 2001 From: Junnan Liu Date: Thu, 10 Apr 2025 19:47:21 +0800 Subject: [PATCH 1/5] [Fix] Fix compare error when k is list in base_evaluator (#2010) * fix gpass compare error of list k * fix compare error in 177 --- opencompass/openicl/icl_evaluator/icl_base_evaluator.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/opencompass/openicl/icl_evaluator/icl_base_evaluator.py b/opencompass/openicl/icl_evaluator/icl_base_evaluator.py index f7ff0277..10cc3fe4 100644 --- a/opencompass/openicl/icl_evaluator/icl_base_evaluator.py +++ b/opencompass/openicl/icl_evaluator/icl_base_evaluator.py @@ -159,9 +159,10 @@ class BaseEvaluator: can_calculate = True c += int(example['detail']['is_correct']) - if can_calculate and n > 1 and k > 1: + k_list = [k] if isinstance(k, int) else k + if can_calculate and n > 1 and max(k_list) > 1: thresholds = [0.0, 0.25, 0.5, 0.75, 1.0] - for _k in [k] if isinstance(k, int) else k: + for _k in k_list: for threshold in thresholds: g_pass = compute_g_pass_at_k(n=n, c=c, @@ -174,7 +175,7 @@ class BaseEvaluator: eval_details.append(detail) - if can_calculate and n > 1 and k > 1: + if can_calculate and n > 1 and max(k_list) > 1: eval_results.update(self.reduce(eval_details)) # Store eval_details in eval_results From 3f50b1dc49778431fd6fefdd9a2e845e1e79f12e Mon Sep 17 00:00:00 2001 From: bittersweet1999 <148421775+bittersweet1999@users.noreply.github.com> Date: Fri, 11 Apr 2025 16:59:40 +0800 Subject: [PATCH 2/5] [Fix] fix order bug Update arena_hard.py (#2015) --- opencompass/datasets/subjective/arena_hard.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opencompass/datasets/subjective/arena_hard.py b/opencompass/datasets/subjective/arena_hard.py index b146f3ac..1403c978 100644 --- a/opencompass/datasets/subjective/arena_hard.py +++ b/opencompass/datasets/subjective/arena_hard.py @@ -146,7 +146,7 @@ def preety_print_two_ratings(ratings_1, ratings_2, column_names): def predict_win_rate(elo_ratings, SCALE=400, BASE=10, INIT_RATING=1000): - names = sorted(list(elo_ratings.keys())) + names = list(elo_ratings.keys()) wins = defaultdict(lambda: defaultdict(lambda: 0)) for a in names: for b in names: From 6a6a1a5c0b24bb22f3b3fee4f395b03c21edb348 Mon Sep 17 00:00:00 2001 From: Linchen Xiao Date: Fri, 11 Apr 2025 19:01:39 +0800 Subject: [PATCH 3/5] [Feature] LLM Judge sanity check (#2012) * update * update --- opencompass/datasets/generic.py | 37 ++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/opencompass/datasets/generic.py b/opencompass/datasets/generic.py index dc783167..07b6a0bb 100644 --- a/opencompass/datasets/generic.py +++ b/opencompass/datasets/generic.py @@ -11,17 +11,18 @@ def get_final_results(judged_answers, is_correct_count = 0 is_incorrect_count = 0 is_not_attempted_count = 0 + attempted_judge_count = 0 details = [] for i, j, k in zip(judged_answers, references, origial_responses): - match = re.search(r'(A|B)', i) - grade_letter = match.group( - 0) if match else 'B' # Default to "INCORRECT" if no match + if i in ['A', 'B']: + attempted_judge_count += 1 + grade_letter = i detail = { 'pred': k, 'ref': j, 'origin_grade_response': i, 'grade_letter': grade_letter, - 'correct': False + 'correct': False, } count += 1 if grade_letter == 'A': @@ -35,26 +36,32 @@ def get_final_results(judged_answers, is_correct = is_correct_count / count is_incorrect = is_incorrect_count / count - # is_not_attempted = is_not_attempted_count 
/ count is_given_attempted = is_correct + is_incorrect - accuracy_given_attempted = is_correct / is_given_attempted \ - if is_given_attempted > 0 else 0 - f1 = 2 * accuracy_given_attempted * is_correct / ( - accuracy_given_attempted + is_correct) if (accuracy_given_attempted + - is_correct) > 0 else 0 + loose_accuracy = is_correct / count + accuracy_given_attempted = (is_correct / is_given_attempted + if is_given_attempted > 0 else 0) + attempted_judge_ratio = attempted_judge_count / count + + f1 = (2 * accuracy_given_attempted * is_correct / + (accuracy_given_attempted + is_correct) if + (accuracy_given_attempted + is_correct) > 0 else 0) result = { - # 'accuracy_given_attempted': accuracy_given_attempted, - metric_name: accuracy_given_attempted * 100, + metric_name: loose_accuracy * 100, + f'{metric_name}_given_attempted': accuracy_given_attempted * 100, 'f1': f1, - 'details': details + 'attempted_ratio': attempted_judge_ratio * 100, + 'correct_count': is_correct_count, + 'incorrect_count': is_incorrect_count, + 'not_attempted_count': is_not_attempted_count, + 'details': details, } return result def _generic_llmjudge_postprocess(judgement: str): match = re.search(r'(A|B)', judgement) - grade_letter = match.group( - 0) if match else 'B' # Default to "INCORRECT" if no match + grade_letter = (match.group(0) if match else 'unknown' + ) # Return 'unknown' if no match return grade_letter From 75e7834b59c422d821f1299559c530a083e4abd8 Mon Sep 17 00:00:00 2001 From: Myhs_phz Date: Mon, 14 Apr 2025 20:18:47 +0800 Subject: [PATCH 4/5] [Feature] Add Datasets: ClimateQA,Physics (#2017) * feat ClimateQA * feat PHYSICS * fix * fix * fix * fix --- dataset-index.yml | 14 ++ .../ClimaQA/ClimaQA_Gold_llm_judge_gen.py | 4 + .../ClimaQA_Gold_llm_judge_gen_f15343.py | 164 ++++++++++++++++++ .../ClimaQA/ClimaQA_Silver_llm_judge_gen.py | 4 + .../ClimaQA_Silver_llm_judge_gen_f15343.py | 160 +++++++++++++++++ .../datasets/PHYSICS/PHYSICS_llm_judge_gen.py | 4 + .../PHYSICS/PHYSICS_llm_judge_gen_a133a2.py | 131 ++++++++++++++ .../configs/summarizers/groups/PHYSICS.py | 14 ++ opencompass/datasets/__init__.py | 2 + opencompass/datasets/climaqa.py | 30 ++++ opencompass/datasets/physics.py | 30 ++++ opencompass/utils/datasets_info.py | 31 ++++ 12 files changed, 588 insertions(+) create mode 100644 opencompass/configs/datasets/ClimaQA/ClimaQA_Gold_llm_judge_gen.py create mode 100644 opencompass/configs/datasets/ClimaQA/ClimaQA_Gold_llm_judge_gen_f15343.py create mode 100644 opencompass/configs/datasets/ClimaQA/ClimaQA_Silver_llm_judge_gen.py create mode 100644 opencompass/configs/datasets/ClimaQA/ClimaQA_Silver_llm_judge_gen_f15343.py create mode 100644 opencompass/configs/datasets/PHYSICS/PHYSICS_llm_judge_gen.py create mode 100644 opencompass/configs/datasets/PHYSICS/PHYSICS_llm_judge_gen_a133a2.py create mode 100644 opencompass/configs/summarizers/groups/PHYSICS.py create mode 100644 opencompass/datasets/climaqa.py create mode 100644 opencompass/datasets/physics.py diff --git a/dataset-index.yml b/dataset-index.yml index f1581c21..89fde388 100644 --- a/dataset-index.yml +++ b/dataset-index.yml @@ -997,3 +997,17 @@ paper: https://arxiv.org/pdf/2502.14739 configpath: opencompass/configs/datasets/supergpqa configpath_llmjudge: '' +- climaqa: + name: ClimaQA + category: Science + paper: https://arxiv.org/pdf/2410.16701 + configpath: '' + configpath_llmjudge: + - opencompass/configs/datasets/ClimaQA/ClimaQA_Gold_llm_judge.py + - opencompass/configs/datasets/ClimaQA/ClimaQA_Silver_llm_judge.py +- physics: + name: PHYSICS + 
category: Science + paper: https://arxiv.org/pdf/2503.21821 + configpath: '' + configpath_llmjudge: opencompass/configs/datasets/PHYSICS/PHYSICS_llm_judge_gen_a133a2.py \ No newline at end of file diff --git a/opencompass/configs/datasets/ClimaQA/ClimaQA_Gold_llm_judge_gen.py b/opencompass/configs/datasets/ClimaQA/ClimaQA_Gold_llm_judge_gen.py new file mode 100644 index 00000000..599b6b82 --- /dev/null +++ b/opencompass/configs/datasets/ClimaQA/ClimaQA_Gold_llm_judge_gen.py @@ -0,0 +1,4 @@ +from mmengine.config import read_base + +with read_base(): + from .ClimaQA_Gold_llm_judge_gen_f15343 import climaqa_datasets # noqa: F401, F403 \ No newline at end of file diff --git a/opencompass/configs/datasets/ClimaQA/ClimaQA_Gold_llm_judge_gen_f15343.py b/opencompass/configs/datasets/ClimaQA/ClimaQA_Gold_llm_judge_gen_f15343.py new file mode 100644 index 00000000..1ec9283d --- /dev/null +++ b/opencompass/configs/datasets/ClimaQA/ClimaQA_Gold_llm_judge_gen_f15343.py @@ -0,0 +1,164 @@ +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.datasets import ClimaQADataset, generic_llmjudge_postprocess + +from opencompass.evaluator import GenericLLMEvaluator + +climaqa_gold_sets = [ + 'mcq', + 'cloze', + 'ffq' +] + +GRADER_TEMPLATE_mcq = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. The answer may be one of the four options: a, b, c, or d. Only when the options given by prediction are strictly consistent with the answer, the prediction can be considered correct. + 3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:', and only judge whether the candidate's answer is consistent with the standard answer. + + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + + + : \n{input}\n\n\n + : \n{target}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + + +GRADER_TEMPLATE_cloze = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. 
You can assume that the standard answer is definitely correct. + 2. The form of the answer is a word or a phrase. Please strictly compare the prediction and the answer. Only when the prediction and the answer are exactly the same, will the prediction be considered correct; otherwise, it will be considered incorrect. + 3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is consistent with the standard answer. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + + + : \n{input}\n\n\n + : \n{target}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + + +GRADER_TEMPLATE_ffq = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. + 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. The type of question is open-ended Q&A. Please compare whether the prediction is close enough to the meaning of the answer and whether the prediction covers each key point in the answer. If the prediction meets the above requirements, it can be considered very close to the answer. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + 5. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is very close to the standard answer. + + Please judge whether the following answers are close to the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: very close to the answer + B: not very close to the answer + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either A or B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. 
+ + + : \n{input}\n\n\n + : \n{target}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + + +climaqa_reader_cfg = dict(input_columns=['input'], output_column='target') + +climaqa_datasets = [] + +for _task in climaqa_gold_sets: + + if _task == 'mcq': + GRADER_TEMPLATE = GRADER_TEMPLATE_mcq + infer_prompt = f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification. The question is multiple choice with a single correct answer, the final answer must only be the letter corresponding to the correct answer. For example, \"The answer is: a\"\n\nQ: {{input}}\nA: " + if _task == 'ffq': + GRADER_TEMPLATE = GRADER_TEMPLATE_ffq + infer_prompt = f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\".\n\nQ: {{input}}\nA: " + if _task == 'cloze': + GRADER_TEMPLATE = GRADER_TEMPLATE_cloze + infer_prompt = f"Fill the in the sentence. Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification, and provide the answer directly, with no formatting, no bolding, and no markup. For instance: \"The answer is: 42\" or \"The answer is: yes\".\n\nQ: {{input}}\nA: " + + climaqa_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict( + role='HUMAN', + prompt=infer_prompt, + ) + ] + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), + ) + + climaqa_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.", + ) + ], + round=[ + dict(role='HUMAN', prompt=GRADER_TEMPLATE), + ], + ), + ), + dataset_cfg=dict( + type=ClimaQADataset, + path='opencompass/ClimaQA-Gold', + task=_task, + abbr='ClimaQA_Gold_' + _task, + reader_cfg=climaqa_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + ), + pred_role='BOT', + ) + + climaqa_datasets.append( + dict( + abbr='ClimaQA_Gold_' + _task, + type=ClimaQADataset, + path='opencompass/ClimaQA-Gold', + task=_task, + reader_cfg=climaqa_reader_cfg, + infer_cfg=climaqa_infer_cfg, + eval_cfg=climaqa_eval_cfg, + ) + ) + + diff --git a/opencompass/configs/datasets/ClimaQA/ClimaQA_Silver_llm_judge_gen.py b/opencompass/configs/datasets/ClimaQA/ClimaQA_Silver_llm_judge_gen.py new file mode 100644 index 00000000..958ca7d3 --- /dev/null +++ b/opencompass/configs/datasets/ClimaQA/ClimaQA_Silver_llm_judge_gen.py @@ -0,0 +1,4 @@ +from mmengine.config import read_base + +with read_base(): + from .ClimaQA_Silver_llm_judge_gen_f15343 import climaqa_datasets # noqa: F401, F403 \ No newline at end of file diff --git a/opencompass/configs/datasets/ClimaQA/ClimaQA_Silver_llm_judge_gen_f15343.py b/opencompass/configs/datasets/ClimaQA/ClimaQA_Silver_llm_judge_gen_f15343.py new file mode 100644 index 00000000..afe64d9d --- /dev/null +++ b/opencompass/configs/datasets/ClimaQA/ClimaQA_Silver_llm_judge_gen_f15343.py @@ -0,0 +1,160 @@ +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.datasets import ClimaQADataset, generic_llmjudge_postprocess + +from opencompass.evaluator import 
GenericLLMEvaluator + +climaqa_silver_sets = [ + 'mcq', + 'cloze', + 'ffq' +] + +GRADER_TEMPLATE_mcq = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. The answer may be one of the four options: a, b, c, or d. Only when the options given by prediction are strictly consistent with the answer, the prediction can be considered correct. + 3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:', and only judge whether the candidate's answer is consistent with the standard answer. + + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + + + : \n{input}\n\n\n + : \n{target}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + +GRADER_TEMPLATE_cloze = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. The form of the answer is a word or a phrase. Please strictly compare the prediction and the answer. Only when the prediction and the answer are exactly the same, will the prediction be considered correct; otherwise, it will be considered incorrect. + 3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is consistent with the standard answer. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + + + : \n{input}\n\n\n + : \n{target}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + +GRADER_TEMPLATE_ffq = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. 
Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. + 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. The type of question is open-ended Q&A. Please compare whether the prediction is close enough to the meaning of the answer and whether the prediction covers each key point in the answer. If the prediction meets the above requirements, it can be considered very close to the answer. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + 5. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is very close to the standard answer. + + Please judge whether the following answers are close to the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: very close to the answer + B: not very close to the answer + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either A or B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + + + : \n{input}\n\n\n + : \n{target}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + +climaqa_reader_cfg = dict(input_columns=['input'], output_column='target') + +climaqa_datasets = [] + +for _task in climaqa_silver_sets: + + if _task == 'mcq': + GRADER_TEMPLATE = GRADER_TEMPLATE_mcq + infer_prompt = f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification. The question is multiple choice with a single correct answer, the final answer must only be the letter corresponding to the correct answer. For example, \"The answer is: a\"\n\nQ: {{input}}\nA: " + if _task == 'ffq': + GRADER_TEMPLATE = GRADER_TEMPLATE_ffq + infer_prompt = f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\".\n\nQ: {{input}}\nA: " + if _task == 'cloze': + GRADER_TEMPLATE = GRADER_TEMPLATE_cloze + infer_prompt = f"Fill the in the sentence. Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification, and provide the answer directly, with no formatting, no bolding, and no markup. 
For instance: \"The answer is: 42\" or \"The answer is: yes\".\n\nQ: {{input}}\nA: " + + climaqa_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict( + role='HUMAN', + prompt=infer_prompt, + ) + ] + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), + ) + + climaqa_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.", + ) + ], + round=[ + dict(role='HUMAN', prompt=GRADER_TEMPLATE), + ], + ), + ), + dataset_cfg=dict( + type=ClimaQADataset, + path='opencompass/ClimaQA-Silver', + task=_task, + abbr='ClimaQA_Silver_' + _task, + reader_cfg=climaqa_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + ), + pred_role='BOT', + ) + + climaqa_datasets.append( + dict( + abbr='ClimaQA_Silver_' + _task, + type=ClimaQADataset, + path='opencompass/ClimaQA-Silver', + task=_task, + reader_cfg=climaqa_reader_cfg, + infer_cfg=climaqa_infer_cfg, + eval_cfg=climaqa_eval_cfg, + ) + ) + diff --git a/opencompass/configs/datasets/PHYSICS/PHYSICS_llm_judge_gen.py b/opencompass/configs/datasets/PHYSICS/PHYSICS_llm_judge_gen.py new file mode 100644 index 00000000..3859ddde --- /dev/null +++ b/opencompass/configs/datasets/PHYSICS/PHYSICS_llm_judge_gen.py @@ -0,0 +1,4 @@ +from mmengine.config import read_base + +with read_base(): + from .PHYSICS_llm_judge_gen_a133a2 import physics_datasets # noqa: F401, F403 \ No newline at end of file diff --git a/opencompass/configs/datasets/PHYSICS/PHYSICS_llm_judge_gen_a133a2.py b/opencompass/configs/datasets/PHYSICS/PHYSICS_llm_judge_gen_a133a2.py new file mode 100644 index 00000000..79b8d023 --- /dev/null +++ b/opencompass/configs/datasets/PHYSICS/PHYSICS_llm_judge_gen_a133a2.py @@ -0,0 +1,131 @@ +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.datasets import ( + PHYSICSDataset, + generic_llmjudge_postprocess, +) +from opencompass.evaluator import GenericLLMEvaluator + +physics_sets = [ + 'atomic_dataset_textonly', + 'electro_dataset_textonly', + 'mechanics_dataset_textonly', + 'optics_dataset_textonly', + 'quantum_dataset_textonly', + 'statistics_dataset_textonly', +] + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. 
Some questions may include multiple sub questions and sub answers. Each sub answer is given after a guide character in the form of or , etc. Please note that only when all sub predictions given in prediction correspond one-to-one with the answer and are all correct, will the prediction be considered correct; otherwise, it will be considered incorrect. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + 5. The final answers in the prediction are generally given with \\boxed{}. If you cannot find sufficient \\boxed{} in the prediction, please try to find matching answers from other places within the prediction as much as possible. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: All Sub Predictions Are Correct + B: Not Every Sub Predictions is Correct + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either A, B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + + : \n{input}\n\n\n + : \n{target}\n\n\n + : \n{prediction}\n\n\n + + Judging the correctness of candidates' answers: +""".strip() + +# GRADER_TEMPLATE = """ +# Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. +# +# Here are some evaluation criteria: +# 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. +# 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. +# 3. Some questions may include multiple sub questions and sub answers. Each sub answer is given after a guide character in the form of or , etc. Please note that as long as at least one correct answer appears in the prediction, the prediction is considered correct. +# 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. +# 5. The final answers in the prediction are generally given with \\boxed{}. If you cannot find sufficient \\boxed{} in the prediction, please try to find matching answers from other places within the prediction as much as possible. +# +# Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: +# A: At Least One Sub Prediction is Correct +# B: All Sub Predictions are Incorrect +# Just return the letters "A" or "B", with no text around it. 
+# +# Here is your task. Simply reply with either A, B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. +# +# : \n{input}\n\n\n +# : \n{target}\n\n\n +# : \n{prediction}\n\n\n +# +# Judging the correctness of candidates' answers: +# """.strip() + +physics_reader_cfg = dict(input_columns=['input'], output_column='target') + +physics_datasets = [] + +for _name in physics_sets: + + physics_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict( + role='HUMAN', + prompt=f'Answer the given question step by step. Begin by explaining your reasoning process clearly. Conclude by providing the final answers at the end in LaTeX boxed format. Think step by step before answering. It should be noted that the question may include multiple sub questions, please ensure that each question is answered in order.\n\nQ: {{input}}\nA: ', + ) + ] + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), + ) + + physics_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.", + ) + ], + round=[ + dict(role='HUMAN', prompt=GRADER_TEMPLATE), + ], + ), + ), + dataset_cfg=dict( + type=PHYSICSDataset, + path='opencompass/PHYSICS-textonly', + abbr='PHYSICS_' + _name, + name=_name, + reader_cfg=physics_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + ), + pred_role='BOT', + ) + + physics_datasets.append( + dict( + abbr='PHYSICS_' + _name, + type=PHYSICSDataset, + path='opencompass/PHYSICS-textonly', + name=_name, + reader_cfg=physics_reader_cfg, + infer_cfg=physics_infer_cfg, + eval_cfg=physics_eval_cfg, + ) + ) + diff --git a/opencompass/configs/summarizers/groups/PHYSICS.py b/opencompass/configs/summarizers/groups/PHYSICS.py new file mode 100644 index 00000000..eff80721 --- /dev/null +++ b/opencompass/configs/summarizers/groups/PHYSICS.py @@ -0,0 +1,14 @@ +physics_summary_groups = [] + +# bbh +_physcis = [ + 'atomic_dataset_textonly', + 'electro_dataset_textonly', + 'mechanics_dataset_textonly', + 'optics_dataset_textonly', + 'quantum_dataset_textonly', + 'statistics_dataset_textonly', +] + +_physcis = ['PHYSICS_' + s for s in _physcis] +physics_summary_groups.append({'name': 'PHYSICS', 'subsets': _physcis}) \ No newline at end of file diff --git a/opencompass/datasets/__init__.py b/opencompass/datasets/__init__.py index 3e2d0eef..82314e9e 100644 --- a/opencompass/datasets/__init__.py +++ b/opencompass/datasets/__init__.py @@ -25,6 +25,7 @@ from .chinese_simpleqa import * # noqa: F401, F403 from .cibench import * # noqa: F401, F403 from .circular import * # noqa: F401, F403 from .civilcomments import * # noqa: F401, F403 +from .climaqa import * # noqa: F401, F403 from .clozeTest_maxmin import * # noqa: F401, F403 from .cluewsc import * # noqa: F401, F403 from .cmb import * # noqa: F401, F403 @@ -110,6 +111,7 @@ from .obqa import * # noqa: F401, F403 from .olymmath import * # noqa: F401, F403 from .OlympiadBench import * # noqa: F401, F403 from .OpenFinData import * # noqa: F401, F403 +from .physics import * # noqa: F401, F403 from .piqa import * # noqa: F401, F403 from .py150 import * # noqa: F401, F403 from .qasper import * # noqa: F401, F403 diff --git a/opencompass/datasets/climaqa.py 
b/opencompass/datasets/climaqa.py new file mode 100644 index 00000000..b11988b7 --- /dev/null +++ b/opencompass/datasets/climaqa.py @@ -0,0 +1,30 @@ +import os + +from datasets import load_dataset + +from opencompass.datasets.base import BaseDataset +from opencompass.registry import LOAD_DATASET +from opencompass.utils import get_data_path + + +@LOAD_DATASET.register_module() +class ClimaQADataset(BaseDataset): + + @staticmethod + def load(path: str, task: str, **kwargs): + + path = get_data_path(path) + path = os.path.join(path, task) + climateqa = load_dataset(path)['train'] + + input_column = [] + for i in range(len(climateqa)): + if 'Options' in climateqa[i].keys( + ) and climateqa[i]['Options'] is not None: + input_column.append(climateqa[i]['Question'] + '\n' + + climateqa[i]['Options']) + else: + input_column.append(climateqa[i]['Question']) + climateqa = climateqa.add_column(name='input', column=input_column) + climateqa = climateqa.rename_column('Answer', 'target') + return climateqa diff --git a/opencompass/datasets/physics.py b/opencompass/datasets/physics.py new file mode 100644 index 00000000..2e5f878f --- /dev/null +++ b/opencompass/datasets/physics.py @@ -0,0 +1,30 @@ +import os + +from datasets import load_dataset + +from opencompass.datasets.base import BaseDataset +from opencompass.registry import LOAD_DATASET +from opencompass.utils import get_data_path + + +@LOAD_DATASET.register_module() +class PHYSICSDataset(BaseDataset): + + @staticmethod + def load(path: str, name: str, **kwargs): + path = get_data_path(path) + path = os.path.join(path, name) + physics = load_dataset(path)['train'] + physics = physics.rename_column('questions', 'input') + + target = [] + for i in physics: + this_final_answer = '' + for j in range(len(i['final_answers'])): + this_final_answer += 'Answer ' + str(j + 1) + ': ' + this_final_answer += i['final_answers'][j] + this_final_answer += '\n' + target.append(this_final_answer) + physics = physics.add_column(name='target', column=target) + + return physics diff --git a/opencompass/utils/datasets_info.py b/opencompass/utils/datasets_info.py index 00db25e8..e3690162 100644 --- a/opencompass/utils/datasets_info.py +++ b/opencompass/utils/datasets_info.py @@ -420,9 +420,40 @@ DATASETS_MAPPING = { "hf_id": "", "local": "./data/OlympiadBench", }, + "opencompass/ClimaQA-Gold": { + "ms_id": "", + "hf_id": "", + "local": "./data/climaqa_gold", + }, + "opencompass/ClimaQA-Silver": { + "ms_id": "", + "hf_id": "", + "local": "./data/climaqa_silver", + }, + "opencompass/PHYSICS-textonly": { + "ms_id": "", + "hf_id": "", + "local": "./data/PHYSICS-textonly", + }, + } DATASETS_URL = { + "/climaqa_gold": { + "url": + "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/climaqa_gold.zip", + "md5": "310cd0dc96db2bbbce798c40e2163ac2", + }, + "/climaqa_silver": { + "url": + "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/climaqa_silver.zip", + "md5": "acdd955f1c170539c5233c12f7227c58", + }, + "/PHYSICS-textonly": { + "url": + "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/PHYSICS-textonly.zip", + "md5": "92be6846a22dd4da942ca43f0638c709", + }, "/OlympiadBench": { "url": "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/OlympiadBench.zip", From 65ff602cf556f59a279cf7a81ded95a9b37322d4 Mon Sep 17 00:00:00 2001 From: Linchen Xiao Date: Tue, 15 Apr 2025 11:33:16 +0800 Subject: [PATCH 5/5] [Update] Fix LLM Judge metrics cacluation & Add reasoning content concat to OpenAI SDK --- opencompass/datasets/generic.py | 3 +- 
opencompass/models/openai_api.py | 48 ++++++++++++++++++-------------- 2 files changed, 28 insertions(+), 23 deletions(-) diff --git a/opencompass/datasets/generic.py b/opencompass/datasets/generic.py index 07b6a0bb..deca2486 100644 --- a/opencompass/datasets/generic.py +++ b/opencompass/datasets/generic.py @@ -37,7 +37,6 @@ def get_final_results(judged_answers, is_correct = is_correct_count / count is_incorrect = is_incorrect_count / count is_given_attempted = is_correct + is_incorrect - loose_accuracy = is_correct / count accuracy_given_attempted = (is_correct / is_given_attempted if is_given_attempted > 0 else 0) attempted_judge_ratio = attempted_judge_count / count @@ -46,7 +45,7 @@ def get_final_results(judged_answers, (accuracy_given_attempted + is_correct) if (accuracy_given_attempted + is_correct) > 0 else 0) result = { - metric_name: loose_accuracy * 100, + metric_name: is_correct * 100, f'{metric_name}_given_attempted': accuracy_given_attempted * 100, 'f1': f1, 'attempted_ratio': attempted_judge_ratio * 100, diff --git a/opencompass/models/openai_api.py b/opencompass/models/openai_api.py index 7b2c2c53..6ef11b8f 100644 --- a/opencompass/models/openai_api.py +++ b/opencompass/models/openai_api.py @@ -531,27 +531,26 @@ class OpenAI(BaseAPIModel): class OpenAISDK(OpenAI): - def __init__( - self, - path: str = 'gpt-3.5-turbo', - max_seq_len: int = 16384, - query_per_second: int = 1, - rpm_verbose: bool = False, - retry: int = 2, - key: str | List[str] = 'ENV', - org: str | List[str] | None = None, - meta_template: Dict | None = None, - openai_api_base: str | List[str] = OPENAISDK_API_BASE, - openai_proxy_url: Optional[str] = None, - mode: str = 'none', - logprobs: bool | None = False, - top_logprobs: int | None = None, - temperature: float | None = None, - tokenizer_path: str | None = None, - extra_body: Dict | None = None, - verbose: bool = False, - status_code_mappings: dict = {}, - ): + def __init__(self, + path: str = 'gpt-3.5-turbo', + max_seq_len: int = 16384, + query_per_second: int = 1, + rpm_verbose: bool = False, + retry: int = 2, + key: str | List[str] = 'ENV', + org: str | List[str] | None = None, + meta_template: Dict | None = None, + openai_api_base: str | List[str] = OPENAISDK_API_BASE, + openai_proxy_url: Optional[str] = None, + mode: str = 'none', + logprobs: bool | None = False, + top_logprobs: int | None = None, + temperature: float | None = None, + tokenizer_path: str | None = None, + extra_body: Dict | None = None, + verbose: bool = False, + status_code_mappings: dict = {}, + think_tag: str = ''): super().__init__( path, max_seq_len, @@ -596,6 +595,7 @@ class OpenAISDK(OpenAI): if self.verbose: self.logger.info(f'Used openai_client: {self.openai_client}') self.status_code_mappings = status_code_mappings + self.think_tag = think_tag def _generate(self, input: PromptList | str, @@ -670,6 +670,12 @@ class OpenAISDK(OpenAI): num_retries += 1 # Continue to retry instead of returning empty response continue + # If the model has reasoning_content, concat it + # with the content + if hasattr(responses.choices[0].message, 'reasoning_content'): + return (responses.choices[0].message.reasoning_content + + self.think_tag + + responses.choices[0].message.content) return responses.choices[0].message.content
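
For reference on the first patch in this series: on Python 3, comparing a list to an int (the old `k > 1` check) raises a TypeError, so the fix normalizes `k` into `k_list` before taking `max`. Below is a minimal sketch of the corrected condition; the helper name `needs_gpass` is hypothetical and used only for illustration.

    def needs_gpass(k, n):
        # Mirrors the corrected condition: k may be an int or a list of ints.
        k_list = [k] if isinstance(k, int) else k
        return n > 1 and max(k_list) > 1

    print(needs_gpass(4, 8))       # True
    print(needs_gpass([2, 4], 8))  # True -- no TypeError, unlike the old `k > 1`
    print(needs_gpass([1], 8))     # False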