feat ClimateQA

Myhs-phz 2025-04-10 09:18:11 +00:00
parent 12213207b6
commit 390e33e51b
7 changed files with 576 additions and 0 deletions

View File

@@ -997,3 +997,11 @@
paper: https://arxiv.org/pdf/2502.14739
configpath: opencompass/configs/datasets/supergpqa
configpath_llmjudge: ''
- climateqa:
name: ClimateQA
category: Science
paper: https://arxiv.org/pdf/2410.16701
configpath: ''
configpath_llmjudge:
- opencompass/configs/datasets/ClimateQA/ClimateQA_Gold_llm_judge.py
- opencompass/configs/datasets/ClimateQA/ClimateQA_Silver_llm_judge.py
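For context, a minimal run config could pull these LLM-judge datasets in roughly as follows. This is only a sketch: the wrapper module path is inferred from the configpath_llmjudge entry above, the judge model fields (OpenAI, path, key, max_out_len) are illustrative assumptions, and the candidate models list is omitted.

from mmengine.config import read_base
from opencompass.models import OpenAI

with read_base():
    # module path inferred from configpath_llmjudge; adjust if the wrapper file is named differently
    from opencompass.configs.datasets.ClimateQA.ClimateQA_Gold_llm_judge import climateqa_datasets

datasets = climateqa_datasets

# Illustrative judge model; each dataset's eval_cfg leaves judge_cfg empty,
# so a judge config has to be supplied before evaluation.
judge_model = dict(
    abbr='llm-judge',
    type=OpenAI,
    path='gpt-4o',   # assumed judge model name
    key='ENV',       # read the API key from the environment
    max_out_len=1024,
)

for d in datasets:
    d['eval_cfg']['evaluator']['judge_cfg'] = judge_model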

View File

@@ -0,0 +1,4 @@
from mmengine.config import read_base
with read_base():
from .ClimateQA_Gold_llm_judge_gen_f15343 import climateqa_datasets # noqa: F401, F403

View File

@@ -0,0 +1,270 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import ClimateQADataset, generic_llmjudge_postprocess
from opencompass.evaluator import GenericLLMEvaluator
climateqa_gold_sets = [
'mcq',
'cloze',
'ffq'
]
GRADER_TEMPLATE_mcq = """
As a grading expert, please judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. The answer may be one of four options: a, b, c, or d. The prediction can be considered correct only when the option given in the prediction is strictly consistent with the answer.
3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is consistent with the standard answer.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either A or B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{input}\n<Original Question End>\n\n
<Gold Target Begin>: \n{Answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
GRADER_TEMPLATE_cloze = """
As a grading expert, please judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. The form of the answer is a word or a phrase. Please strictly compare the prediction and the answer. Only when the prediction and the answer are exactly the same, will the prediction be considered correct; otherwise, it will be considered incorrect.
3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is consistent with the standard answer.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either A or B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{input}\n<Original Question End>\n\n
<Gold Target Begin>: \n{Answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
GRADER_TEMPLATE_ffq = """
As a grading expert, please judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question.
2. Because the candidate's answer may differ from the standard answer in its form of expression, please first understand the question and the standard answer, and then judge whether the candidate's answer is correct. Be careful not to answer the original question yourself.
3. The type of question is open-ended Q&A. Please compare whether the prediction is close enough to the meaning of the answer and whether the prediction covers each key point in the answer. If the prediction meets the above requirements, it can be considered very close to the answer.
4. Answers may be expressed in different ways; for example, one may be a mathematical expression and another a textual description. As long as the meaning expressed is the same, the prediction should be accepted, and formulas written in different but equivalent forms are both correct.
5. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is very close to the standard answer.
Please judge whether the following answers are close to the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: very close to the answer
B: not very close to the answer
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either A or B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{input}\n<Original Question End>\n\n
<Gold Target Begin>: \n{Answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
climateqa_reader_cfg = dict(input_columns=['input'], output_column='Answer')
climateqa_datasets = []
for _task in climateqa_gold_sets:
if _task == 'mcq':
climateqa_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification. The question is multiple choice with a single correct answer, the final answer must only be the letter corresponding to the correct answer. For example, \"The answer is: a\"\n\nQ: {{input}}\nA: ",
)
]
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
climateqa_eval_cfg = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE_mcq),
],
),
),
dataset_cfg=dict(
type=ClimateQADataset,
path='data/climaqa_gold/'+_task,
abbr='ClimateQA_Gold_'+_task,
reader_cfg=climateqa_reader_cfg,
),
judge_cfg=dict(),
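# judge_cfg is left empty on purpose; a judge model config is expected to be
# supplied when the evaluation is launched.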
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
pred_role='BOT',
)
climateqa_datasets.append(
dict(
abbr='ClimateQA_Gold_'+_task,
type=ClimateQADataset,
path='data/climaqa_gold/'+_task,
reader_cfg=climateqa_reader_cfg,
infer_cfg=climateqa_infer_cfg,
eval_cfg=climateqa_eval_cfg,
)
)
if _task == 'cloze':
climateqa_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=f"Fill the <Mask> in the sentence. Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification, and provide the answer directly, with no formatting, no bolding, and no markup. For instance: \"The answer is: 42\" or \"The answer is: yes\".\n\nQ: {{input}}\nA: ",
)
]
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
climateqa_eval_cfg = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE_cloze),
],
),
),
dataset_cfg=dict(
type=ClimateQADataset,
path='data/climaqa_gold/'+_task,
abbr='ClimateQA_Gold_'+_task,
reader_cfg=climateqa_reader_cfg,
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
pred_role='BOT',
)
climateqa_datasets.append(
dict(
abbr='ClimateQA_Gold_'+_task,
type=ClimateQADataset,
path='data/climaqa_gold/'+_task,
reader_cfg=climateqa_reader_cfg,
infer_cfg=climateqa_infer_cfg,
eval_cfg=climateqa_eval_cfg,
)
)
if _task == 'ffq':
climateqa_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\".\n\nQ: {{input}}\nA: ",
)
]
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
climateqa_eval_cfg = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE_ffq),
],
),
),
dataset_cfg=dict(
type=ClimateQADataset,
path='data/climaqa_gold/'+_task,
abbr='ClimateQA_Gold_' + _task,
reader_cfg=climateqa_reader_cfg,
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
pred_role='BOT',
)
climateqa_datasets.append(
dict(
abbr='ClimateQA_Gold_'+_task,
type=ClimateQADataset,
path='data/climaqa_gold/'+_task,
reader_cfg=climateqa_reader_cfg,
infer_cfg=climateqa_infer_cfg,
eval_cfg=climateqa_eval_cfg,
)
)
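As a quick sanity check on the loop above, loading this config with mmengine should yield three dataset entries, one per Gold subset (a sketch; the file location is assumed from the wrapper import earlier in the commit):

from mmengine.config import Config

cfg = Config.fromfile('opencompass/configs/datasets/ClimateQA/ClimateQA_Gold_llm_judge_gen_f15343.py')  # assumed location
print([d['abbr'] for d in cfg.climateqa_datasets])
# expected: ['ClimateQA_Gold_mcq', 'ClimateQA_Gold_cloze', 'ClimateQA_Gold_ffq']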

View File

@@ -0,0 +1,4 @@
from mmengine.config import read_base
with read_base():
from .ClimateQA_Silver_llm_judge_gen_f15343 import climateqa_datasets # noqa: F401, F403

View File

@@ -0,0 +1,263 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import ClimateQADataset, generic_llmjudge_postprocess
from opencompass.evaluator import GenericLLMEvaluator
climateqa_silver_sets = [
'mcq',
'cloze',
'ffq'
]
GRADER_TEMPLATE_mcq = """
As a grading expert, please judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. The answer may be one of four options: a, b, c, or d. The prediction can be considered correct only when the option given in the prediction is strictly consistent with the answer.
3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is consistent with the standard answer.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either A or B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{input}\n<Original Question End>\n\n
<Gold Target Begin>: \n{Answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
GRADER_TEMPLATE_cloze = """
As a grading expert, please judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. The form of the answer is a word or a phrase. Please strictly compare the prediction and the answer. Only when the prediction and the answer are exactly the same, will the prediction be considered correct; otherwise, it will be considered incorrect.
3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is consistent with the standard answer.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either A or B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{input}\n<Original Question End>\n\n
<Gold Target Begin>: \n{Answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
GRADER_TEMPLATE_ffq = """
As a grading expert, please judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question.
2. Because the candidate's answer may differ from the standard answer in its form of expression, please first understand the question and the standard answer, and then judge whether the candidate's answer is correct. Be careful not to answer the original question yourself.
3. The type of question is open-ended Q&A. Please compare whether the prediction is close enough to the meaning of the answer and whether the prediction covers each key point in the answer. If the prediction meets the above requirements, it can be considered very close to the answer.
4. Answers may be expressed in different ways; for example, one may be a mathematical expression and another a textual description. As long as the meaning expressed is the same, the prediction should be accepted, and formulas written in different but equivalent forms are both correct.
5. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is very close to the standard answer.
Please judge whether the following answers are close to the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: very close to the answer
B: not very close to the answer
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either A or B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: \n{input}\n<Original Question End>\n\n
<Gold Target Begin>: \n{Answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
climateqa_reader_cfg = dict(input_columns=['input'], output_column='Answer')
climateqa_datasets = []
for _task in climateqa_silver_sets:
if _task == 'mcq':
climateqa_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification. The question is multiple choice with a single correct answer, the final answer must only be the letter corresponding to the correct answer. For example, \"The answer is: a\"\n\nQ: {{input}}\nA: ",
)
]
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
climateqa_eval_cfg = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE_mcq),
],
),
),
dataset_cfg=dict(
type=ClimateQADataset,
path='data/climaqa_silver/' + _task,
abbr='ClimateQA_Silver_' + _task,
reader_cfg=climateqa_reader_cfg,
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
pred_role='BOT',
)
climateqa_datasets.append(
dict(
abbr='ClimateQA_Silver_' + _task,
type=ClimateQADataset,
path='data/climaqa_silver/' + _task,
reader_cfg=climateqa_reader_cfg,
infer_cfg=climateqa_infer_cfg,
eval_cfg=climateqa_eval_cfg,
)
)
if _task == 'cloze':
climateqa_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=f"Fill the <Mask> in the sentence. Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification, and provide the answer directly, with no formatting, no bolding, and no markup. For instance: \"The answer is: 42\" or \"The answer is: yes\".\n\nQ: {{input}}\nA: ",
)
]
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
climateqa_eval_cfg = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE_cloze),
],
),
),
dataset_cfg=dict(
type=ClimateQADataset,
path='data/climaqa_silver/' + _task,
abbr='ClimateQA_Silver_' + _task,
reader_cfg=climateqa_reader_cfg,
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
pred_role='BOT',
)
climateqa_datasets.append(
dict(
abbr='ClimateQA_Silver_' + _task,
type=ClimateQADataset,
path='data/climaqa_silver/' + _task,
reader_cfg=climateqa_reader_cfg,
infer_cfg=climateqa_infer_cfg,
eval_cfg=climateqa_eval_cfg,
)
)
if _task == 'ffq':
climateqa_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\".\n\nQ: {{input}}\nA: ",
)
]
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
climateqa_eval_cfg = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE_ffq),
],
),
),
dataset_cfg=dict(
type=ClimateQADataset,
path='data/climaqa_silver/' + _task,
abbr='ClimateQA_Silver_' + _task,
reader_cfg=climateqa_reader_cfg,
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
pred_role='BOT',
)
climateqa_datasets.append(
dict(
abbr='ClimateQA_Silver_' + _task,
type=ClimateQADataset,
path='data/climaqa_silver/' + _task,
reader_cfg=climateqa_reader_cfg,
infer_cfg=climateqa_infer_cfg,
eval_cfg=climateqa_eval_cfg,
)
)
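Both the Gold and Silver config files export the same variable name, climateqa_datasets, so a run config that wants both has to alias one of them on import. A minimal sketch, with module paths assumed from the wrapper imports above:

from mmengine.config import read_base

with read_base():
    from opencompass.configs.datasets.ClimateQA.ClimateQA_Gold_llm_judge_gen_f15343 import climateqa_datasets as climateqa_gold_datasets
    from opencompass.configs.datasets.ClimateQA.ClimateQA_Silver_llm_judge_gen_f15343 import climateqa_datasets as climateqa_silver_datasets

datasets = [*climateqa_gold_datasets, *climateqa_silver_datasets]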

View File

@@ -25,6 +25,7 @@ from .chinese_simpleqa import * # noqa: F401, F403
from .cibench import * # noqa: F401, F403
from .circular import * # noqa: F401, F403
from .civilcomments import * # noqa: F401, F403
from .climateqa import * # noqa: F401, F403
from .clozeTest_maxmin import * # noqa: F401, F403
from .cluewsc import * # noqa: F401, F403
from .cmb import * # noqa: F401, F403

View File

@@ -0,0 +1,26 @@
from datasets import load_dataset
from opencompass.datasets.base import BaseDataset
from opencompass.registry import LOAD_DATASET
from opencompass.utils import get_data_path
@LOAD_DATASET.register_module()
class ClimateQADataset(BaseDataset):
@staticmethod
def load(path: str, **kwargs):
path = get_data_path(path, local_mode=True)
climateqa = load_dataset(path)['train']
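# Each record is expected to provide 'Question' and 'Answer' fields, plus an 'Options'
# field for multiple-choice items; the question (and options, when present) is merged
# into a single 'input' column, matching input_columns=['input'] in the reader configs.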
input_column = []
for i in range(len(climateqa)):
if climateqa[i].get('Options') is not None:
input_column.append(climateqa[i]['Question'] + '\n' + climateqa[i]['Options'])
else:
input_column.append(climateqa[i]['Question'])
climateqa = climateqa.add_column(name='input', column=input_column)
return climateqa
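A quick local smoke test of the loader (a sketch: it assumes the ClimaQA files have already been placed under data/climaqa_gold/mcq in a layout that datasets.load_dataset can read):

from opencompass.datasets import ClimateQADataset

ds = ClimateQADataset.load(path='data/climaqa_gold/mcq')  # assumed local data path
print(len(ds))
print(ds[0]['input'])   # question text plus the options block for mcq items
print(ds[0]['Answer'])  # gold answer column consumed by the reader and judge configs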