Mirror of https://github.com/open-compass/opencompass.git (synced 2025-05-30 16:03:24 +08:00)

Commit da9b77be1c (parent 032669c97a): fix
@@ -997,14 +997,14 @@
     paper: https://arxiv.org/pdf/2502.14739
     configpath: opencompass/configs/datasets/supergpqa
     configpath_llmjudge: ''
-- climateqa:
-    name: ClimateQA
+- climaqa:
+    name: ClimaQA
     category: Science
     paper: https://arxiv.org/pdf/2410.16701
     comfigpath: ''
     configpath_llmjudge:
-      - opencompass/configs/datasets/ClimateQA/ClimateQA_Gold_llm_judge.py
-      - opencompass/configs/datasets/ClimateQA/ClimateQA_Silver_llm_judge.py
+      - opencompass/configs/datasets/ClimaQA/ClimaQA_Gold_llm_judge.py
+      - opencompass/configs/datasets/ClimaQA/ClimaQA_Silver_llm_judge.py
 - physics:
     name: PHYSICS
     category: Science
@@ -0,0 +1,4 @@ (new file: ClimaQA Gold LLM-judge entry config)
from mmengine.config import read_base

with read_base():
    from .ClimaQA_Gold_llm_judge_gen_f15343 import climaqa_datasets  # noqa: F401, F403
@@ -0,0 +1,164 @@ (new file: ClimaQA-Gold LLM-judge dataset config)
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import ClimaQADataset, generic_llmjudge_postprocess

from opencompass.evaluator import GenericLLMEvaluator

climaqa_gold_sets = [
    'mcq',
    'cloze',
    'ffq'
]

GRADER_TEMPLATE_mcq = """
    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

    Here are some evaluation criteria:
    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
    2. The answer may be one of the four options: a, b, c, or d. Only when the options given by prediction are strictly consistent with the answer, the prediction can be considered correct.
    5. If the prediction is given with 'The answer is:', please ignore the 'The answer is:', and only judge whether the candidate's answer is consistent with the standard answer.


    Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
    A: CORRECT
    B: INCORRECT
    Just return the letters "A" or "B", with no text around it.

    Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


    <Original Question Begin>: \n{input}\n<Original Question End>\n\n
    <Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

    Judging the correctness of candidates' answers:
""".strip()


GRADER_TEMPLATE_cloze = """
    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

    Here are some evaluation criteria:
    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
    2. The form of the answer is a word or a phrase. Please strictly compare the prediction and the answer. Only when the prediction and the answer are exactly the same, will the prediction be considered correct; otherwise, it will be considered incorrect.
    3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is consistent with the standard answer.

    Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
    A: CORRECT
    B: INCORRECT
    Just return the letters "A" or "B", with no text around it.

    Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


    <Original Question Begin>: \n{input}\n<Original Question End>\n\n
    <Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

    Judging the correctness of candidates' answers:
""".strip()


GRADER_TEMPLATE_ffq = """
    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

    Here are some evaluation criteria:
    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question.
    2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
    3. The type of question is open-ended Q&A. Please compare whether the prediction is close enough to the meaning of the answer and whether the prediction covers each key point in the answer. If the prediction meets the above requirements, it can be considered very close to the answer.
    4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
    5. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is very close to the standard answer.

    Please judge whether the following answers are close to the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
    A: very close to the answer
    B: not very close to the answer
    Just return the letters "A" or "B", with no text around it.

    Here is your task. Simply reply with either A or B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


    <Original Question Begin>: \n{input}\n<Original Question End>\n\n
    <Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

    Judging the correctness of candidates' answers:
""".strip()


climaqa_reader_cfg = dict(input_columns=['input'], output_column='target')

climaqa_datasets = []

for _task in climaqa_gold_sets:

    if _task == 'mcq':
        GRADER_TEMPLATE = GRADER_TEMPLATE_mcq
        infer_prompt = f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification. The question is multiple choice with a single correct answer, the final answer must only be the letter corresponding to the correct answer. For example, \"The answer is: a\"\n\nQ: {{input}}\nA: "
    if _task == 'ffq':
        GRADER_TEMPLATE = GRADER_TEMPLATE_ffq
        infer_prompt = f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\".\n\nQ: {{input}}\nA: "
    if _task == 'cloze':
        GRADER_TEMPLATE = GRADER_TEMPLATE_cloze
        infer_prompt = f"Fill the <Mask> in the sentence. Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification, and provide the answer directly, with no formatting, no bolding, and no markup. For instance: \"The answer is: 42\" or \"The answer is: yes\".\n\nQ: {{input}}\nA: "

    climaqa_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(
                        role='HUMAN',
                        prompt=infer_prompt,
                    )
                ]
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    climaqa_eval_cfg = dict(
        evaluator=dict(
            type=GenericLLMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                        )
                    ],
                    round=[
                        dict(role='HUMAN', prompt=GRADER_TEMPLATE),
                    ],
                ),
            ),
            dataset_cfg=dict(
                type=ClimaQADataset,
                path='opencompass/ClimaQA-Gold',
                task=_task,
                abbr='ClimaQA_Gold_' + _task,
                reader_cfg=climaqa_reader_cfg,
            ),
            judge_cfg=dict(),
            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
        ),
        pred_role='BOT',
    )

    climaqa_datasets.append(
        dict(
            abbr='ClimaQA_Gold_' + _task,
            type=ClimaQADataset,
            path='opencompass/ClimaQA-Gold',
            task=_task,
            reader_cfg=climaqa_reader_cfg,
            infer_cfg=climaqa_infer_cfg,
            eval_cfg=climaqa_eval_cfg,
        )
    )
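For context, a config like this is normally pulled into a top-level evaluation config rather than run directly. The sketch below is illustrative only and is not part of the commit; the dataset import path follows the configpath_llmjudge entries added to the index above, and the model import is a placeholder to be replaced with any model config available locally.

    # Illustrative top-level eval config (not part of this commit).
    from mmengine.config import read_base

    with read_base():
        # climaqa_datasets is the list built by the new Gold config above
        from opencompass.configs.datasets.ClimaQA.ClimaQA_Gold_llm_judge import \
            climaqa_datasets
        # placeholder: substitute a model config that exists in your setup
        from opencompass.configs.models.hf_internlm.hf_internlm2_5_7b_chat import \
            models as candidate_models

    datasets = climaqa_datasets
    models = candidate_models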
@@ -0,0 +1,4 @@ (new file: ClimaQA Silver LLM-judge entry config)
from mmengine.config import read_base

with read_base():
    from .ClimaQA_Silver_llm_judge_gen_f15343 import climaqa_datasets  # noqa: F401, F403
@@ -0,0 +1,160 @@ (new file: ClimaQA-Silver LLM-judge dataset config)
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import ClimaQADataset, generic_llmjudge_postprocess

from opencompass.evaluator import GenericLLMEvaluator

climaqa_silver_sets = [
    'mcq',
    'cloze',
    'ffq'
]

GRADER_TEMPLATE_mcq = """
    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

    Here are some evaluation criteria:
    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
    2. The answer may be one of the four options: a, b, c, or d. Only when the options given by prediction are strictly consistent with the answer, the prediction can be considered correct.
    5. If the prediction is given with 'The answer is:', please ignore the 'The answer is:', and only judge whether the candidate's answer is consistent with the standard answer.


    Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
    A: CORRECT
    B: INCORRECT
    Just return the letters "A" or "B", with no text around it.

    Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


    <Original Question Begin>: \n{input}\n<Original Question End>\n\n
    <Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

    Judging the correctness of candidates' answers:
""".strip()


GRADER_TEMPLATE_cloze = """
    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

    Here are some evaluation criteria:
    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
    2. The form of the answer is a word or a phrase. Please strictly compare the prediction and the answer. Only when the prediction and the answer are exactly the same, will the prediction be considered correct; otherwise, it will be considered incorrect.
    3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is consistent with the standard answer.

    Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
    A: CORRECT
    B: INCORRECT
    Just return the letters "A" or "B", with no text around it.

    Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


    <Original Question Begin>: \n{input}\n<Original Question End>\n\n
    <Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

    Judging the correctness of candidates' answers:
""".strip()


GRADER_TEMPLATE_ffq = """
    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

    Here are some evaluation criteria:
    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question.
    2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
    3. The type of question is open-ended Q&A. Please compare whether the prediction is close enough to the meaning of the answer and whether the prediction covers each key point in the answer. If the prediction meets the above requirements, it can be considered very close to the answer.
    4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
    5. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is very close to the standard answer.

    Please judge whether the following answers are close to the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
    A: very close to the answer
    B: not very close to the answer
    Just return the letters "A" or "B", with no text around it.

    Here is your task. Simply reply with either A or B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


    <Original Question Begin>: \n{input}\n<Original Question End>\n\n
    <Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

    Judging the correctness of candidates' answers:
""".strip()


climaqa_reader_cfg = dict(input_columns=['input'], output_column='target')

climaqa_datasets = []

for _task in climaqa_silver_sets:

    if _task == 'mcq':
        GRADER_TEMPLATE = GRADER_TEMPLATE_mcq
        infer_prompt = f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification. The question is multiple choice with a single correct answer, the final answer must only be the letter corresponding to the correct answer. For example, \"The answer is: a\"\n\nQ: {{input}}\nA: "
    if _task == 'ffq':
        GRADER_TEMPLATE = GRADER_TEMPLATE_ffq
        infer_prompt = f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\".\n\nQ: {{input}}\nA: "
    if _task == 'cloze':
        GRADER_TEMPLATE = GRADER_TEMPLATE_cloze
        infer_prompt = f"Fill the <Mask> in the sentence. Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification, and provide the answer directly, with no formatting, no bolding, and no markup. For instance: \"The answer is: 42\" or \"The answer is: yes\".\n\nQ: {{input}}\nA: "

    climaqa_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(
                        role='HUMAN',
                        prompt=infer_prompt,
                    )
                ]
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    climaqa_eval_cfg = dict(
        evaluator=dict(
            type=GenericLLMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                        )
                    ],
                    round=[
                        dict(role='HUMAN', prompt=GRADER_TEMPLATE),
                    ],
                ),
            ),
            dataset_cfg=dict(
                type=ClimaQADataset,
                path='opencompass/ClimaQA-Silver',
                task=_task,
                abbr='ClimaQA_Silver_' + _task,
                reader_cfg=climaqa_reader_cfg,
            ),
            judge_cfg=dict(),
            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
        ),
        pred_role='BOT',
    )

    climaqa_datasets.append(
        dict(
            abbr='ClimaQA_Silver_' + _task,
            type=ClimaQADataset,
            path='opencompass/ClimaQA-Silver',
            task=_task,
            reader_cfg=climaqa_reader_cfg,
            infer_cfg=climaqa_infer_cfg,
            eval_cfg=climaqa_eval_cfg,
        )
    )
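Both the Gold and Silver configs leave judge_cfg=dict() empty, so the judge model has to be supplied by the surrounding evaluation setup. A minimal sketch of what filling it in might look like, assuming an OpenAI-compatible judge endpoint; the model name, key handling and arguments below are assumptions and not part of this commit.

    # Assumed example of wiring a judge model into the evaluator configs.
    from opencompass.models import OpenAISDK

    judge_cfg = dict(
        type=OpenAISDK,
        path='gpt-4o-mini',          # assumed judge model name
        key='ENV',                   # assumed: read the API key from the environment
        openai_api_base='https://api.openai.com/v1',
        max_out_len=2048,
        batch_size=8,
    )

    for d in climaqa_datasets:
        d['eval_cfg']['evaluator']['judge_cfg'] = judge_cfg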
@@ -1,4 +0,0 @@ (deleted file: old ClimateQA Gold entry config)
from mmengine.config import read_base

with read_base():
    from .ClimateQA_Gold_llm_judge_gen_f15343 import climateqa_datasets  # noqa: F401, F403
@@ -1,270 +0,0 @@ (deleted file: old ClimateQA Gold LLM-judge dataset config)
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import ClimateQADataset, generic_llmjudge_postprocess

from opencompass.evaluator import GenericLLMEvaluator

climateqa_gold_sets = [
    'mcq',
    'cloze',
    'ffq'
]

GRADER_TEMPLATE_mcq = """
    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

    Here are some evaluation criteria:
    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
    2. The answer may be one of the four options: a, b, c, or d. Only when the options given by prediction are strictly consistent with the answer, the prediction can be considered correct.
    5. If the prediction is given with 'The answer is:', please ignore the 'The answer is:', and only judge whether the candidate's answer is consistent with the standard answer.


    Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
    A: CORRECT
    B: INCORRECT
    Just return the letters "A" or "B", with no text around it.

    Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


    <Original Question Begin>: \n{input}\n<Original Question End>\n\n
    <Gold Target Begin>: \n{Answer}\n<Gold Target End>\n\n
    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

    Judging the correctness of candidates' answers:
""".strip()


GRADER_TEMPLATE_cloze = """
    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

    Here are some evaluation criteria:
    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
    2. The form of the answer is a word or a phrase. Please strictly compare the prediction and the answer. Only when the prediction and the answer are exactly the same, will the prediction be considered correct; otherwise, it will be considered incorrect.
    3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is consistent with the standard answer.

    Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
    A: CORRECT
    B: INCORRECT
    Just return the letters "A" or "B", with no text around it.

    Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


    <Original Question Begin>: \n{input}\n<Original Question End>\n\n
    <Gold Target Begin>: \n{Answer}\n<Gold Target End>\n\n
    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

    Judging the correctness of candidates' answers:
""".strip()


GRADER_TEMPLATE_ffq = """
    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

    Here are some evaluation criteria:
    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question.
    2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
    3. The type of question is open-ended Q&A. Please compare whether the prediction is close enough to the meaning of the answer and whether the prediction covers each key point in the answer. If the prediction meets the above requirements, it can be considered very close to the answer.
    4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
    5. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is very close to the standard answer.

    Please judge whether the following answers are close to the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
    A: very close to the answer
    B: not very close to the answer
    Just return the letters "A" or "B", with no text around it.

    Here is your task. Simply reply with either A or B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


    <Original Question Begin>: \n{input}\n<Original Question End>\n\n
    <Gold Target Begin>: \n{Answer}\n<Gold Target End>\n\n
    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

    Judging the correctness of candidates' answers:
""".strip()


climateqa_reader_cfg = dict(input_columns=['input'], output_column='Answer')

climateqa_datasets = []

for _task in climateqa_gold_sets:
    if _task == 'mcq':
        climateqa_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(
                            role='HUMAN',
                            prompt=f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification. The question is multiple choice with a single correct answer, the final answer must only be the letter corresponding to the correct answer. For example, \"The answer is: a\"\n\nQ: {{input}}\nA: ",
                        )
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        )

        climateqa_eval_cfg = dict(
            evaluator=dict(
                type=GenericLLMEvaluator,
                prompt_template=dict(
                    type=PromptTemplate,
                    template=dict(
                        begin=[
                            dict(
                                role='SYSTEM',
                                fallback_role='HUMAN',
                                prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                            )
                        ],
                        round=[
                            dict(role='HUMAN', prompt=GRADER_TEMPLATE_mcq),
                        ],
                    ),
                ),
                dataset_cfg=dict(
                    type=ClimateQADataset,
                    path='data/climaqa_gold/'+_task,
                    abbr='ClimateQA_Gold_'+_task,
                    reader_cfg=climateqa_reader_cfg,
                ),
                judge_cfg=dict(),
                dict_postprocessor=dict(type=generic_llmjudge_postprocess),
            ),
            pred_role='BOT',
        )

        climateqa_datasets.append(
            dict(
                abbr='ClimateQA_Gold_'+_task,
                type=ClimateQADataset,
                path='data/climaqa_gold/'+_task,
                reader_cfg=climateqa_reader_cfg,
                infer_cfg=climateqa_infer_cfg,
                eval_cfg=climateqa_eval_cfg,
            )
        )

    if _task == 'cloze':
        climateqa_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(
                            role='HUMAN',
                            prompt=f"Fill the <Mask> in the sentence. Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification, and provide the answer directly, with no formatting, no bolding, and no markup. For instance: \"The answer is: 42\" or \"The answer is: yes\".\n\nQ: {{input}}\nA: ",
                        )
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        )

        climateqa_eval_cfg = dict(
            evaluator=dict(
                type=GenericLLMEvaluator,
                prompt_template=dict(
                    type=PromptTemplate,
                    template=dict(
                        begin=[
                            dict(
                                role='SYSTEM',
                                fallback_role='HUMAN',
                                prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                            )
                        ],
                        round=[
                            dict(role='HUMAN', prompt=GRADER_TEMPLATE_cloze),
                        ],
                    ),
                ),
                dataset_cfg=dict(
                    type=ClimateQADataset,
                    path='data/climaqa_gold/'+_task,
                    abbr='ClimateQA_Gold_'+_task,
                    reader_cfg=climateqa_reader_cfg,
                ),
                judge_cfg=dict(),
                dict_postprocessor=dict(type=generic_llmjudge_postprocess),
            ),
            pred_role='BOT',
        )

        climateqa_datasets.append(
            dict(
                abbr='ClimateQA_Gold_'+_task,
                type=ClimateQADataset,
                path='data/climaqa_gold/'+_task,
                reader_cfg=climateqa_reader_cfg,
                infer_cfg=climateqa_infer_cfg,
                eval_cfg=climateqa_eval_cfg,
            )
        )

    if _task == 'ffq':
        climateqa_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(
                            role='HUMAN',
                            prompt=f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\".\n\nQ: {{input}}\nA: ",
                        )
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        )

        climateqa_eval_cfg = dict(
            evaluator=dict(
                type=GenericLLMEvaluator,
                prompt_template=dict(
                    type=PromptTemplate,
                    template=dict(
                        begin=[
                            dict(
                                role='SYSTEM',
                                fallback_role='HUMAN',
                                prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                            )
                        ],
                        round=[
                            dict(role='HUMAN', prompt=GRADER_TEMPLATE_ffq),
                        ],
                    ),
                ),
                dataset_cfg=dict(
                    type=ClimateQADataset,
                    path='data/climaqa_gold/'+_task,
                    abbr='ClimateQA_Gold_' + _task,
                    reader_cfg=climateqa_reader_cfg,
                ),
                judge_cfg=dict(),
                dict_postprocessor=dict(type=generic_llmjudge_postprocess),
            ),
            pred_role='BOT',
        )

        climateqa_datasets.append(
            dict(
                abbr='ClimateQA_Gold_'+_task,
                type=ClimateQADataset,
                path='data/climaqa_gold/'+_task,
                reader_cfg=climateqa_reader_cfg,
                infer_cfg=climateqa_infer_cfg,
                eval_cfg=climateqa_eval_cfg,
            )
        )
@@ -1,4 +0,0 @@ (deleted file: old ClimateQA Silver entry config)
from mmengine.config import read_base

with read_base():
    from .ClimateQA_Silver_llm_judge_gen_f15343 import climateqa_datasets  # noqa: F401, F403
@@ -1,263 +0,0 @@ (deleted file: old ClimateQA Silver LLM-judge dataset config)
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import ClimateQADataset, generic_llmjudge_postprocess

from opencompass.evaluator import GenericLLMEvaluator

climateqa_gold_sets = [
    'mcq',
    'cloze',
    'ffq'
]

GRADER_TEMPLATE_mcq = """
    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

    Here are some evaluation criteria:
    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
    2. The answer may be one of the four options: a, b, c, or d. Only when the options given by prediction are strictly consistent with the answer, the prediction can be considered correct.
    5. If the prediction is given with 'The answer is:', please ignore the 'The answer is:', and only judge whether the candidate's answer is consistent with the standard answer.


    Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
    A: CORRECT
    B: INCORRECT
    Just return the letters "A" or "B", with no text around it.

    Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


    <Original Question Begin>: \n{input}\n<Original Question End>\n\n
    <Gold Target Begin>: \n{Answer}\n<Gold Target End>\n\n
    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

    Judging the correctness of candidates' answers:
""".strip()


GRADER_TEMPLATE_cloze = """
    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

    Here are some evaluation criteria:
    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
    2. The form of the answer is a word or a phrase. Please strictly compare the prediction and the answer. Only when the prediction and the answer are exactly the same, will the prediction be considered correct; otherwise, it will be considered incorrect.
    3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is consistent with the standard answer.

    Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
    A: CORRECT
    B: INCORRECT
    Just return the letters "A" or "B", with no text around it.

    Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


    <Original Question Begin>: \n{input}\n<Original Question End>\n\n
    <Gold Target Begin>: \n{Answer}\n<Gold Target End>\n\n
    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

    Judging the correctness of candidates' answers:
""".strip()


GRADER_TEMPLATE_ffq = """
    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

    Here are some evaluation criteria:
    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question.
    2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
    3. The type of question is open-ended Q&A. Please compare whether the prediction is close enough to the meaning of the answer and whether the prediction covers each key point in the answer. If the prediction meets the above requirements, it can be considered very close to the answer.
    4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
    5. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is very close to the standard answer.

    Please judge whether the following answers are close to the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
    A: very close to the answer
    B: not very close to the answer
    Just return the letters "A" or "B", with no text around it.

    Here is your task. Simply reply with either A or B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


    <Original Question Begin>: \n{input}\n<Original Question End>\n\n
    <Gold Target Begin>: \n{Answer}\n<Gold Target End>\n\n
    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

    Judging the correctness of candidates' answers:
""".strip()


climateqa_reader_cfg = dict(input_columns=['input'], output_column='Answer')

climateqa_datasets = []

for _task in climateqa_gold_sets:
    if _task == 'mcq':
        climateqa_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(
                            role='HUMAN',
                            prompt=f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification. The question is multiple choice with a single correct answer, the final answer must only be the letter corresponding to the correct answer. For example, \"The answer is: a\"\n\nQ: {{input}}\nA: ",
                        )
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        )

        climateqa_eval_cfg = dict(
            evaluator=dict(
                type=GenericLLMEvaluator,
                prompt_template=dict(
                    type=PromptTemplate,
                    template=dict(
                        begin=[
                            dict(
                                role='SYSTEM',
                                fallback_role='HUMAN',
                                prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                            )
                        ],
                        round=[
                            dict(role='HUMAN', prompt=GRADER_TEMPLATE_mcq),
                        ],
                    ),
                ),
                dataset_cfg=dict(
                    type=ClimateQADataset,
                    path='data/climaqa_silver/' + _task,
                    abbr='ClimateQA_Silver_' + _task,
                    reader_cfg=climateqa_reader_cfg,
                ),
                judge_cfg=dict(),
                dict_postprocessor=dict(type=generic_llmjudge_postprocess),
            ),
            pred_role='BOT',
        )

        climateqa_datasets.append(
            dict(
                abbr='ClimateQA_Silver_' + _task,
                type=ClimateQADataset,
                path='data/climaqa_silver/' + _task,
                reader_cfg=climateqa_reader_cfg,
                infer_cfg=climateqa_infer_cfg,
                eval_cfg=climateqa_eval_cfg,
            )
        )

    if _task == 'cloze':
        climateqa_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(
                            role='HUMAN',
                            prompt=f"Fill the <Mask> in the sentence. Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\"without any modification, and provide the answer directly, with no formatting, no bolding, and no markup. For instance: \"The answer is: 42\" or \"The answer is: yes\".\n\nQ: {{input}}\nA: ",
                        )
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        )

        climateqa_eval_cfg = dict(
            evaluator=dict(
                type=GenericLLMEvaluator,
                prompt_template=dict(
                    type=PromptTemplate,
                    template=dict(
                        begin=[
                            dict(
                                role='SYSTEM',
                                fallback_role='HUMAN',
                                prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                            )
                        ],
                        round=[
                            dict(role='HUMAN', prompt=GRADER_TEMPLATE_cloze),
                        ],
                    ),
                ),
                dataset_cfg=dict(
                    type=ClimateQADataset,
                    path='data/climaqa_Silver/' + _task,
                    abbr='ClimateQA_Silver_' + _task,
                    reader_cfg=climateqa_reader_cfg,
                ),
                judge_cfg=dict(),
                dict_postprocessor=dict(type=generic_llmjudge_postprocess),
            ),
            pred_role='BOT',
        )

        climateqa_datasets.append(
            dict(
                abbr='ClimateQA_Silver_' + _task,
                type=ClimateQADataset,
                path='data/climaqa_silver/' + _task,
                reader_cfg=climateqa_reader_cfg,
                infer_cfg=climateqa_infer_cfg,
                eval_cfg=climateqa_eval_cfg,
            )
        )

    if _task == 'ffq':
        climateqa_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(
                            role='HUMAN',
                            prompt=f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\".\n\nQ: {{input}}\nA: ",
                        )
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        )

        climateqa_eval_cfg = dict(
            evaluator=dict(
                type=GenericLLMEvaluator,
                prompt_template=dict(
                    type=PromptTemplate,
                    template=dict(
                        begin=[
                            dict(
                                role='SYSTEM',
                                fallback_role='HUMAN',
                                prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                            )
                        ],
                        round=[
                            dict(role='HUMAN', prompt=GRADER_TEMPLATE_ffq),
                        ],
                    ),
                ),
                dataset_cfg=dict(
                    type=ClimateQADataset,
                    path='data/climaqa_silver/' + _task,
                    abbr='ClimateQA_Silver_' + _task,
                    reader_cfg=climateqa_reader_cfg,
                ),
                judge_cfg=dict(),
                dict_postprocessor=dict(type=generic_llmjudge_postprocess),
            ),
            pred_role='BOT',
        )

        climateqa_datasets.append(
            dict(
                abbr='ClimateQA_Silver_' + _task,
                type=ClimateQADataset,
                path='data/climaqa_silver/' + _task,
                reader_cfg=climateqa_reader_cfg,
                infer_cfg=climateqa_infer_cfg,
                eval_cfg=climateqa_eval_cfg,
            )
        )
@@ -1,109 +0,0 @@ (deleted file: old PHYSICS LLM-judge dataset config)
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (
    PHYSICSDataset,
    generic_llmjudge_postprocess,
)
from opencompass.evaluator import GenericLLMEvaluator

physics_sets = [
    'atomic_dataset_textonly',
    'electro_dataset_textonly',
    'mechanics_dataset_textonly',
    'optics_dataset_textonly',
    'quantum_dataset_textonly',
    'statistics_dataset_textonly',
]

GRADER_TEMPLATE = """
    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

    Here are some evaluation criteria:
    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
    2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
    3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
    4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
    5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.

    Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
    A: CORRECT
    B: INCORRECT
    Just return the letters "A" or "B", with no text around it.

    Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


    <Original Question Begin>: \n{input}\n<Original Question End>\n\n
    <Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
    <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

    Judging the correctness of candidates' answers:
""".strip()


physics_reader_cfg = dict(input_columns=['input'], output_column='target')

physics_datasets = []

for _name in physics_sets:

    physics_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(
                        role='HUMAN',
                        prompt=f'Answer the given question step by step. Begin by explaining your reasoning process clearly. Conclude by stating the final answer using the following format: “Provide the final answer at the end in LaTeX boxed format final_answer.” Think step by step before answering.\n\nQ: {{input}}\nA: ',
                    )
                ]
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    physics_eval_cfg = dict(
        evaluator=dict(
            type=GenericLLMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                        )
                    ],
                    round=[
                        dict(role='HUMAN', prompt=GRADER_TEMPLATE),
                    ],
                ),
            ),
            dataset_cfg=dict(
                type=PHYSICSDataset,
                path='data/PHYSICS-textonly/',
                abbr='PHYSICS_' + _name,
                name=_name,
                reader_cfg=physics_reader_cfg,
            ),
            judge_cfg=dict(),
            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
        ),
        pred_role='BOT',
    )

    physics_datasets.append(
        dict(
            abbr='PHYSICS_' + _name,
            type=PHYSICSDataset,
            path='data/PHYSICS-textonly/',
            name=_name,
            reader_cfg=physics_reader_cfg,
            infer_cfg=physics_infer_cfg,
            eval_cfg=physics_eval_cfg,
        )
    )
@@ -0,0 +1,4 @@ (new file: PHYSICS LLM-judge entry config)
from mmengine.config import read_base

with read_base():
    from .PHYSICS_llm_judge_gen_88cafb import physics_datasets  # noqa: F401, F403
@@ -25,7 +25,7 @@ from .chinese_simpleqa import *  # noqa: F401, F403
 from .cibench import *  # noqa: F401, F403
 from .circular import *  # noqa: F401, F403
 from .civilcomments import *  # noqa: F401, F403
-from .climateqa import *  # noqa: F401, F403
+from .climaqa import *  # noqa: F401, F403
 from .clozeTest_maxmin import *  # noqa: F401, F403
 from .cluewsc import *  # noqa: F401, F403
 from .cmb import *  # noqa: F401, F403
@@ -1,3 +1,5 @@ (ClimateQA dataset loader, renamed to ClimaQADataset)
+import os
+
 from datasets import load_dataset
 
 from opencompass.datasets.base import BaseDataset
@@ -6,12 +8,13 @@ from opencompass.utils import get_data_path
 
 
 @LOAD_DATASET.register_module()
-class ClimateQADataset(BaseDataset):
+class ClimaQADataset(BaseDataset):
 
     @staticmethod
-    def load(path: str, **kwargs):
+    def load(path: str, task: str, **kwargs):
 
-        path = get_data_path(path, local_mode=True)
+        path = get_data_path(path)
+        path = os.path.join(path, task)
         climateqa = load_dataset(path)['train']
 
         input_column = []
@@ -23,4 +26,5 @@ class ClimateQADataset(BaseDataset):
             else:
                 input_column.append(climateqa[i]['Question'])
         climateqa = climateqa.add_column(name='input', column=input_column)
+        climateqa = climateqa.rename_column('Answer', 'target')
         return climateqa
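With the new task argument and column rename, the loader resolves the dataset root via get_data_path and then descends into the per-task subdirectory. A rough usage sketch follows; the local layout shown is assumed from the DATASETS_MAPPING entries added later in this commit.

    # Assumed local layout: ./data/climaqa_gold/mcq, ./data/climaqa_gold/cloze, ./data/climaqa_gold/ffq
    ds = ClimaQADataset.load(path='opencompass/ClimaQA-Gold', task='mcq')
    print(ds[0]['input'])   # question text assembled into the 'input' column
    print(ds[0]['target'])  # gold answer, renamed from the original 'Answer' column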
@@ -12,9 +12,19 @@ class PHYSICSDataset(BaseDataset):
 
     @staticmethod
     def load(path: str, name: str, **kwargs):
+        path = get_data_path(path)
         path = os.path.join(path, name)
-        path = get_data_path(path, local_mode=True)
         physics = load_dataset(path)['train']
         physics = physics.rename_column('questions', 'input')
-        physics = physics.rename_column('final_answers', 'target')
+
+        target = []
+        for i in physics:
+            this_final_answer = ''
+            for j in range(len(i['final_answers'])):
+                this_final_answer += 'Answer ' + str(j + 1) + ': '
+                this_final_answer += i['final_answers'][j]
+                this_final_answer += '\n'
+            target.append(this_final_answer)
+        physics = physics.add_column(name='target', column=target)
+
         return physics
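The new loop replaces the simple column rename: final_answers is a list per record, and each entry is flattened into one numbered string. For a record with two final answers, the added code produces:

    # Worked example of the concatenation above (values are hypothetical)
    final_answers = ['E = mc^2', 'p = m v']
    # resulting target string:
    # 'Answer 1: E = mc^2\nAnswer 2: p = m v\n'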
@@ -420,9 +420,40 @@ DATASETS_MAPPING = {
         "hf_id": "",
         "local": "./data/OlympiadBench",
     },
+    "opencompass/ClimaQA-Gold": {
+        "ms_id": "",
+        "hf_id": "",
+        "local": "./data/climaqa_gold",
+    },
+    "opencompass/ClimaQA-Silver": {
+        "ms_id": "",
+        "hf_id": "",
+        "local": "./data/climaqa_silver",
+    },
+    "opencompass/PHYSICS-textonly": {
+        "ms_id": "",
+        "hf_id": "",
+        "local": "./data/PHYSICS-textonly",
+    },
+
 }
 
 DATASETS_URL = {
+    "/climaqa_gold": {
+        "url":
+        "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/climaqa_gold.zip",
+        "md5": "310cd0dc96db2bbbce798c40e2163ac2",
+    },
+    "/climaqa_silver": {
+        "url":
+        "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/climaqa_silver.zip",
+        "md5": "acdd955f1c170539c5233c12f7227c58",
+    },
+    "/PHYSICS-textonly": {
+        "url":
+        "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/PHYSICS-textonly.zip",
+        "md5": "92be6846a22dd4da942ca43f0638c709",
+    },
     "/OlympiadBench": {
         "url":
         "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/OlympiadBench.zip",
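These entries tie the abstract dataset names used in the configs ('opencompass/ClimaQA-Gold', 'opencompass/ClimaQA-Silver', 'opencompass/PHYSICS-textonly') to local folders and downloadable archives. A simplified sketch of how resolution is expected to work; the exact download logic lives in OpenCompass's data utilities and is not shown in this commit.

    from opencompass.utils import get_data_path

    # Maps via the DATASETS_MAPPING entry above to './data/climaqa_gold';
    # if the folder is missing, the '/climaqa_gold' DATASETS_URL entry supplies
    # the zip URL and md5 checksum used to fetch and verify it.
    local_root = get_data_path('opencompass/ClimaQA-Gold')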