Mirror of https://github.com/open-compass/opencompass.git (synced 2025-05-30 16:03:24 +08:00)
[Feature] Add Datasets: ClimaQA, PHYSICS (#2017)
* feat ClimaQA * feat PHYSICS * fix * fix * fix * fix
This commit is contained in:
parent 6a6a1a5c0b
commit 75e7834b59
dataset-index.yml
@@ -997,3 +997,17 @@
    paper: https://arxiv.org/pdf/2502.14739
    configpath: opencompass/configs/datasets/supergpqa
    configpath_llmjudge: ''
- climaqa:
    name: ClimaQA
    category: Science
    paper: https://arxiv.org/pdf/2410.16701
    configpath: ''
    configpath_llmjudge:
      - opencompass/configs/datasets/ClimaQA/ClimaQA_Gold_llm_judge.py
      - opencompass/configs/datasets/ClimaQA/ClimaQA_Silver_llm_judge.py
- physics:
    name: PHYSICS
    category: Science
    paper: https://arxiv.org/pdf/2503.21821
    configpath: ''
    configpath_llmjudge: opencompass/configs/datasets/PHYSICS/PHYSICS_llm_judge_gen_a133a2.py
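For orientation, here is a minimal sketch of how the two new entries could be wired into an evaluation config (assumed usage; the model list and judge settings are left out, and importing the Gold wrapper is just one option):

from mmengine.config import read_base

with read_base():
    # Paths follow the configpath_llmjudge entries registered above.
    from opencompass.configs.datasets.ClimaQA.ClimaQA_Gold_llm_judge import \
        climaqa_datasets
    from opencompass.configs.datasets.PHYSICS.PHYSICS_llm_judge_gen_a133a2 import \
        physics_datasets

# OpenCompass reads the evaluated tasks from the `datasets` list.
datasets = [*climaqa_datasets, *physics_datasets]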
opencompass/configs/datasets/ClimaQA/ClimaQA_Gold_llm_judge.py
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .ClimaQA_Gold_llm_judge_gen_f15343 import climaqa_datasets  # noqa: F401, F403
opencompass/configs/datasets/ClimaQA/ClimaQA_Gold_llm_judge_gen_f15343.py
@@ -0,0 +1,164 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import ClimaQADataset, generic_llmjudge_postprocess

from opencompass.evaluator import GenericLLMEvaluator

climaqa_gold_sets = [
    'mcq',
    'cloze',
    'ffq'
]

GRADER_TEMPLATE_mcq = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. The answer may be one of the four options: a, b, c, or d. Only when the options given by prediction are strictly consistent with the answer, the prediction can be considered correct.
3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:', and only judge whether the candidate's answer is consistent with the standard answer.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.

Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


<Original Question Begin>: \n{input}\n<Original Question End>\n\n
<Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

Judging the correctness of candidates' answers:
""".strip()


GRADER_TEMPLATE_cloze = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. The form of the answer is a word or a phrase. Please strictly compare the prediction and the answer. Only when the prediction and the answer are exactly the same, will the prediction be considered correct; otherwise, it will be considered incorrect.
3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is consistent with the standard answer.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.

Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


<Original Question Begin>: \n{input}\n<Original Question End>\n\n
<Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

Judging the correctness of candidates' answers:
""".strip()


GRADER_TEMPLATE_ffq = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. The type of question is open-ended Q&A. Please compare whether the prediction is close enough to the meaning of the answer and whether the prediction covers each key point in the answer. If the prediction meets the above requirements, it can be considered very close to the answer.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
5. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is very close to the standard answer.

Please judge whether the following answers are close to the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: very close to the answer
B: not very close to the answer
Just return the letters "A" or "B", with no text around it.

Here is your task. Simply reply with either A or B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


<Original Question Begin>: \n{input}\n<Original Question End>\n\n
<Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

Judging the correctness of candidates' answers:
""".strip()


climaqa_reader_cfg = dict(input_columns=['input'], output_column='target')

climaqa_datasets = []

# Build one LLM-judge dataset entry per ClimaQA-Gold question type.
for _task in climaqa_gold_sets:

    if _task == 'mcq':
        GRADER_TEMPLATE = GRADER_TEMPLATE_mcq
        infer_prompt = f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\" without any modification. The question is multiple choice with a single correct answer, the final answer must only be the letter corresponding to the correct answer. For example, \"The answer is: a\"\n\nQ: {{input}}\nA: "
    if _task == 'ffq':
        GRADER_TEMPLATE = GRADER_TEMPLATE_ffq
        infer_prompt = f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\".\n\nQ: {{input}}\nA: "
    if _task == 'cloze':
        GRADER_TEMPLATE = GRADER_TEMPLATE_cloze
        infer_prompt = f"Fill the <Mask> in the sentence. Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\" without any modification, and provide the answer directly, with no formatting, no bolding, and no markup. For instance: \"The answer is: 42\" or \"The answer is: yes\".\n\nQ: {{input}}\nA: "

    climaqa_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(
                        role='HUMAN',
                        prompt=infer_prompt,
                    )
                ]
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    climaqa_eval_cfg = dict(
        evaluator=dict(
            type=GenericLLMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                        )
                    ],
                    round=[
                        dict(role='HUMAN', prompt=GRADER_TEMPLATE),
                    ],
                ),
            ),
            dataset_cfg=dict(
                type=ClimaQADataset,
                path='opencompass/ClimaQA-Gold',
                task=_task,
                abbr='ClimaQA_Gold_' + _task,
                reader_cfg=climaqa_reader_cfg,
            ),
            judge_cfg=dict(),
            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
        ),
        pred_role='BOT',
    )

    climaqa_datasets.append(
        dict(
            abbr='ClimaQA_Gold_' + _task,
            type=ClimaQADataset,
            path='opencompass/ClimaQA-Gold',
            task=_task,
            reader_cfg=climaqa_reader_cfg,
            infer_cfg=climaqa_infer_cfg,
            eval_cfg=climaqa_eval_cfg,
        )
    )
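If only one ClimaQA question type is wanted, the generated list can be filtered by abbreviation. A small sketch (assumed usage, reusing the abbr values defined above):

from mmengine.config import read_base

with read_base():
    from opencompass.configs.datasets.ClimaQA.ClimaQA_Gold_llm_judge_gen_f15343 import \
        climaqa_datasets

# Keep only the multiple-choice split; 'ClimaQA_Gold_cloze' and
# 'ClimaQA_Gold_ffq' are the other abbrs produced by the loop above.
datasets = [d for d in climaqa_datasets if d['abbr'] == 'ClimaQA_Gold_mcq']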
opencompass/configs/datasets/ClimaQA/ClimaQA_Silver_llm_judge.py
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .ClimaQA_Silver_llm_judge_gen_f15343 import climaqa_datasets  # noqa: F401, F403
opencompass/configs/datasets/ClimaQA/ClimaQA_Silver_llm_judge_gen_f15343.py
@@ -0,0 +1,160 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import ClimaQADataset, generic_llmjudge_postprocess

from opencompass.evaluator import GenericLLMEvaluator

climaqa_silver_sets = [
    'mcq',
    'cloze',
    'ffq'
]

GRADER_TEMPLATE_mcq = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. The answer may be one of the four options: a, b, c, or d. Only when the options given by prediction are strictly consistent with the answer, the prediction can be considered correct.
3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:', and only judge whether the candidate's answer is consistent with the standard answer.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.

Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


<Original Question Begin>: \n{input}\n<Original Question End>\n\n
<Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

Judging the correctness of candidates' answers:
""".strip()

GRADER_TEMPLATE_cloze = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. The form of the answer is a word or a phrase. Please strictly compare the prediction and the answer. Only when the prediction and the answer are exactly the same, will the prediction be considered correct; otherwise, it will be considered incorrect.
3. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is consistent with the standard answer.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.

Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


<Original Question Begin>: \n{input}\n<Original Question End>\n\n
<Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

Judging the correctness of candidates' answers:
""".strip()

GRADER_TEMPLATE_ffq = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. The type of question is open-ended Q&A. Please compare whether the prediction is close enough to the meaning of the answer and whether the prediction covers each key point in the answer. If the prediction meets the above requirements, it can be considered very close to the answer.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
5. If the prediction is given with 'The answer is:', please ignore the 'The answer is:' and only judge whether the candidate's answer is very close to the standard answer.

Please judge whether the following answers are close to the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: very close to the answer
B: not very close to the answer
Just return the letters "A" or "B", with no text around it.

Here is your task. Simply reply with either A or B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


<Original Question Begin>: \n{input}\n<Original Question End>\n\n
<Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

Judging the correctness of candidates' answers:
""".strip()

climaqa_reader_cfg = dict(input_columns=['input'], output_column='target')

climaqa_datasets = []

# Build one LLM-judge dataset entry per ClimaQA-Silver question type.
for _task in climaqa_silver_sets:

    if _task == 'mcq':
        GRADER_TEMPLATE = GRADER_TEMPLATE_mcq
        infer_prompt = f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\" without any modification. The question is multiple choice with a single correct answer, the final answer must only be the letter corresponding to the correct answer. For example, \"The answer is: a\"\n\nQ: {{input}}\nA: "
    if _task == 'ffq':
        GRADER_TEMPLATE = GRADER_TEMPLATE_ffq
        infer_prompt = f"Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\".\n\nQ: {{input}}\nA: "
    if _task == 'cloze':
        GRADER_TEMPLATE = GRADER_TEMPLATE_cloze
        infer_prompt = f"Fill the <Mask> in the sentence. Think step by step, and when you provide the final answer, please use the prefix \"The answer is:\" without any modification, and provide the answer directly, with no formatting, no bolding, and no markup. For instance: \"The answer is: 42\" or \"The answer is: yes\".\n\nQ: {{input}}\nA: "

    climaqa_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(
                        role='HUMAN',
                        prompt=infer_prompt,
                    )
                ]
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    climaqa_eval_cfg = dict(
        evaluator=dict(
            type=GenericLLMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                        )
                    ],
                    round=[
                        dict(role='HUMAN', prompt=GRADER_TEMPLATE),
                    ],
                ),
            ),
            dataset_cfg=dict(
                type=ClimaQADataset,
                path='opencompass/ClimaQA-Silver',
                task=_task,
                abbr='ClimaQA_Silver_' + _task,
                reader_cfg=climaqa_reader_cfg,
            ),
            judge_cfg=dict(),
            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
        ),
        pred_role='BOT',
    )

    climaqa_datasets.append(
        dict(
            abbr='ClimaQA_Silver_' + _task,
            type=ClimaQADataset,
            path='opencompass/ClimaQA-Silver',
            task=_task,
            reader_cfg=climaqa_reader_cfg,
            infer_cfg=climaqa_infer_cfg,
            eval_cfg=climaqa_eval_cfg,
        )
    )
opencompass/configs/datasets/PHYSICS/PHYSICS_llm_judge.py
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .PHYSICS_llm_judge_gen_a133a2 import physics_datasets  # noqa: F401, F403
opencompass/configs/datasets/PHYSICS/PHYSICS_llm_judge_gen_a133a2.py
@@ -0,0 +1,131 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (
    PHYSICSDataset,
    generic_llmjudge_postprocess,
)
from opencompass.evaluator import GenericLLMEvaluator

physics_sets = [
    'atomic_dataset_textonly',
    'electro_dataset_textonly',
    'mechanics_dataset_textonly',
    'optics_dataset_textonly',
    'quantum_dataset_textonly',
    'statistics_dataset_textonly',
]

GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some questions may include multiple sub questions and sub answers. Each sub answer is given after a guide character in the form of <Answer 1:> or <Answer 2:>, etc. Please note that only when all sub predictions given in prediction correspond one-to-one with the answer and are all correct, will the prediction be considered correct; otherwise, it will be considered incorrect.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
5. The final answers in the prediction are generally given with \\boxed{}. If you cannot find sufficient \\boxed{} in the prediction, please try to find matching answers from other places within the prediction as much as possible.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: All Sub Predictions Are Correct
B: Not Every Sub Prediction Is Correct
Just return the letters "A" or "B", with no text around it.

Here is your task. Simply reply with either A, B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.

<Original Question Begin>: \n{input}\n<Original Question End>\n\n
<Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

Judging the correctness of candidates' answers:
""".strip()

# Alternative (lenient) grader kept for reference: counts a prediction as
# correct if at least one sub answer matches.
# GRADER_TEMPLATE = """
# Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
#
# Here are some evaluation criteria:
# 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
# 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
# 3. Some questions may include multiple sub questions and sub answers. Each sub answer is given after a guide character in the form of <Answer 1:> or <Answer 2:>, etc. Please note that as long as at least one correct answer appears in the prediction, the prediction is considered correct.
# 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
# 5. The final answers in the prediction are generally given with \\boxed{}. If you cannot find sufficient \\boxed{} in the prediction, please try to find matching answers from other places within the prediction as much as possible.
#
# Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
# A: At Least One Sub Prediction is Correct
# B: All Sub Predictions are Incorrect
# Just return the letters "A" or "B", with no text around it.
#
# Here is your task. Simply reply with either A, B. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
#
# <Original Question Begin>: \n{input}\n<Original Question End>\n\n
# <Gold Target Begin>: \n{target}\n<Gold Target End>\n\n
# <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
#
# Judging the correctness of candidates' answers:
# """.strip()

physics_reader_cfg = dict(input_columns=['input'], output_column='target')

physics_datasets = []

# Build one LLM-judge dataset entry per PHYSICS subject split.
for _name in physics_sets:

    physics_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(
                        role='HUMAN',
                        prompt=f'Answer the given question step by step. Begin by explaining your reasoning process clearly. Conclude by providing the final answers at the end in LaTeX boxed format. Think step by step before answering. It should be noted that the question may include multiple sub questions, please ensure that each question is answered in order.\n\nQ: {{input}}\nA: ',
                    )
                ]
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    physics_eval_cfg = dict(
        evaluator=dict(
            type=GenericLLMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                        )
                    ],
                    round=[
                        dict(role='HUMAN', prompt=GRADER_TEMPLATE),
                    ],
                ),
            ),
            dataset_cfg=dict(
                type=PHYSICSDataset,
                path='opencompass/PHYSICS-textonly',
                abbr='PHYSICS_' + _name,
                name=_name,
                reader_cfg=physics_reader_cfg,
            ),
            judge_cfg=dict(),
            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
        ),
        pred_role='BOT',
    )

    physics_datasets.append(
        dict(
            abbr='PHYSICS_' + _name,
            type=PHYSICSDataset,
            path='opencompass/PHYSICS-textonly',
            name=_name,
            reader_cfg=physics_reader_cfg,
            infer_cfg=physics_infer_cfg,
            eval_cfg=physics_eval_cfg,
        )
    )
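judge_cfg is left empty above, so the judge model has to be supplied at evaluation time. As a rough, hypothetical sketch of what a filled-in judge config might look like for an OpenAI-compatible endpoint (OpenAISDK does exist in opencompass.models, but the parameter set shown here should be checked against the installed version; all values are placeholders):

from opencompass.models import OpenAISDK

# Hypothetical judge model config (placeholder values; verify parameter
# names against your OpenCompass version before use).
judge_cfg = dict(
    abbr='my-judge-model',
    type=OpenAISDK,
    path='gpt-4o-mini',  # model name exposed by the endpoint
    key='YOUR_API_KEY',
    openai_api_base='https://api.example.com/v1',
    max_out_len=2048,
    batch_size=8,
    temperature=0.0,
)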
opencompass/configs/summarizers/groups/PHYSICS.py (new file, 14 lines)
@@ -0,0 +1,14 @@
physics_summary_groups = []

# PHYSICS text-only subject splits
_physics = [
    'atomic_dataset_textonly',
    'electro_dataset_textonly',
    'mechanics_dataset_textonly',
    'optics_dataset_textonly',
    'quantum_dataset_textonly',
    'statistics_dataset_textonly',
]

_physics = ['PHYSICS_' + s for s in _physics]
physics_summary_groups.append({'name': 'PHYSICS', 'subsets': _physics})
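A minimal sketch of a summarizer that reports the aggregated PHYSICS score alongside the per-subject splits (assumed usage; the dataset_abbrs list is illustrative):

from mmengine.config import read_base

with read_base():
    from opencompass.configs.summarizers.groups.PHYSICS import \
        physics_summary_groups

summarizer = dict(
    # 'PHYSICS' is the group average defined above; the rest are the splits.
    dataset_abbrs=['PHYSICS'] + ['PHYSICS_' + s for s in [
        'atomic_dataset_textonly', 'electro_dataset_textonly',
        'mechanics_dataset_textonly', 'optics_dataset_textonly',
        'quantum_dataset_textonly', 'statistics_dataset_textonly',
    ]],
    summary_groups=physics_summary_groups,
)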
opencompass/datasets/__init__.py
@@ -25,6 +25,7 @@ from .chinese_simpleqa import *  # noqa: F401, F403
from .cibench import *  # noqa: F401, F403
from .circular import *  # noqa: F401, F403
from .civilcomments import *  # noqa: F401, F403
from .climaqa import *  # noqa: F401, F403
from .clozeTest_maxmin import *  # noqa: F401, F403
from .cluewsc import *  # noqa: F401, F403
from .cmb import *  # noqa: F401, F403
@@ -110,6 +111,7 @@ from .obqa import *  # noqa: F401, F403
from .olymmath import *  # noqa: F401, F403
from .OlympiadBench import *  # noqa: F401, F403
from .OpenFinData import *  # noqa: F401, F403
from .physics import *  # noqa: F401, F403
from .piqa import *  # noqa: F401, F403
from .py150 import *  # noqa: F401, F403
from .qasper import *  # noqa: F401, F403
opencompass/datasets/climaqa.py (new file, 30 lines)
@@ -0,0 +1,30 @@
import os

from datasets import load_dataset

from opencompass.datasets.base import BaseDataset
from opencompass.registry import LOAD_DATASET
from opencompass.utils import get_data_path


@LOAD_DATASET.register_module()
class ClimaQADataset(BaseDataset):

    @staticmethod
    def load(path: str, task: str, **kwargs):

        path = get_data_path(path)
        path = os.path.join(path, task)
        climateqa = load_dataset(path)['train']

        input_column = []
        for i in range(len(climateqa)):
            if 'Options' in climateqa[i].keys(
            ) and climateqa[i]['Options'] is not None:
                input_column.append(climateqa[i]['Question'] + '\n' +
                                    climateqa[i]['Options'])
            else:
                input_column.append(climateqa[i]['Question'])
        climateqa = climateqa.add_column(name='input', column=input_column)
        climateqa = climateqa.rename_column('Answer', 'target')
        return climateqa
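A quick usage sketch of the loader (assuming the ClimaQA-Gold data has already been placed or downloaded under ./data/climaqa_gold, with one subdirectory per task):

from opencompass.datasets import ClimaQADataset

# 'mcq' is one of the task splits used by the configs above.
ds = ClimaQADataset.load(path='opencompass/ClimaQA-Gold', task='mcq')

# Each row ends up with an 'input' column (Question, plus Options for MCQ
# items) and a 'target' column renamed from 'Answer', matching
# climaqa_reader_cfg above.
print(ds[0]['input'])
print(ds[0]['target'])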
opencompass/datasets/physics.py (new file, 30 lines)
@@ -0,0 +1,30 @@
import os

from datasets import load_dataset

from opencompass.datasets.base import BaseDataset
from opencompass.registry import LOAD_DATASET
from opencompass.utils import get_data_path


@LOAD_DATASET.register_module()
class PHYSICSDataset(BaseDataset):

    @staticmethod
    def load(path: str, name: str, **kwargs):
        path = get_data_path(path)
        path = os.path.join(path, name)
        physics = load_dataset(path)['train']
        physics = physics.rename_column('questions', 'input')

        target = []
        for i in physics:
            this_final_answer = ''
            for j in range(len(i['final_answers'])):
                this_final_answer += 'Answer ' + str(j + 1) + ': '
                this_final_answer += i['final_answers'][j]
                this_final_answer += '\n'
            target.append(this_final_answer)
        physics = physics.add_column(name='target', column=target)

        return physics
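The target string simply enumerates the per-sub-question answers in order. A plain-Python illustration of that joining logic (example values invented):

# Illustration only: how one row's final_answers list becomes its 'target'.
final_answers = ['E = mc^2', 'v = 3.0e8 m/s']

this_final_answer = ''
for j in range(len(final_answers)):
    this_final_answer += 'Answer ' + str(j + 1) + ': '
    this_final_answer += final_answers[j]
    this_final_answer += '\n'

# -> 'Answer 1: E = mc^2\nAnswer 2: v = 3.0e8 m/s\n', i.e. the
# 'Answer N:' prefixes that the PHYSICS grader prompt refers to.
print(repr(this_final_answer))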
opencompass/utils/datasets_info.py
@@ -420,9 +420,40 @@ DATASETS_MAPPING = {
        "hf_id": "",
        "local": "./data/OlympiadBench",
    },
    "opencompass/ClimaQA-Gold": {
        "ms_id": "",
        "hf_id": "",
        "local": "./data/climaqa_gold",
    },
    "opencompass/ClimaQA-Silver": {
        "ms_id": "",
        "hf_id": "",
        "local": "./data/climaqa_silver",
    },
    "opencompass/PHYSICS-textonly": {
        "ms_id": "",
        "hf_id": "",
        "local": "./data/PHYSICS-textonly",
    },
}

DATASETS_URL = {
    "/climaqa_gold": {
        "url":
        "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/climaqa_gold.zip",
        "md5": "310cd0dc96db2bbbce798c40e2163ac2",
    },
    "/climaqa_silver": {
        "url":
        "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/climaqa_silver.zip",
        "md5": "acdd955f1c170539c5233c12f7227c58",
    },
    "/PHYSICS-textonly": {
        "url":
        "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/PHYSICS-textonly.zip",
        "md5": "92be6846a22dd4da942ca43f0638c709",
    },
    "/OlympiadBench": {
        "url":
        "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/OlympiadBench.zip",
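For context, a small sketch of how these entries are expected to be consumed (get_data_path is the same helper imported by the new dataset loaders; the download behaviour described in the comment is an assumption, while the mapping values come from above):

from opencompass.utils import get_data_path

# Expected to resolve the abstract id to the local directory registered in
# DATASETS_MAPPING ('./data/climaqa_gold'); the '/climaqa_gold' entry in
# DATASETS_URL is what the download tooling would fetch and unpack if the
# directory is missing (assumed behaviour).
local_root = get_data_path('opencompass/ClimaQA-Gold')
print(local_root)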