Mirror of https://github.com/open-compass/opencompass.git, synced 2025-05-30 16:03:24 +08:00

Commit 60b230e285 (parent 4b9838fb94): fix datasets in fullbench_int3
@@ -1,4 +1,39 @@
-from mmengine.config import read_base
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.datasets import Aime2024Dataset, MATHEvaluator, math_postprocess_v2
-
-with read_base():
-    from .aime2024_gen_6e39a4 import aime2024_datasets  # noqa: F401, F403
+
+aime2024_reader_cfg = dict(
+    input_columns=['question'],
+    output_column='answer'
+)
+
+
+aime2024_infer_cfg = dict(
+    prompt_template=dict(
+        type=PromptTemplate,
+        template=dict(
+            round=[
+                dict(role='HUMAN', prompt='{question}\nPlease reason step by step, and put your final answer within \\boxed{}.'),
+            ],
+        )
+    ),
+    retriever=dict(type=ZeroRetriever),
+    inferencer=dict(type=GenInferencer, max_out_len=2048)
+)
+
+aime2024_eval_cfg = dict(
+    evaluator=dict(type=MATHEvaluator, version='v2'), pred_postprocessor=dict(type=math_postprocess_v2)
+)
+
+aime2024_datasets = [
+    dict(
+        abbr='aime2024',
+        type=Aime2024Dataset,
+        path='opencompass/aime2024',
+        reader_cfg=aime2024_reader_cfg,
+        infer_cfg=aime2024_infer_cfg,
+        eval_cfg=aime2024_eval_cfg
+    )
+]
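As an aside (not part of the commit): a quick illustration of the rule-based scoring path configured above. From its role here, math_postprocess_v2 is assumed to strip the model's reasoning down to the final \boxed{} answer before MATHEvaluator compares it with the gold answer.

# Illustration only — behavior inferred from the config above, not from this diff.
from opencompass.datasets import math_postprocess_v2

pred = 'Adding the cases gives 190 + 14 = 204, so the answer is \\boxed{204}.'
print(math_postprocess_v2(pred))  # expected: '204'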
@@ -0,0 +1,90 @@
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.datasets import Aime2024Dataset, MATHEvaluator, math_postprocess_v2
+from opencompass.evaluator import GenericLLMEvaluator
+from opencompass.datasets import generic_llmjudge_postprocess
+
+
+aime2024_reader_cfg = dict(
+    input_columns=['question'],
+    output_column='answer'
+)
+
+GRADER_TEMPLATE = """
+Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
+
+Here are some evaluation criteria:
+1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
+2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
+3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
+4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
+
+Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
+A: CORRECT
+B: INCORRECT
+Just return the letters "A" or "B", with no text around it.
+
+Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+
+<Original Question Begin>: {question}\n A) {A}\n B) {B}\n C) {C}\n D) {D}\n<Original Question End>\n\n
+<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
+<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
+Judging the correctness of candidates' answers:
+""".strip()
+
+
+aime2024_infer_cfg = dict(
+    prompt_template=dict(
+        type=PromptTemplate,
+        template=dict(
+            round=[
+                dict(role='HUMAN', prompt='{question}\nPlease reason step by step, and put your final answer within \\boxed{}.'),
+            ],
+        )
+    ),
+    retriever=dict(type=ZeroRetriever),
+    inferencer=dict(type=GenInferencer, max_out_len=2048)
+)
+
+aime2024_eval_cfg = dict(
+    evaluator=dict(
+        type=GenericLLMEvaluator,
+        prompt_template=dict(
+            type=PromptTemplate,
+            template=dict(
+                begin=[
+                    dict(
+                        role='SYSTEM',
+                        fallback_role='HUMAN',
+                        prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
+                ],
+                round=[
+                    dict(
+                        role='HUMAN',
+                        prompt=GRADER_TEMPLATE
+                    ),
+                ]),
+        ),
+        dataset_cfg=dict(
+            type=Aime2024Dataset,
+            path='opencompass/aime2024',
+            reader_cfg=aime2024_reader_cfg,
+        ),
+        judge_cfg=dict(),
+        dict_postprocessor=dict(type=generic_llmjudge_postprocess),
+    ),
+    pred_role='BOT',
+)
+
+aime2024_datasets = [
+    dict(
+        abbr='aime2024',
+        type=Aime2024Dataset,
+        path='opencompass/aime2024',
+        reader_cfg=aime2024_reader_cfg,
+        infer_cfg=aime2024_infer_cfg,
+        eval_cfg=aime2024_eval_cfg
+    )
+]
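Not part of the diff — a minimal sketch of how the empty judge_cfg above might be filled before running. The model name, endpoint, and key are placeholders; OpenAISDK is one of OpenCompass's API model wrappers, and the field names mirror an ordinary model config.

from opencompass.models import OpenAISDK

judge_cfg = dict(
    type=OpenAISDK,
    abbr='judge',                                   # placeholder abbreviation
    path='gpt-4o-mini',                             # placeholder judge model
    openai_api_base='https://api.example.com/v1',   # placeholder endpoint
    key='YOUR_API_KEY',                             # placeholder credential
    query_per_second=2,
    max_out_len=1024,
    batch_size=8,
)

# Patch the judge into every dataset entry defined by the config above.
for ds in aime2024_datasets:
    ds['eval_cfg']['evaluator']['judge_cfg'] = judge_cfg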
@@ -1,4 +1,100 @@
-from mmengine.config import read_base
+import os
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.openicl.icl_evaluator import AccEvaluator
+from opencompass.datasets import BBHDataset, BBHEvaluator, bbh_mcq_postprocess, BBHEvaluator_mcq
+
+bbh_reader_cfg = dict(input_columns=['input'], output_column='target')
+
+bbh_multiple_choice_sets = [
+    'temporal_sequences',
+    'disambiguation_qa',
+    'date_understanding',
+    'tracking_shuffled_objects_three_objects',
+    'penguins_in_a_table',
+    'geometric_shapes',
+    'snarks',
+    'ruin_names',
+    'tracking_shuffled_objects_seven_objects',
+    'tracking_shuffled_objects_five_objects',
+    'logical_deduction_three_objects',
+    'hyperbaton',
+    'logical_deduction_five_objects',
+    'logical_deduction_seven_objects',
+    'movie_recommendation',
+    'salient_translation_error_detection',
+    'reasoning_about_colored_objects',
+]
+bbh_free_form_sets = [
+    'multistep_arithmetic_two',
+    'navigate',
+    'dyck_languages',
+    'word_sorting',
+    'sports_understanding',
+    'boolean_expressions',
+    'object_counting',
+    'formal_fallacies',
+    'causal_judgement',
+    'web_of_lies',
+]
+
+bbh_datasets = []
+for _name in bbh_multiple_choice_sets:
+    with open(os.path.join(os.path.dirname(__file__), 'lib_prompt', f'{_name}.txt'), 'r') as f:
+        _hint = f.read()
+    bbh_infer_cfg = dict(
+        prompt_template=dict(
+            type=PromptTemplate,
+            template=dict(round=[
+                dict(
+                    role='HUMAN',
+                    prompt=f"Follow the given examples and answer the question.\n{_hint}\n\nQ: {{input}}\nA: Let's think step by step."
+                )
+            ])),
+        retriever=dict(type=ZeroRetriever),
+        inferencer=dict(type=GenInferencer, max_out_len=512))
+    bbh_eval_cfg = dict(
+        evaluator=dict(type=BBHEvaluator_mcq),
+        pred_role='BOT',
+        pred_postprocessor=dict(type=bbh_mcq_postprocess),
+        dataset_postprocessor=dict(type=bbh_mcq_postprocess))
+
+    bbh_datasets.append(
+        dict(
+            type=BBHDataset,
+            path='opencompass/bbh',
+            name=_name,
+            abbr='bbh-' + _name,
+            reader_cfg=bbh_reader_cfg,
+            infer_cfg=bbh_infer_cfg.copy(),
+            eval_cfg=bbh_eval_cfg.copy()))
+
+for _name in bbh_free_form_sets:
+    with open(os.path.join(os.path.dirname(__file__), 'lib_prompt', f'{_name}.txt'), 'r') as f:
+        _hint = f.read()
+    bbh_infer_cfg = dict(
+        prompt_template=dict(
+            type=PromptTemplate,
+            template=dict(round=[
+                dict(
+                    role='HUMAN',
+                    prompt=f"Follow the given examples and answer the question.\n{_hint}\n\nQ: {{input}}\nA: Let's think step by step."
+                )
+            ])),
+        retriever=dict(type=ZeroRetriever),
+        inferencer=dict(type=GenInferencer, max_out_len=512))
+    bbh_eval_cfg = dict(evaluator=dict(type=BBHEvaluator), pred_role='BOT')
+
+    bbh_datasets.append(
+        dict(
+            type=BBHDataset,
+            path='opencompass/bbh',
+            name=_name,
+            abbr='bbh-' + _name,
+            reader_cfg=bbh_reader_cfg,
+            infer_cfg=bbh_infer_cfg.copy(),
+            eval_cfg=bbh_eval_cfg.copy()))
-
-with read_base():
-    from .bbh_gen_5b92b0 import bbh_datasets  # noqa: F401, F403
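An aside, not in the diff: the .copy() calls in the loops above are shallow copies. That is safe here because each iteration rebinds bbh_infer_cfg and bbh_eval_cfg to freshly built dicts; the copy only keeps the appended entries from aliasing the loop variables. A short illustration of the semantics:

# Illustration only: dict.copy() is shallow.
cfg = dict(inferencer=dict(max_out_len=512))
snapshot = cfg.copy()
cfg['evaluator'] = 'x'                    # top-level change: snapshot unaffected
assert 'evaluator' not in snapshot
cfg['inferencer']['max_out_len'] = 1024   # nested dicts are still shared
assert snapshot['inferencer']['max_out_len'] == 1024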
opencompass/configs/datasets/bbh/bbh_llm_judge_gen.py (new file, 180 lines)
@@ -0,0 +1,180 @@
+import os
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.openicl.icl_evaluator import AccEvaluator
+from opencompass.datasets import BBHDataset, BBHEvaluator, bbh_mcq_postprocess, BBHEvaluator_mcq, generic_llmjudge_postprocess
+from opencompass.evaluator import GenericLLMEvaluator
+
+
+bbh_reader_cfg = dict(input_columns=['input'], output_column='target')
+
+bbh_multiple_choice_sets = [
+    'temporal_sequences',
+    'disambiguation_qa',
+    'date_understanding',
+    'tracking_shuffled_objects_three_objects',
+    'penguins_in_a_table',
+    'geometric_shapes',
+    'snarks',
+    'ruin_names',
+    'tracking_shuffled_objects_seven_objects',
+    'tracking_shuffled_objects_five_objects',
+    'logical_deduction_three_objects',
+    'hyperbaton',
+    'logical_deduction_five_objects',
+    'logical_deduction_seven_objects',
+    'movie_recommendation',
+    'salient_translation_error_detection',
+    'reasoning_about_colored_objects',
+]
+bbh_free_form_sets = [
+    'multistep_arithmetic_two',
+    'navigate',
+    'dyck_languages',
+    'word_sorting',
+    'sports_understanding',
+    'boolean_expressions',
+    'object_counting',
+    'formal_fallacies',
+    'causal_judgement',
+    'web_of_lies',
+]
+
+GRADER_TEMPLATE = """
+Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
+
+Here are some evaluation criteria:
+1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
+2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
+3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
+4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
+5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.
+
+Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
+A: CORRECT
+B: INCORRECT
+Just return the letters "A" or "B", with no text around it.
+
+Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+
+<Original Question Begin>: {question}\n {options_str} \n<Original Question End>\n\n
+<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
+<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
+Judging the correctness of candidates' answers:
+""".strip()
+
+
+bbh_datasets = []
+for _name in bbh_multiple_choice_sets:
+    with open(os.path.join(os.path.dirname(__file__), 'lib_prompt', f'{_name}.txt'), 'r') as f:
+        _hint = f.read()
+    bbh_infer_cfg = dict(
+        prompt_template=dict(
+            type=PromptTemplate,
+            template=dict(round=[
+                dict(
+                    role='HUMAN',
+                    prompt=f"Follow the given examples and answer the question.\n{_hint}\n\nQ: {{input}}\nA: Let's think step by step."
+                )
+            ])),
+        retriever=dict(type=ZeroRetriever),
+        inferencer=dict(type=GenInferencer, max_out_len=512))
+    bbh_eval_cfg = dict(
+        evaluator=dict(
+            type=GenericLLMEvaluator,
+            prompt_template=dict(
+                type=PromptTemplate,
+                template=dict(
+                    begin=[
+                        dict(
+                            role='SYSTEM',
+                            fallback_role='HUMAN',
+                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
+                    ],
+                    round=[
+                        dict(
+                            role='HUMAN',
+                            prompt=GRADER_TEMPLATE
+                        ),
+                    ]),
+            ),
+            dataset_cfg=dict(
+                type=BBHDataset,
+                path='opencompass/bbh',
+                reader_cfg=bbh_reader_cfg,
+                name=_name,
+            ),
+            judge_cfg=dict(),
+            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
+        ),
+        pred_role='BOT',
+    )
+
+    bbh_datasets.append(
+        dict(
+            type=BBHDataset,
+            path='opencompass/bbh',
+            name=_name,
+            abbr='bbh-' + _name,
+            reader_cfg=bbh_reader_cfg,
+            infer_cfg=bbh_infer_cfg.copy(),
+            eval_cfg=bbh_eval_cfg.copy()))
+
+for _name in bbh_free_form_sets:
+    with open(os.path.join(os.path.dirname(__file__), 'lib_prompt', f'{_name}.txt'), 'r') as f:
+        _hint = f.read()
+    bbh_infer_cfg = dict(
+        prompt_template=dict(
+            type=PromptTemplate,
+            template=dict(round=[
+                dict(
+                    role='HUMAN',
+                    prompt=f"Follow the given examples and answer the question.\n{_hint}\n\nQ: {{input}}\nA: Let's think step by step."
+                )
+            ])),
+        retriever=dict(type=ZeroRetriever),
+        inferencer=dict(type=GenInferencer, max_out_len=512))
+    bbh_eval_cfg = dict(
+        evaluator=dict(
+            type=GenericLLMEvaluator,
+            prompt_template=dict(
+                type=PromptTemplate,
+                template=dict(
+                    begin=[
+                        dict(
+                            role='SYSTEM',
+                            fallback_role='HUMAN',
+                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
+                    ],
+                    round=[
+                        dict(
+                            role='HUMAN',
+                            prompt=GRADER_TEMPLATE
+                        ),
+                    ]),
+            ),
+            dataset_cfg=dict(
+                type=BBHDataset,
+                path='opencompass/bbh',
+                reader_cfg=bbh_reader_cfg,
+                name=_name,
+            ),
+            judge_cfg=dict(),
+            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
+        ),
+        pred_role='BOT',
+    )
+
+    bbh_datasets.append(
+        dict(
+            type=BBHDataset,
+            path='opencompass/bbh',
+            name=_name,
+            abbr='bbh-' + _name,
+            reader_cfg=bbh_reader_cfg,
+            infer_cfg=bbh_infer_cfg.copy(),
+            eval_cfg=bbh_eval_cfg.copy()))
@@ -0,0 +1,46 @@
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.datasets import (BigCodeBenchDataset, BigCodeBenchEvaluator)
+
+bigcodebench_hard_reader_cfg = dict(
+    input_columns=['instruct_prompt'],
+    output_column='test',
+)
+
+bigcodebench_hard_infer_cfg = dict(prompt_template=dict(
+    type=PromptTemplate,
+    template=dict(
+        begin=[dict(role='system', fallback_role='HUMAN', prompt='')],
+        round=[
+            dict(role='HUMAN', prompt='{instruct_prompt}'),
+        ])),
+    retriever=dict(type=ZeroRetriever),
+    inferencer=dict(type=GenInferencer,
+                    max_out_len=8192))
+
+bigcodebench_hard_eval_cfg = dict(
+    evaluator=dict(
+        type=BigCodeBenchEvaluator,
+        release_version='v0.1.2',
+        eval_type='instruct',
+        # remote_execute_api='https://bigcode-bigcodebench-evaluator.hf.space/',
+        remote_execute_api='https://opencompass-opencompass-bigcodebench-evaluator.hf.space',  # noqa: E501
+        dataset_version='hard',
+    ),
+    pred_role='BOT',
+)
+
+bigcodebench_hard_instruct_datasets = [
+    dict(
+        abbr='bigcodebench_hard_instruct',
+        type=BigCodeBenchDataset,
+        path='opencompass/bigcodebench',
+        reader_cfg=bigcodebench_hard_reader_cfg,
+        infer_cfg=bigcodebench_hard_infer_cfg,
+        eval_cfg=bigcodebench_hard_eval_cfg,
+        release_version='v0.1.2',
+        dataset_version='hard',
+    )
+]
@@ -1,4 +1,130 @@
-from mmengine.config import read_base
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.openicl.icl_evaluator import AccEvaluator
+from opencompass.datasets import CMMLUDataset
+from opencompass.utils.text_postprocessors import match_answer_pattern
-
-with read_base():
-    from .cmmlu_gen_c13365 import cmmlu_datasets  # noqa: F401, F403
+
+cmmlu_subject_mapping = {
+    'agronomy': '农学',
+    'anatomy': '解剖学',
+    'ancient_chinese': '古汉语',
+    'arts': '艺术学',
+    'astronomy': '天文学',
+    'business_ethics': '商业伦理',
+    'chinese_civil_service_exam': '中国公务员考试',
+    'chinese_driving_rule': '中国驾驶规则',
+    'chinese_food_culture': '中国饮食文化',
+    'chinese_foreign_policy': '中国外交政策',
+    'chinese_history': '中国历史',
+    'chinese_literature': '中国文学',
+    'chinese_teacher_qualification': '中国教师资格',
+    'clinical_knowledge': '临床知识',
+    'college_actuarial_science': '大学精算学',
+    'college_education': '大学教育学',
+    'college_engineering_hydrology': '大学工程水文学',
+    'college_law': '大学法律',
+    'college_mathematics': '大学数学',
+    'college_medical_statistics': '大学医学统计',
+    'college_medicine': '大学医学',
+    'computer_science': '计算机科学',
+    'computer_security': '计算机安全',
+    'conceptual_physics': '概念物理学',
+    'construction_project_management': '建设工程管理',
+    'economics': '经济学',
+    'education': '教育学',
+    'electrical_engineering': '电气工程',
+    'elementary_chinese': '小学语文',
+    'elementary_commonsense': '小学常识',
+    'elementary_information_and_technology': '小学信息技术',
+    'elementary_mathematics': '初等数学',
+    'ethnology': '民族学',
+    'food_science': '食品科学',
+    'genetics': '遗传学',
+    'global_facts': '全球事实',
+    'high_school_biology': '高中生物',
+    'high_school_chemistry': '高中化学',
+    'high_school_geography': '高中地理',
+    'high_school_mathematics': '高中数学',
+    'high_school_physics': '高中物理学',
+    'high_school_politics': '高中政治',
+    'human_sexuality': '人类性行为',
+    'international_law': '国际法学',
+    'journalism': '新闻学',
+    'jurisprudence': '法理学',
+    'legal_and_moral_basis': '法律与道德基础',
+    'logical': '逻辑学',
+    'machine_learning': '机器学习',
+    'management': '管理学',
+    'marketing': '市场营销',
+    'marxist_theory': '马克思主义理论',
+    'modern_chinese': '现代汉语',
+    'nutrition': '营养学',
+    'philosophy': '哲学',
+    'professional_accounting': '专业会计',
+    'professional_law': '专业法学',
+    'professional_medicine': '专业医学',
+    'professional_psychology': '专业心理学',
+    'public_relations': '公共关系',
+    'security_study': '安全研究',
+    'sociology': '社会学',
+    'sports_science': '体育学',
+    'traditional_chinese_medicine': '中医中药',
+    'virology': '病毒学',
+    'world_history': '世界历史',
+    'world_religions': '世界宗教'
+}
+
+QUERY_TEMPLATE = """
+你回答的最后一行**必须**是以下格式 '答案: $选项' (不带引号), 其中选项是ABCD之一. 请在回答之前一步步思考.
+
+{question}
+
+A) {A}
+B) {B}
+C) {C}
+D) {D}
+""".strip()
+
+cmmlu_all_sets = list(cmmlu_subject_mapping.keys())
+
+cmmlu_datasets = []
+for _name in cmmlu_all_sets:
+    _ch_name = cmmlu_subject_mapping[_name]
+    prompt_prefix = f'请回答以下关于{_ch_name}的单项选择题, '
+    cmmlu_infer_cfg = dict(
+        prompt_template=dict(
+            type=PromptTemplate,
+            template=dict(
+                round=[
+                    dict(role='HUMAN', prompt=prompt_prefix + QUERY_TEMPLATE),
+                ],
+            ),
+        ),
+        retriever=dict(type=ZeroRetriever),
+        inferencer=dict(type=GenInferencer),
+    )
+    cmmlu_eval_cfg = dict(
+        evaluator=dict(type=AccEvaluator),
+        pred_postprocessor=dict(
+            type=match_answer_pattern,
+            # answer_pattern=r'(?i)答案\s*:\s*([A-D])'
+            answer_pattern=r'(?i)答案\s*:\s*[\W]*([A-D])[\W]*',
+        )
+    )
+    cmmlu_datasets.append(
+        dict(
+            type=CMMLUDataset,
+            path='opencompass/cmmlu',
+            name=_name,
+            abbr=f'cmmlu-{_name}',
+            reader_cfg=dict(
+                input_columns=['question', 'A', 'B', 'C', 'D'],
+                output_column='answer',
+                train_split='dev',
+                test_split='test'),
+            infer_cfg=cmmlu_infer_cfg,
+            eval_cfg=cmmlu_eval_cfg,
+        ))
+
+del _name, _ch_name
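An aside (not in the diff): the answer_pattern regex above tolerates markdown decoration around the option letter. A quick standalone check of its behavior:

# Illustration only: extracting the chosen option from a model reply.
import re

pattern = r'(?i)答案\s*:\s*[\W]*([A-D])[\W]*'
reply = '经过逐步推理,答案: **B**'
m = re.search(pattern, reply)
print(m.group(1))  # -> 'B'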
opencompass/configs/datasets/cmmlu/cmmlu_llm_judge_gen.py (new file, 184 lines)
@@ -0,0 +1,184 @@
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.openicl.icl_evaluator import AccEvaluator
+from opencompass.datasets import CMMLUDataset
+from opencompass.utils.text_postprocessors import match_answer_pattern
+from opencompass.openicl.icl_evaluator import LMEvaluator
+from opencompass.evaluator import GenericLLMEvaluator
+from opencompass.datasets import generic_llmjudge_postprocess
+
+cmmlu_subject_mapping = {
+    'agronomy': '农学',
+    'anatomy': '解剖学',
+    'ancient_chinese': '古汉语',
+    'arts': '艺术学',
+    'astronomy': '天文学',
+    'business_ethics': '商业伦理',
+    'chinese_civil_service_exam': '中国公务员考试',
+    'chinese_driving_rule': '中国驾驶规则',
+    'chinese_food_culture': '中国饮食文化',
+    'chinese_foreign_policy': '中国外交政策',
+    'chinese_history': '中国历史',
+    'chinese_literature': '中国文学',
+    'chinese_teacher_qualification': '中国教师资格',
+    'clinical_knowledge': '临床知识',
+    'college_actuarial_science': '大学精算学',
+    'college_education': '大学教育学',
+    'college_engineering_hydrology': '大学工程水文学',
+    'college_law': '大学法律',
+    'college_mathematics': '大学数学',
+    'college_medical_statistics': '大学医学统计',
+    'college_medicine': '大学医学',
+    'computer_science': '计算机科学',
+    'computer_security': '计算机安全',
+    'conceptual_physics': '概念物理学',
+    'construction_project_management': '建设工程管理',
+    'economics': '经济学',
+    'education': '教育学',
+    'electrical_engineering': '电气工程',
+    'elementary_chinese': '小学语文',
+    'elementary_commonsense': '小学常识',
+    'elementary_information_and_technology': '小学信息技术',
+    'elementary_mathematics': '初等数学',
+    'ethnology': '民族学',
+    'food_science': '食品科学',
+    'genetics': '遗传学',
+    'global_facts': '全球事实',
+    'high_school_biology': '高中生物',
+    'high_school_chemistry': '高中化学',
+    'high_school_geography': '高中地理',
+    'high_school_mathematics': '高中数学',
+    'high_school_physics': '高中物理学',
+    'high_school_politics': '高中政治',
+    'human_sexuality': '人类性行为',
+    'international_law': '国际法学',
+    'journalism': '新闻学',
+    'jurisprudence': '法理学',
+    'legal_and_moral_basis': '法律与道德基础',
+    'logical': '逻辑学',
+    'machine_learning': '机器学习',
+    'management': '管理学',
+    'marketing': '市场营销',
+    'marxist_theory': '马克思主义理论',
+    'modern_chinese': '现代汉语',
+    'nutrition': '营养学',
+    'philosophy': '哲学',
+    'professional_accounting': '专业会计',
+    'professional_law': '专业法学',
+    'professional_medicine': '专业医学',
+    'professional_psychology': '专业心理学',
+    'public_relations': '公共关系',
+    'security_study': '安全研究',
+    'sociology': '社会学',
+    'sports_science': '体育学',
+    'traditional_chinese_medicine': '中医中药',
+    'virology': '病毒学',
+    'world_history': '世界历史',
+    'world_religions': '世界宗教'
+}
+
+QUERY_TEMPLATE = """
+你回答的最后一行**必须**是以下格式 '答案: $选项' (不带引号), 其中选项是ABCD之一.
+
+{question}
+
+A) {A}
+B) {B}
+C) {C}
+D) {D}
+""".strip()
+
+GRADER_TEMPLATE = """
+Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
+
+Here are some evaluation criteria:
+1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
+2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
+3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
+4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
+
+Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
+A: CORRECT
+B: INCORRECT
+Just return the letters "A" or "B", with no text around it.
+
+Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+
+<Original Question Begin>: \n {question}\n A) {A}\n B) {B}\n C) {C}\n D) {D}\n<Original Question End>\n\n
+<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
+<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
+Judging the correctness of candidates' answers:
+""".strip()
+
+cmmlu_all_sets = list(cmmlu_subject_mapping.keys())
+
+cmmlu_datasets = []
+for _name in cmmlu_all_sets:
+    _ch_name = cmmlu_subject_mapping[_name]
+    prompt_prefix = f'请回答以下关于{_ch_name}的单项选择题, '
+    cmmlu_infer_cfg = dict(
+        prompt_template=dict(
+            type=PromptTemplate,
+            template=dict(
+                round=[
+                    dict(role='HUMAN', prompt=prompt_prefix + QUERY_TEMPLATE),
+                ],
+            ),
+        ),
+        retriever=dict(type=ZeroRetriever),
+        inferencer=dict(type=GenInferencer),
+    )
+
+    cmmlu_eval_cfg = dict(
+        evaluator=dict(
+            type=GenericLLMEvaluator,
+            prompt_template=dict(
+                type=PromptTemplate,
+                template=dict(
+                    begin=[
+                        dict(
+                            role='SYSTEM',
+                            fallback_role='HUMAN',
+                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
+                    ],
+                    round=[
+                        dict(
+                            role='HUMAN',
+                            prompt=GRADER_TEMPLATE
+                        ),
+                    ]),
+            ),
+            dataset_cfg=dict(
+                type=CMMLUDataset,
+                path='opencompass/cmmlu',
+                name=_name,
+                reader_cfg=dict(
+                    input_columns=['question', 'A', 'B', 'C', 'D'],
+                    output_column='answer',
+                    train_split='dev',
+                    test_split='test'
+                ),
+            ),
+            judge_cfg=dict(),
+            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
+        ),
+        pred_role='BOT',
+    )
+    cmmlu_datasets.append(
+        dict(
+            type=CMMLUDataset,
+            path='opencompass/cmmlu',
+            name=_name,
+            abbr=f'cmmlu-{_name}',
+            reader_cfg=dict(
+                input_columns=['question', 'A', 'B', 'C', 'D'],
+                output_column='answer',
+                train_split='dev',
+                test_split='test'),
+            infer_cfg=cmmlu_infer_cfg,
+            eval_cfg=cmmlu_eval_cfg,
+            mode='singlescore',
+        ))
+
+del _name, _ch_name
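Not part of the diff — a rough illustration of the column-to-placeholder mapping the judge template relies on: {question}, {A}-{D}, and {answer} come from the reader columns, while {prediction} is the evaluated model's output. The substitution below only mimics the framework's template filling for clarity; the real template engine differs.

# Illustration only: materializing the grader prompt for one sample.
row = dict(question='1+1=?', A='1', B='2', C='3', D='4', answer='B')
prediction = 'The answer is B'

prompt = GRADER_TEMPLATE
for key, value in dict(row, prediction=prediction).items():
    prompt = prompt.replace('{' + key + '}', value)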
@@ -40,10 +40,9 @@ GRADER_TEMPLATE = """
 Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
 
-<Original Question Begin>: \n{problem}\n<Original Question End>\n\n
-<Gold Target Begin>: \n{solution}\n<Gold Target End>\n\n
+<Original Question Begin>: {question}\n {options_str} \n<Original Question End>\n\n
+<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
 <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
 
 Judging the correctness of candidates' answers:
 """.strip()
 
@@ -70,7 +69,6 @@ drop_eval_cfg = dict(
         dataset_cfg=dict(
             type=DropOpenAIDataset,
             path='data/drop_simple_eval/dev.jsonl',
-            file_name = 'test_prm800k_500.json',
             reader_cfg=drop_reader_cfg,
         ),
         judge_cfg=dict(),
@@ -80,8 +78,6 @@ drop_eval_cfg = dict(
 )
 
-
-
 
 drop_datasets = [
     dict(
         abbr='drop',
@@ -1,4 +1,52 @@
-from mmengine.config import read_base
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.datasets import GPQADataset, GPQA_Simple_Eval_postprocess, GPQAEvaluator
-
-with read_base():
-    from .gpqa_openai_simple_evals_gen_5aeece import gpqa_datasets
+
+# openai_simple_eval prompt
+align_prompt = """
+Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of ABCD. Think step by step before answering.
+
+{question}
+
+A) {A}
+B) {B}
+C) {C}
+D) {D}
+""".strip()
+
+gpqa_reader_cfg = dict(
+    input_columns=['question', 'A', 'B', 'C', 'D'],
+    output_column='answer')
+
+gpqa_infer_cfg = dict(
+    prompt_template=dict(
+        type=PromptTemplate,
+        template=dict(
+            round=[
+                dict(role='HUMAN', prompt=align_prompt),
+            ], )),
+    retriever=dict(type=ZeroRetriever),
+    inferencer=dict(type=GenInferencer))
+
+gpqa_eval_cfg = dict(evaluator=dict(type=GPQAEvaluator),
+                     pred_postprocessor=dict(type=GPQA_Simple_Eval_postprocess))
+
+gpqa_datasets = []
+gpqa_subsets = {
+    # 'extended': 'gpqa_extended.csv',
+    # 'main': 'gpqa_main.csv',
+    'diamond': 'gpqa_diamond.csv'
+}
+
+for split in list(gpqa_subsets.keys()):
+    gpqa_datasets.append(
+        dict(
+            abbr='GPQA_' + split,
+            type=GPQADataset,
+            path='./data/gpqa/',
+            name=gpqa_subsets[split],
+            reader_cfg=gpqa_reader_cfg,
+            infer_cfg=gpqa_infer_cfg,
+            eval_cfg=gpqa_eval_cfg)
+    )
opencompass/configs/datasets/gpqa/gpqa_llm_judge_gen.py (new file, 105 lines)
@@ -0,0 +1,105 @@
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.datasets import GPQADataset, GPQA_Simple_Eval_postprocess
+from opencompass.evaluator import GenericLLMEvaluator
+from opencompass.datasets import generic_llmjudge_postprocess
+
+# openai_simple_eval prompt
+align_prompt = """
+Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of ABCD.
+
+{question}
+
+A) {A}
+B) {B}
+C) {C}
+D) {D}
+""".strip()
+
+GRADER_TEMPLATE = """
+Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
+
+Here are some evaluation criteria:
+1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
+2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
+3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
+4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
+
+Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
+A: CORRECT
+B: INCORRECT
+Just return the letters "A" or "B", with no text around it.
+
+Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+
+<Original Question Begin>: {question}\n A) {A}\n B) {B}\n C) {C}\n D) {D}\n<Original Question End>\n\n
+<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
+<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
+Judging the correctness of candidates' answers:
+""".strip()
+
+gpqa_reader_cfg = dict(
+    input_columns=['question', 'A', 'B', 'C', 'D'],
+    output_column='answer')
+
+gpqa_infer_cfg = dict(
+    prompt_template=dict(
+        type=PromptTemplate,
+        template=dict(
+            round=[
+                dict(role='HUMAN', prompt=align_prompt),
+            ], )),
+    retriever=dict(type=ZeroRetriever),
+    inferencer=dict(type=GenInferencer))
+
+gpqa_datasets = []
+gpqa_subsets = {
+    # 'extended': 'gpqa_extended.csv',
+    # 'main': 'gpqa_main.csv',
+    'diamond': 'gpqa_diamond.csv'
+}
+
+for split in list(gpqa_subsets.keys()):
+    gpqa_eval_cfg = dict(
+        evaluator=dict(
+            type=GenericLLMEvaluator,
+            prompt_template=dict(
+                type=PromptTemplate,
+                template=dict(
+                    begin=[
+                        dict(
+                            role='SYSTEM',
+                            fallback_role='HUMAN',
+                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
+                    ],
+                    round=[
+                        dict(
+                            role='HUMAN',
+                            prompt=GRADER_TEMPLATE
+                        ),
+                    ]),
+            ),
+            dataset_cfg=dict(
+                type=GPQADataset,
+                path='./data/gpqa/',
+                name=gpqa_subsets[split],
+                reader_cfg=gpqa_reader_cfg,
+            ),
+            judge_cfg=dict(),
+            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
+        ),
+        pred_role='BOT',
+    )
+    gpqa_datasets.append(
+        dict(
+            abbr='GPQA_' + split,
+            type=GPQADataset,
+            path='./data/gpqa/',
+            name=gpqa_subsets[split],
+            reader_cfg=gpqa_reader_cfg,
+            infer_cfg=gpqa_infer_cfg,
+            eval_cfg=gpqa_eval_cfg,
+            mode='singlescore',
+        )
+    )
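Not part of the commit — a minimal sketch of pulling this new judge config into a top-level evaluation config. The top-level filename and the commented model line are assumptions for illustration; the import path follows the file added above.

from mmengine.config import read_base

with read_base():
    from opencompass.configs.datasets.gpqa.gpqa_llm_judge_gen import \
        gpqa_datasets  # the list defined in the new file above

datasets = gpqa_datasets
# models = [...]  # supply model configs, and fill judge_cfg, before running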
@@ -1,4 +1,58 @@
-from mmengine.config import read_base
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import FixKRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.openicl.icl_evaluator import AccwithDetailsEvaluator
+from opencompass.datasets import HellaswagDatasetwithICE
+from opencompass.utils.text_postprocessors import first_option_postprocess
-
-with read_base():
-    from .hellaswag_gen_6faab5 import hellaswag_datasets  # noqa: F401, F403
+
+hellaswag_reader_cfg = dict(
+    input_columns=['ctx', 'A', 'B', 'C', 'D'],
+    output_column='label',
+    train_split='train',
+    test_split='val',
+)
+
+hellaswag_infer_cfg = dict(
+    ice_template=dict(
+        type=PromptTemplate,
+        template=dict(
+            round=[
+                dict(role='HUMAN', prompt=f'{{ctx}}\nA) {{A}}\nB) {{B}}\nC) {{C}}\nD) {{D}}\nWhat is the right option?'),
+                dict(role='BOT', prompt='{label}\n'),
+            ]
+        ),
+    ),
+    prompt_template=dict(
+        type=PromptTemplate,
+        template=dict(
+            begin=[
+                dict(role='HUMAN', prompt='Continue the following text without adding any additional information or formatting:\n'),
+                '</E>',
+            ],
+            round=[
+                dict(role='HUMAN', prompt=f'{{ctx}}\nA) {{A}}\nB) {{B}}\nC) {{C}}\nD) {{D}}\nWhat is the right option?'),
+                dict(role='BOT', prompt='{label}\n'),
+            ],
+        ),
+        ice_token='</E>',
+    ),
+    retriever=dict(type=FixKRetriever, fix_id_list=list(range(10))),
+    inferencer=dict(type=GenInferencer),
+)
+
+hellaswag_eval_cfg = dict(
+    evaluator=dict(type=AccwithDetailsEvaluator),
+    pred_role='BOT',
+    pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'),
+)
+
+hellaswag_datasets = [
+    dict(
+        abbr='hellaswag',
+        type=HellaswagDatasetwithICE,
+        path='opencompass/hellaswag_ice',
+        reader_cfg=hellaswag_reader_cfg,
+        infer_cfg=hellaswag_infer_cfg,
+        eval_cfg=hellaswag_eval_cfg,
+    )
+]
@@ -0,0 +1,106 @@
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import FixKRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.openicl.icl_evaluator import AccwithDetailsEvaluator
+from opencompass.datasets import HellaswagDatasetwithICE, generic_llmjudge_postprocess
+from opencompass.utils.text_postprocessors import first_option_postprocess
+from opencompass.evaluator import GenericLLMEvaluator
+
+hellaswag_reader_cfg = dict(
+    input_columns=['ctx', 'A', 'B', 'C', 'D'],
+    output_column='label',
+    train_split='train',
+    test_split='val',
+)
+
+GRADER_TEMPLATE = """
+Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
+
+Here are some evaluation criteria:
+1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
+2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
+3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
+4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
+
+Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
+A: CORRECT
+B: INCORRECT
+Just return the letters "A" or "B", with no text around it.
+
+Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+
+<Original Question Begin>: {question}\n {options_str} \n<Original Question End>\n\n
+<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
+<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
+Judging the correctness of candidates' answers:
+""".strip()
+
+hellaswag_infer_cfg = dict(
+    ice_template=dict(
+        type=PromptTemplate,
+        template=dict(
+            round=[
+                dict(role='HUMAN', prompt=f'{{ctx}}\nA) {{A}}\nB) {{B}}\nC) {{C}}\nD) {{D}}\nWhat is the right option?'),
+                dict(role='BOT', prompt='{label}\n'),
+            ]
+        ),
+    ),
+    prompt_template=dict(
+        type=PromptTemplate,
+        template=dict(
+            begin=[
+                dict(role='HUMAN', prompt='Continue the following text without adding any additional information or formatting:\n'),
+                '</E>',
+            ],
+            round=[
+                dict(role='HUMAN', prompt=f'{{ctx}}\nA) {{A}}\nB) {{B}}\nC) {{C}}\nD) {{D}}\nWhat is the right option?'),
+                dict(role='BOT', prompt='{label}\n'),
+            ],
+        ),
+        ice_token='</E>',
+    ),
+    retriever=dict(type=FixKRetriever, fix_id_list=list(range(10))),
+    inferencer=dict(type=GenInferencer),
+)
+
+hellaswag_eval_cfg = dict(
+    evaluator=dict(
+        type=GenericLLMEvaluator,
+        prompt_template=dict(
+            type=PromptTemplate,
+            template=dict(
+                begin=[
+                    dict(
+                        role='SYSTEM',
+                        fallback_role='HUMAN',
+                        prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
+                ],
+                round=[
+                    dict(
+                        role='HUMAN',
+                        prompt=GRADER_TEMPLATE
+                    ),
+                ]),
+        ),
+        dataset_cfg=dict(
+            type=HellaswagDatasetwithICE,
+            path='opencompass/hellaswag_ice',
+            reader_cfg=hellaswag_reader_cfg,
+        ),
+        judge_cfg=dict(),
+        dict_postprocessor=dict(type=generic_llmjudge_postprocess),
+    ),
+    pred_role='BOT',
+)
+
+hellaswag_datasets = [
+    dict(
+        abbr='hellaswag',
+        type=HellaswagDatasetwithICE,
+        path='opencompass/hellaswag_ice',
+        reader_cfg=hellaswag_reader_cfg,
+        infer_cfg=hellaswag_infer_cfg,
+        eval_cfg=hellaswag_eval_cfg,
+    )
+]
@@ -1,4 +1,36 @@
-from mmengine.config import read_base
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v2
-
-with read_base():
-    from .humaneval_gen_8e312c import humaneval_datasets  # noqa: F401, F403
+
+humaneval_reader_cfg = dict(
+    input_columns=['prompt'], output_column='task_id', train_split='test')
+
+# TODO: allow empty output-column
+humaneval_infer_cfg = dict(
+    prompt_template=dict(
+        type=PromptTemplate,
+        template=dict(round=[
+            dict(
+                role='HUMAN',
+                prompt='Read the following function signature and docstring, and fully implement the function described. Your response should only contain the code for this function.\n{prompt}'),
+        ])),
+    retriever=dict(type=ZeroRetriever),
+    inferencer=dict(type=GenInferencer))
+
+humaneval_eval_cfg = dict(
+    evaluator=dict(type=HumanEvalEvaluator),
+    pred_role='BOT',
+    k=[1, 10, 100],  # the parameter only for humaneval
+    pred_postprocessor=dict(type=humaneval_postprocess_v2),
+)
+
+humaneval_datasets = [
+    dict(
+        abbr='openai_humaneval',
+        type=HumanevalDataset,
+        path='opencompass/humaneval',
+        reader_cfg=humaneval_reader_cfg,
+        infer_cfg=humaneval_infer_cfg,
+        eval_cfg=humaneval_eval_cfg)
+]
opencompass/configs/datasets/korbench/korbench_gen.py (new file, 60 lines)
@@ -0,0 +1,60 @@
+from opencompass.datasets.korbench.korbench import korbenchDataset, korbenchEvaluator
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+
+categories = ['cipher', 'counterfactual', 'logic', 'operation', 'puzzle']
+
+korbench_0shot_single_datasets = []
+
+for category in categories:
+    # Prompt template
+    prompt_template = dict(
+        type=PromptTemplate,
+        template=dict(
+            begin=[
+                dict(
+                    role='HUMAN',
+                    prompt=''
+                )
+            ],
+            round=[
+                dict(
+                    role='HUMAN',
+                    prompt='{prompt}'  # f-string
+                )
+            ]
+        )
+    )
+
+    # Reader configuration
+    reader_cfg = dict(
+        input_columns=['prompt'],
+        output_column='answer',
+    )
+
+    # Inference configuration
+    infer_cfg = dict(
+        prompt_template=prompt_template,
+        retriever=dict(type=ZeroRetriever),
+        inferencer=dict(type=GenInferencer, max_out_len=1024),
+    )
+
+    # Evaluation configuration
+    eval_cfg = dict(
+        evaluator=dict(type=korbenchEvaluator),
+        pred_role='BOT',
+    )
+
+    korbench_dataset = dict(
+        type=korbenchDataset,
+        abbr=f'korbench_{category}',
+        path='opencompass/korbench',
+        prompt_mode='0_shot',
+        category=category,
+        reader_cfg=reader_cfg,
+        infer_cfg=infer_cfg,
+        eval_cfg=eval_cfg,
+    )
+
+    korbench_0shot_single_datasets.append(korbench_dataset)
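An illustrative check (not in the diff) of what the loop above produces — one dataset entry per category:

print([d['abbr'] for d in korbench_0shot_single_datasets])
# ['korbench_cipher', 'korbench_counterfactual', 'korbench_logic',
#  'korbench_operation', 'korbench_puzzle']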
opencompass/configs/datasets/korbench/korbench_llm_judge_gen.py (new file, 113 lines)
@@ -0,0 +1,113 @@
+from opencompass.datasets.korbench.korbench import korbenchDataset, korbenchEvaluator
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.evaluator import GenericLLMEvaluator
+from opencompass.datasets import generic_llmjudge_postprocess
+
+categories = ['cipher', 'counterfactual', 'logic', 'operation', 'puzzle']
+
+GRADER_TEMPLATE = """
+Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
+
+Here are some evaluation criteria:
+1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
+2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
+3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
+4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
+
+Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
+A: CORRECT
+B: INCORRECT
+Just return the letters "A" or "B", with no text around it.
+
+Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+
+<Original Question Begin>: {question}\n {options_str} \n<Original Question End>\n\n
+<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
+<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
+Judging the correctness of candidates' answers:
+""".strip()
+
+
+korbench_0shot_single_datasets = []
+
+for category in categories:
+    # Prompt template
+    prompt_template = dict(
+        type=PromptTemplate,
+        template=dict(
+            begin=[
+                dict(
+                    role='HUMAN',
+                    prompt=''
+                )
+            ],
+            round=[
+                dict(
+                    role='HUMAN',
+                    prompt='{prompt}'  # f-string
+                )
+            ]
+        )
+    )
+
+    # Reader configuration
+    reader_cfg = dict(
+        input_columns=['prompt'],
+        output_column='answer',
+    )
+
+    # Inference configuration
+    infer_cfg = dict(
+        prompt_template=prompt_template,
+        retriever=dict(type=ZeroRetriever),
+        inferencer=dict(type=GenInferencer, max_out_len=1024),
+    )
+
+    # Evaluation configuration
+    eval_cfg = dict(
+        evaluator=dict(
+            type=GenericLLMEvaluator,
+            prompt_template=dict(
+                type=PromptTemplate,
+                template=dict(
+                    begin=[
+                        dict(
+                            role='SYSTEM',
+                            fallback_role='HUMAN',
+                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
+                    ],
+                    round=[
+                        dict(
+                            role='HUMAN',
+                            prompt=GRADER_TEMPLATE
+                        ),
+                    ]),
+            ),
+            dataset_cfg=dict(
+                type=korbenchDataset,
+                path='opencompass/korbench',
+                reader_cfg=reader_cfg,
+                prompt_mode='0_shot',
+                category=category,
+            ),
+            judge_cfg=dict(),
+            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
+        ),
+        pred_role='BOT',
+    )
+
+    korbench_dataset = dict(
+        type=korbenchDataset,
+        abbr=f'korbench_{category}',
+        path='opencompass/korbench',
+        prompt_mode='0_shot',
+        category=category,
+        reader_cfg=reader_cfg,
+        infer_cfg=infer_cfg,
+        eval_cfg=eval_cfg,
+    )
+
+    korbench_0shot_single_datasets.append(korbench_dataset)
@ -1,4 +1,164 @@
from mmengine.config import read_base
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (
    LCBCodeGenerationDataset,
    LCBCodeExecutionDataset,
    LCBTestOutputPredictionDataset,
    LCBCodeGenerationEvaluator,
    LCBCodeExecutionEvaluator,
    LCBTestOutputEvaluator
)
from opencompass.datasets.livecodebench import TestOutputPromptConstants

with read_base():
    from .livecodebench_gen_6966bc import LCB_datasets  # noqa: F401, F403

lcb_code_generation_reader_cfg = dict(
    input_columns=[
        'question_content',
        'format_prompt',
    ],
    # output_column='evaluation_sample',
    output_column='question_id',
)

SYSTEM_MESSAGE_GENERIC = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'

prompt_template = '### Question:\n{question_content}\n\n{format_prompt}' + \
    '### Answer: (use the provided format with backticks)\n\n'


# Code Generation Tasks
lcb_code_generation_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt=prompt_template
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer)
)

lcb_code_generation_eval_cfg = dict(
    evaluator=dict(
        type=LCBCodeGenerationEvaluator,
        num_process_evaluate=4,
        timeout=6,
    ),
    pred_role='BOT',
)

LCBCodeGeneration_dataset = dict(
    type=LCBCodeGenerationDataset,
    abbr='lcb_code_generation',
    path='opencompass/code_generation_lite',
    reader_cfg=lcb_code_generation_reader_cfg,
    infer_cfg=lcb_code_generation_infer_cfg,
    eval_cfg=lcb_code_generation_eval_cfg
)

# Code Execution Dataset
lcb_code_execution_reader_cfg = dict(
    input_columns=[
        'prompt',
    ],
    output_column='evaluation_sample',
)

lcb_code_execution_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            begin=[
                dict(
                    role='SYSTEM',
                    fallback_role='HUMAN',
                    prompt='You are an expert at Python programming, code execution, test case generation, and fuzzing.'
                ),
            ],
            round=[
                dict(
                    role='HUMAN',
                    prompt='{prompt}'
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer)
)

lcb_code_execution_eval_cfg = dict(
    evaluator=dict(
        type=LCBCodeExecutionEvaluator,
    ),
    pred_role='BOT',
)

LCBCodeExecution_dataset = dict(
    type=LCBCodeExecutionDataset,
    abbr='lcb_code_execution',
    path='opencompass/execution-v2',
    reader_cfg=lcb_code_execution_reader_cfg,
    infer_cfg=lcb_code_execution_infer_cfg,
    eval_cfg=lcb_code_execution_eval_cfg,
)

# TestOutput Dataset
lcb_test_output_reader_cfg = dict(
    input_columns=[
        'prompt',
    ],
    output_column='evaluation_sample',
)

system_prompt = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'

lcb_test_output_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            # begin=[
            #     dict(
            #         role='SYSTEM',
            #         prompt=system_prompt
            #     ),
            # ],
            round=[
                dict(
                    role='HUMAN',
                    prompt='{prompt}'
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer)
)

lcb_test_output_eval_cfg = dict(
    evaluator=dict(
        type=LCBTestOutputEvaluator,
    ),
    pred_role='BOT',
)

LCBTestOutput_dataset = dict(
    type=LCBTestOutputPredictionDataset,
    abbr='lcb_test_output',
    path='opencompass/test_generation',
    reader_cfg=lcb_test_output_reader_cfg,
    infer_cfg=lcb_test_output_infer_cfg,
    eval_cfg=lcb_test_output_eval_cfg,
)

LCB_datasets = [
    LCBCodeGeneration_dataset,
    LCBCodeExecution_dataset,
    LCBTestOutput_dataset,
]
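To see what the code-generation prompt looks like once the reader columns are filled in, here is a standalone illustration; plain str.format stands in for the PromptTemplate machinery, and the sample values are invented:

# illustration only: simplified stand-in for template substitution
template = ('### Question:\n{question_content}\n\n{format_prompt}'
            '### Answer: (use the provided format with backticks)\n\n')
sample = {
    'question_content': 'Given a list of integers, return their sum.',
    'format_prompt': 'Wrap your solution in a fenced python block.',
}
print(template.format(**sample))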
@ -1,7 +1,12 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import MATHDataset, MATHEvaluator, math_postprocess_v2, normalize_final_answer
from opencompass.datasets import (
    MATHDataset,
    MATHEvaluator,
    math_postprocess_v2,
    normalize_final_answer,
)

math_reader_cfg = dict(input_columns=['problem'], output_column='solution')

@ -10,17 +15,21 @@ math_infer_cfg = dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='{problem}\nPlease reason step by step, and put your final answer within \\boxed{}.'),
                dict(
                    role='HUMAN',
                    prompt='{problem}\nPlease reason step by step, and put your final answer within \\boxed{}.',
                ),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024),
    inferencer=dict(type=GenInferencer),
)

# postprocess v2
math_eval_cfg = dict(
    evaluator=dict(type=MATHEvaluator, version='v2'), pred_postprocessor=dict(type=math_postprocess_v2),
    evaluator=dict(type=MATHEvaluator, version='v2'),
    pred_postprocessor=dict(type=math_postprocess_v2),
)

math_datasets = [
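Both variants of this config funnel predictions through math_postprocess_v2 before scoring. Conceptually it pulls the final \boxed{...} answer out of the model's reasoning; a simplified re-implementation for illustration (the real function also normalizes the extracted expression):

# conceptual sketch, not the library implementation
import re

def extract_boxed(pred: str) -> str:
    # take the last \boxed{...}; nested braces are not handled in this sketch
    matches = re.findall(r'\\boxed\{([^{}]*)\}', pred)
    return matches[-1] if matches else pred.strip()

print(extract_boxed('So the result is \\boxed{42}.'))  # -> 42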
@ -0,0 +1,36 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import MATHDataset, MATHEvaluator, math_postprocess_v2, normalize_final_answer

math_reader_cfg = dict(input_columns=['problem'], output_column='solution')

math_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='{problem}\nPlease reason step by step, and put your final answer within \\boxed{}.'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024),
)

# postprocess v2
math_eval_cfg = dict(
    evaluator=dict(type=MATHEvaluator, version='v2'), pred_postprocessor=dict(type=math_postprocess_v2),
)

math_datasets = [
    dict(
        type=MATHDataset,
        abbr='math_prm800k_500',
        path='opencompass/math',
        file_name='test_prm800k_500.json',
        reader_cfg=math_reader_cfg,
        infer_cfg=math_infer_cfg,
        eval_cfg=math_eval_cfg,
    )
]
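One subtlety in the prompt above: {problem} is a template placeholder, while the braces in \boxed{} are literal. As I understand the template mechanism, OpenCompass substitutes only the known reader columns, so the literal braces survive; plain str.format would trip over the empty braces. A stand-in substitution for illustration:

# illustration: column-wise replacement keeps the literal \boxed{} intact
prompt = '{problem}\nPlease reason step by step, and put your final answer within \\boxed{}.'
filled = prompt.replace('{problem}', 'Compute 2 + 2.')
print(filled)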
@ -0,0 +1,91 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess
from opencompass.datasets import MATHDataset

math_reader_cfg = dict(input_columns=['problem'], output_column='solution')

math_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='{problem}\nRemember to put your final answer within \\boxed{}.'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=8192),
)

GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.

Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.


<Original Question Begin>: \n{problem}\n<Original Question End>\n\n
<Gold Target Begin>: \n{solution}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

Judging the correctness of candidates' answers:
""".strip()

# Evaluation configuration
math_eval_cfg = dict(
    evaluator=dict(
        type=GenericLLMEvaluator,
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                begin=[
                    dict(
                        role='SYSTEM',
                        fallback_role='HUMAN',
                        prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
                ],
                round=[
                    dict(
                        role='HUMAN',
                        prompt=GRADER_TEMPLATE
                    ),
                ]),
        ),
        dataset_cfg=dict(
            type=MATHDataset,
            path='opencompass/math',
            file_name='test_prm800k_500.json',
            reader_cfg=math_reader_cfg,
        ),
        judge_cfg=dict(),
        dict_postprocessor=dict(type=generic_llmjudge_postprocess),
    ),
    pred_role='BOT',
)

math_datasets = [
    dict(
        type=MATHDataset,
        abbr='math_prm800k_500-llmjudge',
        path='opencompass/math',
        file_name='test_prm800k_500.json',
        reader_cfg=math_reader_cfg,
        infer_cfg=math_infer_cfg,
        eval_cfg=math_eval_cfg,
        mode='singlescore',
    )
]
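The grader is instructed to reply with a bare 'A' or 'B'; generic_llmjudge_postprocess then turns those verdicts into a score. A conceptual sketch of that tally (simplified, not the library code):

# conceptual sketch: map A/B verdicts to an accuracy percentage
def llmjudge_accuracy(judge_outputs):
    verdicts = [o.strip().upper().startswith('A') for o in judge_outputs]
    return 100.0 * sum(verdicts) / max(len(verdicts), 1)

print(llmjudge_accuracy(['A', 'B', 'A']))  # two of three correct -> ~66.7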
@ -1,4 +1,59 @@
from mmengine.config import read_base
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MMLUDataset
from opencompass.utils.text_postprocessors import match_answer_pattern

with read_base():
    from .mmlu_gen_4d595a import mmlu_datasets  # noqa: F401, F403
    from .mmlu_all_sets import mmlu_all_sets

# None of the MMLU datasets on HuggingFace is parsed correctly, so we use our own dataset reader
# Please download the dataset from https://people.eecs.berkeley.edu/~hendrycks/data.tar

QUERY_TEMPLATE = """
Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of ABCD. Think step by step before answering.

{input}

A) {A}
B) {B}
C) {C}
D) {D}
""".strip()

mmlu_reader_cfg = dict(
    input_columns=['input', 'A', 'B', 'C', 'D'],
    output_column='target',
    train_split='dev')

mmlu_datasets = []
for name in mmlu_all_sets:
    mmlu_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(role='HUMAN', prompt=QUERY_TEMPLATE),
                ],
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    mmlu_eval_cfg = dict(
        evaluator=dict(type=AccEvaluator),
        pred_postprocessor=dict(type=match_answer_pattern, answer_pattern=r'(?i)ANSWER\s*:\s*([A-D])'))

    mmlu_datasets.append(
        dict(
            abbr=f'lukaemon_mmlu_{name}',
            type=MMLUDataset,
            path='opencompass/mmlu',
            name=name,
            reader_cfg=mmlu_reader_cfg,
            infer_cfg=mmlu_infer_cfg,
            eval_cfg=mmlu_eval_cfg,
        ))
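The accuracy path hinges on the answer_pattern regex; a quick standalone check of how it behaves on a typical response:

# standalone check of the extraction regex used above
import re

pattern = r'(?i)ANSWER\s*:\s*([A-D])'
response = 'Let me think step by step...\nanswer: c'
match = re.search(pattern, response)
print(match.group(1) if match else None)  # -> 'c' (the inline flag makes it case-insensitive)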
opencompass/configs/datasets/mmlu/mmlu_llm_judge_gen.py (new file, 110 lines)
@ -0,0 +1,110 @@
from mmengine.config import read_base
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MMLUDataset
from opencompass.utils.text_postprocessors import match_answer_pattern
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess

with read_base():
    from .mmlu_all_sets import mmlu_all_sets

# None of the MMLU datasets on HuggingFace is parsed correctly, so we use our own dataset reader
# Please download the dataset from https://people.eecs.berkeley.edu/~hendrycks/data.tar

QUERY_TEMPLATE = """
Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of ABCD. Think step by step before answering.

{input}

A) {A}
B) {B}
C) {C}
D) {D}
""".strip()

GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.

Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.

<Original Question Begin>: {question}\n A) {A}\n B) {B}\n C) {C}\n D) {D}\n<Original Question End>\n\n
<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()

mmlu_reader_cfg = dict(
    input_columns=['input', 'A', 'B', 'C', 'D'],
    output_column='target',
    train_split='dev')

mmlu_datasets = []
for name in mmlu_all_sets:
    mmlu_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(role='HUMAN', prompt=QUERY_TEMPLATE),
                ],
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    mmlu_eval_cfg = dict(
        evaluator=dict(
            type=GenericLLMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
                    ],
                    round=[
                        dict(
                            role='HUMAN',
                            prompt=GRADER_TEMPLATE
                        ),
                    ]),
            ),
            dataset_cfg=dict(
                type=MMLUDataset,
                path='opencompass/mmlu',
                name=name,
                reader_cfg=mmlu_reader_cfg,
            ),
            judge_cfg=dict(),
            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
        ),
        pred_role='BOT',
    )

    mmlu_datasets.append(
        dict(
            abbr=f'lukaemon_mmlu_{name}',
            type=MMLUDataset,
            path='opencompass/mmlu',
            name=name,
            reader_cfg=mmlu_reader_cfg,
            infer_cfg=mmlu_infer_cfg,
            eval_cfg=mmlu_eval_cfg,
        ))
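Unlike the accuracy-based variant, the judged variant needs the gold answer at evaluation time, which appears to be why dataset_cfg is repeated inside the evaluator: GenericLLMEvaluator re-reads the dataset so it can fill the grader template per sample. A much-simplified illustration of that assembly:

# illustration only: per-sample judge prompt assembly (simplified)
grader_stub = ('<Original Question Begin>: {question}\n<Original Question End>\n'
               '<Gold Target Begin>: {answer}\n<Gold Target End>\n'
               '<Predicted Answer Begin>: {prediction}\n<Predicted End>')
row = {'question': '2 + 2 = ?', 'answer': 'B', 'prediction': 'ANSWER: B'}
print(grader_stub.format(**row))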
opencompass/configs/datasets/mmlu_pro/mmlu_pro_gen.py (new file, 64 lines)
@ -0,0 +1,64 @@
from mmengine.config import read_base
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MMLUProDataset
from opencompass.utils.text_postprocessors import match_answer_pattern

with read_base():
    from .mmlu_pro_categories import categories


QUERY_TEMPLATE = """
Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of Options(e.g. one of ABCDEFGHIJKLMNOP). Think step by step before answering.

Question:\n
{question}

Options:\n
{options_str}

""".strip()

mmlu_pro_datasets = []

for category in categories:
    mmlu_pro_reader_cfg = dict(
        input_columns=['question', 'cot_content', 'options_str'],
        output_column='answer',
        train_split='validation',
        test_split='test',
    )
    mmlu_pro_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(role='HUMAN',
                         prompt=QUERY_TEMPLATE),
                ],
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    mmlu_pro_eval_cfg = dict(
        evaluator=dict(type=AccEvaluator),
        pred_postprocessor=dict(
            type=match_answer_pattern,
            answer_pattern=r'(?i)ANSWER\s*:\s*([A-P])')
    )

    mmlu_pro_datasets.append(
        dict(
            abbr=f'mmlu_pro_{category.replace(" ", "_")}',
            type=MMLUProDataset,
            path='opencompass/mmlu_pro',
            category=category,
            reader_cfg=mmlu_pro_reader_cfg,
            infer_cfg=mmlu_pro_infer_cfg,
            eval_cfg=mmlu_pro_eval_cfg,
        ))
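MMLU-Pro questions can carry more than four options, delivered pre-rendered in the options_str column, which is why the answer pattern above accepts A through P. For illustration, one way such a block could be built from raw options (the real formatting is done by the dataset loader, so the exact shape here is an assumption):

# illustrative construction of an options block like options_str
options = ['3', '4', '5', '6']
options_str = '\n'.join(
    f'{chr(ord("A") + i)}) {opt}' for i, opt in enumerate(options))
print(options_str)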
opencompass/configs/datasets/mmlu_pro/mmlu_pro_llm_judge_gen.py (new file, 105 lines)
@ -0,0 +1,105 @@
from mmengine.config import read_base
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import MMLUProDataset, generic_llmjudge_postprocess

with read_base():
    from .mmlu_pro_categories import categories

QUERY_TEMPLATE = """
Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of Options(e.g. one of ABCDEFGHIJKLMNOP). Think step by step before answering.

Question:\n
{question}

Options:\n
{options_str}

""".strip()

GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.

Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.

<Original Question Begin>: {question}\n {options_str} \n<Original Question End>\n\n
<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()

mmlu_pro_datasets = []

for category in categories:
    mmlu_pro_reader_cfg = dict(
        input_columns=['question', 'cot_content', 'options_str'],
        output_column='answer',
        train_split='validation',
        test_split='test',
    )
    mmlu_pro_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(role='HUMAN', prompt=QUERY_TEMPLATE),
                ],
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    mmlu_pro_eval_cfg = dict(
        evaluator=dict(
            type=GenericLLMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                        )
                    ],
                    round=[
                        dict(role='HUMAN', prompt=GRADER_TEMPLATE),
                    ],
                ),
            ),
            dataset_cfg=dict(
                type=MMLUProDataset,
                path='opencompass/mmlu_pro',
                category=category,
                reader_cfg=mmlu_pro_reader_cfg,
            ),
            judge_cfg=dict(),
            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
        ),
    )

    mmlu_pro_datasets.append(
        dict(
            abbr=f'mmlu_pro_{category.replace(" ", "_")}',
            type=MMLUProDataset,
            path='opencompass/mmlu_pro',
            category=category,
            reader_cfg=mmlu_pro_reader_cfg,
            infer_cfg=mmlu_pro_infer_cfg,
            eval_cfg=mmlu_pro_eval_cfg,
        )
    )
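As in the other judged configs, judge_cfg is left empty here and is filled with a judge model at run time. A sketch of what that might look like; OpenAISDK is a real OpenCompass model wrapper, but the model name and parameter choices below are illustrative assumptions:

# hypothetical judge model config (values are assumptions)
from opencompass.models import OpenAISDK

judge_cfg = dict(
    type=OpenAISDK,
    path='gpt-4o-mini',   # judge model name, illustrative
    max_out_len=16,       # the verdict is a single letter
    temperature=0.0,      # deterministic grading
)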
@ -1,4 +1,135 @@
from mmengine.config import read_base
from opencompass.datasets import MusrDataset, MusrEvaluator
from opencompass.openicl import PromptTemplate, ZeroRetriever, GenInferencer

with read_base():
    from .musr_gen_b47fd3 import musr_datasets  # noqa: F401, F403

DATASET_CONFIGS = {
    'murder_mysteries': {
        'abbr': 'musr_murder_mysteries',
        'name': 'murder_mysteries',
        'path': 'opencompass/musr',
        'reader_cfg': dict(
            input_columns=['context', 'question_text', 'question', 'answer', 'choices', 'choices_str', 'intermediate_trees', 'intermediate_data', 'prompt', 'system_prompt', 'gold_answer', 'scidx', 'self_consistency_n', 'ablation_name'],
            output_column='gold_answer',
        ),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt='{system_prompt}'
                        )
                    ],
                    round=[
                        dict(
                            role='HUMAN',
                            prompt='{prompt}'
                        ),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        ),
        'eval_cfg': dict(
            evaluator=dict(
                type=MusrEvaluator,
                answer_index_modifier=1,
                self_consistency_n=1
            ),
        ),
    },
    'object_placements': {
        'abbr': 'musr_object_placements',
        'name': 'object_placements',
        'path': 'opencompass/musr',
        'reader_cfg': dict(
            input_columns=['context', 'question_text', 'question', 'answer', 'choices', 'choices_str', 'intermediate_trees', 'intermediate_data', 'prompt', 'system_prompt', 'gold_answer', 'scidx', 'self_consistency_n', 'ablation_name'],
            output_column='gold_answer',
        ),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt='{system_prompt}'
                        )
                    ],
                    round=[
                        dict(
                            role='HUMAN',
                            prompt='{prompt}'
                        ),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        ),
        'eval_cfg': dict(
            evaluator=dict(
                type=MusrEvaluator,
                answer_index_modifier=1,
                self_consistency_n=1
            ),
        ),
    },
    'team_allocation': {
        'abbr': 'musr_team_allocation',
        'name': 'team_allocation',
        'path': 'opencompass/musr',
        'reader_cfg': dict(
            input_columns=['context', 'question_text', 'question', 'answer', 'choices', 'choices_str', 'intermediate_trees', 'intermediate_data', 'prompt', 'system_prompt', 'gold_answer', 'scidx', 'self_consistency_n', 'ablation_name'],
            output_column='gold_answer',
        ),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt='{system_prompt}'
                        )
                    ],
                    round=[
                        dict(
                            role='HUMAN',
                            prompt='{prompt}'
                        ),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        ),
        'eval_cfg': dict(
            evaluator=dict(
                type=MusrEvaluator,
                answer_index_modifier=1,
                self_consistency_n=1
            ),
        ),
    },
}


musr_datasets = []

for config in DATASET_CONFIGS.values():
    dataset = dict(
        abbr=config['abbr'],
        type=MusrDataset,
        path=config['path'],
        name=config['name'],
        reader_cfg=config['reader_cfg'],
        infer_cfg=config['infer_cfg'],
        eval_cfg=config['eval_cfg'],
    )
    musr_datasets.append(dataset)
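The three MuSR entries above differ only in their name and abbr; the reader/infer/eval sub-configs are repeated verbatim. A behavior-preserving alternative would derive them from a small helper, sketched below (assuming nothing later mutates the shared sub-configs per entry):

# sketch: deriving the repeated MuSR entries from one helper
def make_musr_entry(name, reader_cfg, infer_cfg, eval_cfg):
    return {
        'abbr': f'musr_{name}',
        'name': name,
        'path': 'opencompass/musr',
        'reader_cfg': reader_cfg,
        'infer_cfg': infer_cfg,
        'eval_cfg': eval_cfg,
    }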
opencompass/configs/datasets/musr/musr_llm_judge_gen.py (new file, 238 lines)
@ -0,0 +1,238 @@
from opencompass.datasets import MusrDataset, MusrEvaluator
from opencompass.openicl import PromptTemplate, ZeroRetriever, GenInferencer
from opencompass.datasets import generic_llmjudge_postprocess
from opencompass.evaluator import GenericLLMEvaluator


GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.

Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.

<Original Question Begin>: {question}\n {options_str} \n<Original Question End>\n\n
<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()


DATASET_CONFIGS = {
    'murder_mysteries': {
        'abbr': 'musr_murder_mysteries',
        'name': 'murder_mysteries',
        'path': 'opencompass/musr',
        'reader_cfg': dict(
            input_columns=['context', 'question_text', 'question', 'answer', 'choices', 'choices_str', 'intermediate_trees', 'intermediate_data', 'prompt', 'system_prompt', 'gold_answer', 'scidx', 'self_consistency_n', 'ablation_name'],
            output_column='gold_answer',
        ),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt='{system_prompt}'
                        )
                    ],
                    round=[
                        dict(
                            role='HUMAN',
                            prompt='{prompt}'
                        ),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        ),
        'eval_cfg': dict(
            evaluator=dict(
                type=GenericLLMEvaluator,
                prompt_template=dict(
                    type=PromptTemplate,
                    template=dict(
                        begin=[
                            dict(
                                role='SYSTEM',
                                fallback_role='HUMAN',
                                prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
                        ],
                        round=[
                            dict(
                                role='HUMAN',
                                prompt=GRADER_TEMPLATE
                            ),
                        ]),
                ),
                dataset_cfg=dict(
                    type=MusrDataset,
                    path='opencompass/musr',
                    name='murder_mysteries',
                    reader_cfg=dict(
                        input_columns=['context', 'question_text', 'question', 'answer', 'choices', 'choices_str', 'intermediate_trees', 'intermediate_data', 'prompt', 'system_prompt', 'gold_answer', 'scidx', 'self_consistency_n', 'ablation_name'],
                        output_column='gold_answer',
                    ),
                ),
                judge_cfg=dict(),
                dict_postprocessor=dict(type=generic_llmjudge_postprocess),
            ),
            pred_role='BOT',
        )
    },
    'object_placements': {
        'abbr': 'musr_object_placements',
        'name': 'object_placements',
        'path': 'opencompass/musr',
        'reader_cfg': dict(
            input_columns=['context', 'question_text', 'question', 'answer', 'choices', 'choices_str', 'intermediate_trees', 'intermediate_data', 'prompt', 'system_prompt', 'gold_answer', 'scidx', 'self_consistency_n', 'ablation_name'],
            output_column='gold_answer',
        ),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt='{system_prompt}'
                        )
                    ],
                    round=[
                        dict(
                            role='HUMAN',
                            prompt='{prompt}'
                        ),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        ),
        'eval_cfg': dict(
            evaluator=dict(
                type=GenericLLMEvaluator,
                prompt_template=dict(
                    type=PromptTemplate,
                    template=dict(
                        begin=[
                            dict(
                                role='SYSTEM',
                                fallback_role='HUMAN',
                                prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
                        ],
                        round=[
                            dict(
                                role='HUMAN',
                                prompt=GRADER_TEMPLATE
                            ),
                        ]),
                ),
                dataset_cfg=dict(
                    type=MusrDataset,
                    path='opencompass/musr',
                    name='object_placements',
                    reader_cfg=dict(
                        input_columns=['context', 'question_text', 'question', 'answer', 'choices', 'choices_str', 'intermediate_trees', 'intermediate_data', 'prompt', 'system_prompt', 'gold_answer', 'scidx', 'self_consistency_n', 'ablation_name'],
                        output_column='gold_answer',
                    ),
                ),
                judge_cfg=dict(),
                dict_postprocessor=dict(type=generic_llmjudge_postprocess),
            ),
            pred_role='BOT',
        )
    },
    'team_allocation': {
        'abbr': 'musr_team_allocation',
        'name': 'team_allocation',
        'path': 'opencompass/musr',
        'reader_cfg': dict(
            input_columns=['context', 'question_text', 'question', 'answer', 'choices', 'choices_str', 'intermediate_trees', 'intermediate_data', 'prompt', 'system_prompt', 'gold_answer', 'scidx', 'self_consistency_n', 'ablation_name'],
            output_column='gold_answer',
        ),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt='{system_prompt}'
                        )
                    ],
                    round=[
                        dict(
                            role='HUMAN',
                            prompt='{prompt}'
                        ),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        ),
        'eval_cfg': dict(
            evaluator=dict(
                type=GenericLLMEvaluator,
                prompt_template=dict(
                    type=PromptTemplate,
                    template=dict(
                        begin=[
                            dict(
                                role='SYSTEM',
                                fallback_role='HUMAN',
                                prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
                        ],
                        round=[
                            dict(
                                role='HUMAN',
                                prompt=GRADER_TEMPLATE
                            ),
                        ]),
                ),
                dataset_cfg=dict(
                    type=MusrDataset,
                    path='opencompass/musr',
                    name='team_allocation',
                    reader_cfg=dict(
                        input_columns=['context', 'question_text', 'question', 'answer', 'choices', 'choices_str', 'intermediate_trees', 'intermediate_data', 'prompt', 'system_prompt', 'gold_answer', 'scidx', 'self_consistency_n', 'ablation_name'],
                        output_column='gold_answer',
                    ),
                ),
                judge_cfg=dict(),
                dict_postprocessor=dict(type=generic_llmjudge_postprocess),
            ),
            pred_role='BOT',
        )
    },
}


musr_datasets = []

for config in DATASET_CONFIGS.values():
    dataset = dict(
        abbr=config['abbr'],
        type=MusrDataset,
        path=config['path'],
        name=config['name'],
        reader_cfg=config['reader_cfg'],
        infer_cfg=config['infer_cfg'],
        eval_cfg=config['eval_cfg'],
    )
    musr_datasets.append(dataset)
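Taken together, each of these files exports a list of dataset configs; a fullbench-style run config merges them into the single `datasets` list the runner expects. An illustrative aggregation (the actual fullbench_int3 config touched by this commit lives elsewhere in the repo):

# illustrative merge of the suite lists defined above
datasets = [
    *LCB_datasets,
    *math_datasets,
    *mmlu_datasets,
    *mmlu_pro_datasets,
    *musr_datasets,
    *korbench_0shot_single_datasets,
]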