# Config for the MATH (prm800k 500-problem subset) generation-based evaluation.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (
    MATHDataset,
    MATHEvaluator,
    math_postprocess_v2,
    normalize_final_answer,
)
# Dataset reader: the 'problem' column is fed to the model as input; the
# reference 'solution' column is the target used at evaluation time.
math_reader_cfg = {
    'input_columns': ['problem'],
    'output_column': 'solution',
}
# Inference configuration: zero-shot (ZeroRetriever, no in-context examples)
# free-form generation. The prompt asks the model to reason step by step and
# to place its final answer inside \boxed{} so the post-processor can find it.
math_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    # NOTE: '{problem}' is filled from the reader's input
                    # column; '\\boxed{}' is literal text shown to the model.
                    prompt='{problem}\nPlease reason step by step, and put your final answer within \\boxed{}.',
                ),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)
# Evaluation configuration: MATHEvaluator in its 'v2' mode, paired with the
# matching v2 prediction post-processor that extracts the final answer from
# the generated text before scoring.
math_eval_cfg = dict(
    evaluator=dict(type=MATHEvaluator, version='v2'),
    pred_postprocessor=dict(type=math_postprocess_v2),
)
# Dataset registry entry: the 500-problem PRM800K test split of MATH, wired
# to the reader/inference/evaluation configs defined above.
math_datasets = [
    dict(
        type=MATHDataset,
        abbr='math_prm800k_500',
        path='opencompass/math',
        file_name='test_prm800k_500.json',
        reader_cfg=math_reader_cfg,
        infer_cfg=math_infer_cfg,
        eval_cfg=math_eval_cfg,
    )
]