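# PIQA (Physical Interaction QA) evaluated in perplexity (PPL) mode: each of
# the two candidate solutions is rendered into its own prompt, and the model
# predicts the candidate whose rendering it assigns the lower perplexity.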
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import piqaDataset

# Reader settings: which dataset columns feed the prompt and which holds the
# gold label (0 or 1).
piqa_reader_cfg = dict(
    input_columns=['goal', 'sol1', 'sol2'],
    output_column='label',
    test_split='validation')

# Inference settings: render the question once per candidate solution; the
# template keys 0 and 1 must match the values the 'label' column can take.
# PPLInferencer scores each rendered string by perplexity and predicts the
# key of the lower-perplexity one; ZeroRetriever adds no in-context examples.
piqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            0: 'The following makes sense: \nQ: {goal}\nA: {sol1}\n',
            1: 'The following makes sense: \nQ: {goal}\nA: {sol2}\n'
        }),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLInferencer))
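
# For a toy item such as goal='keep bread fresh', sol1='store it in a freezer',
# sol2='store it in a sink' (hypothetical values, not drawn from the dataset),
# the two strings scored by the inferencer would be:
#
#     The following makes sense:
#     Q: keep bread fresh
#     A: store it in a freezer
#
#     The following makes sense:
#     Q: keep bread fresh
#     A: store it in a sink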

# Evaluation settings: accuracy of the predicted label against the gold label.
piqa_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

piqa_datasets = [
    dict(
        abbr='piqa',
        type=piqaDataset,
        path='./data/piqa',
        reader_cfg=piqa_reader_cfg,
        infer_cfg=piqa_infer_cfg,
        eval_cfg=piqa_eval_cfg)
]
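
# A minimal sketch of how a top-level OpenCompass config might consume this
# file (the relative import path below is an assumption and depends on where
# this config sits in your tree):
#
#     from mmengine.config import read_base
#
#     with read_base():
#         from .datasets.piqa.piqa_ppl import piqa_datasets
#
#     datasets = [*piqa_datasets]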