# OpenCompass config: SuperGLUE BoolQ evaluated via perplexity comparison.
# Original path: configs/datasets/SuperGLUE_BoolQ/SuperGLUE_BoolQ_ppl_314797.py
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import BoolQDataset_V3
# Reader config: which dataset columns feed the prompt and which holds the gold label.
BoolQ_reader_cfg = dict(
    input_columns=['question', 'passage'],  # substituted into the prompt template
    output_column='label',                  # gold answer column
    # NOTE(review): split name is 'train' while the dataset path below points at
    # val.jsonl — the loader presumably registers the file under 'train'; confirm.
    test_split='train')
# Inference config: one prompt per candidate label; PPLInferencer scores the
# perplexity of each completed dialogue and selects the lower-PPL label.
BoolQ_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template={
            # Keys must match the values appearing in the 'label' column.
            'false':
            dict(round=[
                dict(role='HUMAN', prompt='Passage: {passage}\nQuestion: {question}?'),
                dict(role='BOT', prompt='Answer: No'),
            ]),
            'true':
            dict(round=[
                dict(role='HUMAN', prompt='Passage: {passage}\nQuestion: {question}?'),
                dict(role='BOT', prompt='Answer: Yes'),
            ]),
        },
    ),
    retriever=dict(type=ZeroRetriever),  # zero-shot: no in-context examples retrieved
    inferencer=dict(type=PPLInferencer),
)
BoolQ_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
# Dataset registry entry consumed by OpenCompass runners; bundles the data file
# with the reader/inference/evaluation configs defined above.
BoolQ_datasets = [
    dict(
        abbr='BoolQ',
        type=BoolQDataset_V3,
        # NOTE(review): path points at val.jsonl while reader_cfg uses
        # test_split='train' — verify BoolQDataset_V3 remaps splits as expected.
        path='./data/SuperGLUE/BoolQ/val.jsonl',
        reader_cfg=BoolQ_reader_cfg,
        infer_cfg=BoolQ_infer_cfg,
        eval_cfg=BoolQ_eval_cfg,
    )
]