OpenCompass/opencompass/configs/datasets/longbench/longbenchtrec/longbench_trec_gen_824187.py
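
# Generation-style eval config for the LongBench TREC subset: the model reads a
# long context of labelled example questions and must output the type of one
# final question. Predictions are scored with LongBench's classification metric.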

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (
    LongBenchClassificationEvaluator,
    LongBenchtrecDataset,
    trec_postprocess,
)
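
# Reader: map the LongBench fields to OpenCompass columns. 'context' carries the
# labelled example questions and 'input' the question to classify; the dataset
# ships only a test split, so it is registered as both train and test.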
LongBench_trec_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='all_labels',
    train_split='test',
    test_split='test',
)
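
# Inference: zero-shot prompt assembly (ZeroRetriever adds no in-context
# examples beyond what the 'context' field already provides), followed by
# free-form generation capped at 64 output tokens.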
LongBench_trec_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt='Please determine the type of the question below. Here are some examples of questions.\n\n{context}\n{input}',
                ),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=64),
)
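
# Evaluation: reduce the generated text to a label with trec_postprocess, then
# score it against 'all_labels' with LongBench's classification evaluator.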
LongBench_trec_eval_cfg = dict(
    evaluator=dict(type=LongBenchClassificationEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=trec_postprocess),
)
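
# The dataset entry ties the reader, inference, and evaluation configs together
# under the abbreviation used in result tables.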
LongBench_trec_datasets = [
    dict(
        type=LongBenchtrecDataset,
        abbr='LongBench_trec',
        path='opencompass/Longbench',
        name='trec',
        reader_cfg=LongBench_trec_reader_cfg,
        infer_cfg=LongBench_trec_infer_cfg,
        eval_cfg=LongBench_trec_eval_cfg,
    )
]
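
# Usage sketch (assumes the standard OpenCompass pattern of pulling dataset
# configs into an eval config via mmengine's read_base; the surrounding eval
# file itself is hypothetical):
#
#     from mmengine.config import read_base
#
#     with read_base():
#         from opencompass.configs.datasets.longbench.longbenchtrec.longbench_trec_gen_824187 import (
#             LongBench_trec_datasets,
#         )
#
#     datasets = LongBench_trec_datasets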