from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchF1Evaluator, LongBenchnarrativeqaDataset
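
# Reader config: feed the 'context' and 'input' columns into the prompt and
# use 'answers' as the reference field; both splits point at the test split.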
LongBench_narrativeqa_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test',
)
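
# Inference config: zero-shot prompting (ZeroRetriever adds no in-context
# examples) with free-form generation capped at 128 output tokens.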
LongBench_narrativeqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt='You are given a story, which can be either a novel or a movie script, and a question. Answer the question as concisely as you can, using a single phrase if possible. Do not provide any explanation.\n\nStory: {context}\n\nNow, answer the question based on the story as concisely as you can, using a single phrase if possible. Do not provide any explanation.\n\nQuestion: {input}\n\nAnswer:',
                ),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=128),
)
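
# Evaluation config: score the model ('BOT') predictions with LongBench's
# F1-based evaluator.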
LongBench_narrativeqa_eval_cfg = dict(
    evaluator=dict(type=LongBenchF1Evaluator),
    pred_role='BOT',
)
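
# Dataset entry wiring the reader, inference, and evaluation configs together.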
LongBench_narrativeqa_datasets = [
    dict(
        type=LongBenchnarrativeqaDataset,
        abbr='LongBench_narrativeqa',
        path='opencompass/Longbench',
        name='narrativeqa',
        reader_cfg=LongBench_narrativeqa_reader_cfg,
        infer_cfg=LongBench_narrativeqa_infer_cfg,
        eval_cfg=LongBench_narrativeqa_eval_cfg,
    )
]