# ANLI (Adversarial NLI) zero-shot generation config.
#
# Builds one dataset entry per adversarial collection round (R1, R2, R3),
# each evaluated on its dev split. The model is asked a 3-way multiple-choice
# question (Contradiction / Entailment / Neutral); accuracy is computed after
# extracting the first capital letter (A/B/C) from the generated answer.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import AnliDataset
from opencompass.utils.text_postprocessors import first_capital_postprocess

anli_datasets = []
for _split in ['R1', 'R2', 'R3']:
    # Reader: feed context + hypothesis to the prompt, grade against 'label'.
    anli_reader_cfg = dict(
        input_columns=['context', 'hypothesis'],
        output_column='label',
    )

    # Inference: zero-shot (ZeroRetriever), free-form generation.
    anli_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(
                        role='HUMAN',
                        prompt='{context}\n{hypothesis}\nQuestion: What is the relation between the two sentences?\nA. Contradiction\nB. Entailment\nC. Neutral\nAnswer:'),
                    dict(role='BOT', prompt='{label}'),
                ]
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    # Evaluation: accuracy on the BOT turn; keep only the first capital
    # letter of the generation so verbose answers still map to A/B/C.
    anli_eval_cfg = dict(
        evaluator=dict(type=AccEvaluator),
        pred_role='BOT',
        pred_postprocessor=dict(type=first_capital_postprocess),
    )

    anli_datasets.append(
        dict(
            type=AnliDataset,
            abbr=f'anli-{_split}',
            path=f'data/anli/anli_v1.0/{_split}/dev.jsonl',
            reader_cfg=anli_reader_cfg,
            infer_cfg=anli_infer_cfg,
            eval_cfg=anli_eval_cfg,
        )
    )