from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import MDLRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import commonsenseqaDataset

# One PPL prompt per answer option; the '</E>' ice_token marks where the
# retrieved in-context examples are inserted into each prompt.
_ice_template = dict(
    type=PromptTemplate,
    template={
        'A': '</E>Answer the following question:\n{question}\nAnswer: {A}',
        'B': '</E>Answer the following question:\n{question}\nAnswer: {B}',
        'C': '</E>Answer the following question:\n{question}\nAnswer: {C}',
        'D': '</E>Answer the following question:\n{question}\nAnswer: {D}',
        'E': '</E>Answer the following question:\n{question}\nAnswer: {E}',
    },
    ice_token='</E>')

# MDL retriever selects 8 in-context examples per test item from 30 candidates;
# the PPL inferencer scores each answer option by perplexity.
commonsenseqa_infer_cfg = dict(
    ice_template=_ice_template,
    retriever=dict(
        type=MDLRetriever,
        ice_num=8,
        candidate_num=30,
        select_time=10,
        seed=1,
        batch_size=12,
        ice_template=_ice_template),
    inferencer=dict(type=PPLInferencer))

commonsenseqa_eval_cfg = dict(evaluator=dict(type=AccEvaluator))

commonsenseqa_datasets = [
    dict(
        type=commonsenseqaDataset,
        path='commonsense_qa',
        reader_cfg=dict(
            input_columns=['question', 'A', 'B', 'C', 'D', 'E'],
            output_column='answerKey',
            test_split='validation',
        ),
        infer_cfg=commonsenseqa_infer_cfg,
        eval_cfg=commonsenseqa_eval_cfg)
]

del _ice_template
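
# Usage sketch (assumptions: this file lives under an OpenCompass checkout,
# e.g. configs/datasets/commonsenseqa/commonsenseqa_ppl.py; that path and the
# module name below are hypothetical). A top-level eval config would normally
# pull in `commonsenseqa_datasets` via mmengine's read_base(), roughly:
#
#     from mmengine.config import read_base
#
#     with read_base():
#         from .datasets.commonsenseqa.commonsenseqa_ppl import \
#             commonsenseqa_datasets
#
#     datasets = [*commonsenseqa_datasets]
#
# and the evaluation is then launched with `python run.py <that config>`.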