# Configuration for evaluating code generation on the HumanEval benchmark
# (OpenAI human-eval-v2) with zero-shot generative inference.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess

# Reader: the model only consumes the function stub in 'prompt'; 'task_id' is
# used as a placeholder output column because generation has no gold text.
humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')

# TODO: allow empty output-column
humaneval_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        # Single-turn instruction-style prompt; '{prompt}' is filled with the
        # incomplete Python function from the dataset.
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\nComplete the following python function.:\n{prompt}\n\n### Response:\n'),
        ])),
    retriever=dict(type=ZeroRetriever),  # zero-shot: no in-context examples
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvalEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval (pass@k values)
    # Strips markdown fences / prose so only executable code is scored.
    pred_postprocessor=dict(type=humaneval_postprocess),
)

humaneval_datasets = [
    dict(
        abbr='openai_humaneval',
        type=HumanevalDataset,
        path='./data/humaneval/human-eval-v2-20210705.jsonl',
        reader_cfg=humaneval_reader_cfg,
        infer_cfg=humaneval_infer_cfg,
        eval_cfg=humaneval_eval_cfg)
]