OpenCompass/configs/datasets/humaneval_plus/humaneval_plus_gen_8e312c.py

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess_v2

humaneval_plus_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')

# TODO: allow empty output-column
humaneval_plus_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='Complete the following python code:\n{prompt}'),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_plus_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator, metric='EvalPlus'),
    pred_role='BOT',
    k=[1, 10, 100],  # this parameter is only used for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
)

humaneval_plus_datasets = [
    dict(
        abbr='humaneval_plus',
        type=HumanevalDataset,
        path='./data/humaneval/human-eval-v2-20210705.jsonl',
        reader_cfg=humaneval_plus_reader_cfg,
        infer_cfg=humaneval_plus_infer_cfg,
        eval_cfg=humaneval_plus_eval_cfg)
]
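
# A minimal usage sketch (not part of this config file, shown here only as a
# hedged example): in OpenCompass, a dataset list such as
# `humaneval_plus_datasets` is typically pulled into a run config via
# `read_base()` and collected into a top-level `datasets` variable. The run
# config file name below is hypothetical.
#
#     from mmengine.config import read_base
#
#     with read_base():
#         from .datasets.humaneval_plus.humaneval_plus_gen_8e312c import \
#             humaneval_plus_datasets
#
#     datasets = [*humaneval_plus_datasets]
#
# The evaluation would then be launched with something like
# `python run.py configs/eval_humaneval_plus.py`, where the config path is
# illustrative.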