[Update] History code bench pass@k update (#2102)
* bigcodebench
* humaneval
* humanevalx
* humanevalx
* livecodebench
* mbpp
* humaneval_plus
* fix bug
* template
* max_out fix
* template update
parent 8c0ccf9a6b
commit 7a7a4517ab
examples/eval_codebench_full.py (new file, 155 lines)
@@ -0,0 +1,155 @@
# This config is used to test all the code benchmarks
from mmengine.config import read_base
import os.path as osp
from opencompass.runners import LocalRunner, VOLCRunner
from opencompass.partitioners import NaivePartitioner, NumWorkerPartitioner
from opencompass.tasks import OpenICLInferTask, OpenICLEvalTask

with read_base():
    # Datasets Part
    # bigcodebench
    from opencompass.configs.datasets.bigcodebench.bigcodebench_full_instruct_gen import (
        bigcodebench_full_instruct_datasets
    )
    from opencompass.configs.datasets.bigcodebench.bigcodebench_hard_instruct_gen import (
        bigcodebench_hard_instruct_datasets
    )

    # livecodebench code generation lite v5
    from opencompass.configs.datasets.livecodebench.livecodebench_time_split_gen_a4f90b import (
        LCB_datasets
    )

    # humaneval series
    from opencompass.configs.datasets.humaneval.humaneval_openai_sample_evals_gen_dcae0e import (
        humaneval_datasets
    )
    from opencompass.configs.datasets.humaneval_pro.humaneval_pro_gen import (
        humanevalpro_datasets
    )
    from opencompass.configs.datasets.humanevalx.humanevalx_gen_620cfa import (
        humanevalx_datasets
    )
    from opencompass.configs.datasets.humaneval_plus.humaneval_plus_gen import (
        humaneval_plus_datasets
    )

    # mbpp series
    from opencompass.configs.datasets.mbpp.mbpp_gen import (
        mbpp_datasets
    )
    from opencompass.configs.datasets.mbpp_pro.mbpp_pro_gen import (
        mbpppro_datasets
    )

    # multipl-e
    from opencompass.configs.datasets.multipl_e.multiple_gen import (
        multiple_datasets
    )

    # ds1000
    from opencompass.configs.datasets.ds1000.ds1000_service_eval_gen_cbc84f import (
        ds1000_datasets
    )

    # Models Part
    from opencompass.configs.models.qwen2_5.lmdeploy_qwen2_5_7b_instruct import (
        models as lmdeploy_qwen2_5_7b_instruct_model,
    )

    # Summary Groups
    from opencompass.configs.summarizers.groups.ds1000 import (
        ds1000_summary_groups,
    )
    from opencompass.configs.summarizers.groups.multipl_e import (
        multiple_summary_groups,
    )
    from opencompass.configs.summarizers.groups.humanevalx import (
        humanevalx_summary_groups,
    )

# models config
models = sum([v for k, v in locals().items() if k.endswith('_model')], [])

for model in models:
    model['max_seq_len'] = 16384
    model['max_out_len'] = 8192

# datasets config
datasets = sum(
    (v for k, v in locals().items() if k.endswith('_datasets')),
    [],
)

for item in humanevalx_datasets:
    item['eval_cfg']['evaluator'][
        'ip_address'
    ] = 'codeeval.opencompass.org.cn/humanevalx'
    item['eval_cfg']['evaluator']['port'] = ''
for item in ds1000_datasets:
    item['eval_cfg']['evaluator'][
        'ip_address'
    ] = 'codeeval.opencompass.org.cn/ds1000'
    item['eval_cfg']['evaluator']['port'] = ''


for dataset in datasets:
    dataset['infer_cfg']['inferencer']['max_out_len'] = 8192


# summary
summary_groups = sum(
    [v for k, v in locals().items() if k.endswith('_summary_groups')], []
)
summary_groups.append(
    {'name': 'humanevalx',
     'subsets': ['humanevalx-python', 'humanevalx-cpp', 'humanevalx-java', 'humanevalx-js']}
)
summarizer = dict(
    dataset_abbrs=[
        ['bigcodebench_hard_instruct', 'pass@1'],
        ['bigcodebench_full_instruct', 'pass@1'],
        ['lcb_code_generation', 'pass@1'],
        ['openai_humaneval', 'humaneval_pass@1'],
        ['mbpp', 'score'],
        ['humaneval_pro', 'pass@1'],
        ['mbpp_pro', 'pass@1'],
        ['humaneval_plus', 'humaneval_plus_pass@1'],
        ['multiple', 'naive_average'],
        ['humanevalx', 'naive_average'],
        ['ds1000', 'naive_average'],
        '',
        'humanevalx-python',
        'humanevalx-cpp',
        'humanevalx-java',
        'humanevalx-js',
        '',
        'ds1000_Pandas',
        'ds1000_Numpy',
        'ds1000_Tensorflow',
        'ds1000_Scipy',
        'ds1000_Sklearn',
        'ds1000_Pytorch',
        'ds1000_Matplotlib',
        '',
        'humaneval-multiple-cpp',
        'humaneval-multiple-cs',
        'humaneval-multiple-go',
        'humaneval-multiple-java',
        'humaneval-multiple-rb',
        'humaneval-multiple-js',
        'humaneval-multiple-php',
        'humaneval-multiple-r',
        'humaneval-multiple-rs',
        'humaneval-multiple-sh',
        '',
        'mbpp-multiple-cpp',
        'mbpp-multiple-cs',
        'mbpp-multiple-go',
        'mbpp-multiple-java',
        'mbpp-multiple-rb',
        'mbpp-multiple-js',
        'mbpp-multiple-php',
        'mbpp-multiple-r',
        'mbpp-multiple-rs',
        'mbpp-multiple-sh'
    ],
    summary_groups=summary_groups,
)

work_dir = 'outputs/code'
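Nearly every dataset entry in the configs below carries n=5, k=3, which is what this pass@k update consumes: n completions are sampled per problem, and pass@k is estimated from how many of them pass. For reference, a minimal sketch of the standard unbiased estimator from the Codex paper (Chen et al., 2021); the helper name is illustrative, not an OpenCompass API:

from math import comb

def pass_at_k(n: int, c: int, k: int) -> float:
    # n: completions sampled per problem; c: completions passing all tests;
    # returns the unbiased estimate of P(at least one of k samples passes).
    if n - c < k:
        return 1.0
    return 1.0 - comb(n - c, k) / comb(n, k)

# With the n=5, k=3 used throughout these configs:
# 2 correct out of 5 -> pass@3 = 1 - C(3,3)/C(5,3) = 0.9
assert abs(pass_at_k(5, 2, 3) - 0.9) < 1e-9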
opencompass/configs/datasets/bigcodebench/bigcodebench_full_instruct_gen.py (new file, 44 lines)
@@ -0,0 +1,44 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (BigCodeBenchDataset, BigCodeBenchEvaluator)

bigcodebench_full_reader_cfg = dict(
    input_columns=['instruct_prompt'],
    output_column='test',
)

bigcodebench_full_infer_cfg = dict(prompt_template=dict(
    type=PromptTemplate,
    template=dict(
        begin=[dict(role='system', fallback_role='HUMAN', prompt='')],
        round=[
            dict(role='HUMAN', prompt='{instruct_prompt}'),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

bigcodebench_full_eval_cfg = dict(
    evaluator=dict(
        type=BigCodeBenchEvaluator,
        release_version='v0.1.2',
        eval_type='instruct',
        # remote_execute_api='https://bigcode-bigcodebench-evaluator.hf.space/',
        remote_execute_api=
        'https://opencompass-opencompass-bigcodebench-evaluator.hf.space',  # noqa: E501
        dataset_version='full',
    ),
    pred_role='BOT',
)

bigcodebench_full_instruct_datasets = [
    dict(abbr='bigcodebench_full_instruct',
         type=BigCodeBenchDataset,
         path='opencompass/bigcodebench',
         reader_cfg=bigcodebench_full_reader_cfg,
         infer_cfg=bigcodebench_full_infer_cfg,
         eval_cfg=bigcodebench_full_eval_cfg,
         release_version='v0.1.2',
         n=5,
         k=3)
]
opencompass/configs/datasets/bigcodebench/bigcodebench_hard_instruct_gen.py (new file, 48 lines)
@@ -0,0 +1,48 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (BigCodeBenchDataset, BigCodeBenchEvaluator)

bigcodebench_hard_reader_cfg = dict(
    input_columns=['instruct_prompt'],
    output_column='test',
)

bigcodebench_hard_infer_cfg = dict(prompt_template=dict(
    type=PromptTemplate,
    template=dict(
        begin=[dict(role='system', fallback_role='HUMAN', prompt='')],
        round=[
            dict(role='HUMAN', prompt='{instruct_prompt}'),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer)
)

bigcodebench_hard_eval_cfg = dict(
    evaluator=dict(
        type=BigCodeBenchEvaluator,
        release_version='v0.1.2',
        eval_type='instruct',
        # remote_execute_api='https://bigcode-bigcodebench-evaluator.hf.space/',
        remote_execute_api=
        'https://opencompass-opencompass-bigcodebench-evaluator.hf.space',  # noqa: E501
        dataset_version='hard',
    ),
    pred_role='BOT',
)

bigcodebench_hard_instruct_datasets = [
    dict(
        abbr='bigcodebench_hard_instruct',
        type=BigCodeBenchDataset,
        path='opencompass/bigcodebench',
        reader_cfg=bigcodebench_hard_reader_cfg,
        infer_cfg=bigcodebench_hard_infer_cfg,
        eval_cfg=bigcodebench_hard_eval_cfg,
        release_version='v0.1.2',
        dataset_version='hard',
        n=5,
        k=3
    )
]
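Both BigCodeBench configs above target the OpenCompass-hosted evaluator space, with the upstream BigCode space left as a comment. A minimal sketch, assuming a downstream config wants to switch back to the upstream endpoint (not part of this commit):

for ds in bigcodebench_full_instruct_datasets + bigcodebench_hard_instruct_datasets:
    # upstream evaluator space, from the commented-out URL in the configs above
    ds['eval_cfg']['evaluator']['remote_execute_api'] = \
        'https://bigcode-bigcodebench-evaluator.hf.space/'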
opencompass/configs/datasets/humaneval/humaneval_openai_sample_evals_gen_dcae0e.py (new file, 37 lines)
@@ -0,0 +1,37 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v2

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')

# TODO: allow empty output-column
humaneval_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='Read the following function signature and docstring, and fully implement the function described. Your response should only contain the code for this function.\n{prompt}'),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvalEvaluator),
    pred_role='BOT',
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
)

humaneval_datasets = [
    dict(
        abbr='openai_humaneval',
        type=HumanevalDataset,
        path='opencompass/humaneval',
        reader_cfg=humaneval_reader_cfg,
        infer_cfg=humaneval_infer_cfg,
        eval_cfg=humaneval_eval_cfg,
        n=5,
        k=3)
]
opencompass/configs/datasets/humaneval_plus/humaneval_plus_gen.py (new file, 39 lines)
@@ -0,0 +1,39 @@
# THIS SHALL ALSO BE DEPRECATED
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvalPlusEvaluator, humaneval_postprocess_v2

humaneval_plus_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')

# TODO: allow empty output-column
humaneval_plus_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='Complete the following python code:\n{prompt}'),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

humaneval_plus_eval_cfg = dict(
    evaluator=dict(type=HumanEvalPlusEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # this parameter is only used for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
)

humaneval_plus_datasets = [
    dict(
        abbr='humaneval_plus',
        type=HumanevalDataset,
        path='opencompass/humaneval',
        reader_cfg=humaneval_plus_reader_cfg,
        infer_cfg=humaneval_plus_infer_cfg,
        eval_cfg=humaneval_plus_eval_cfg,
        n=5,
        k=3)
]
opencompass/configs/datasets/humanevalx/humanevalx_gen_620cfa.py (new file, 43 lines)
@@ -0,0 +1,43 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalXDataset, HumanevalXEvaluator

humanevalx_reader_cfg = dict(
    input_columns=['prompt'], output_column='declaration', train_split='test')

humanevalx_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template='{prompt}'),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

humanevalx_eval_cfg_dict = {
    lang: dict(
        evaluator=dict(
            type=HumanevalXEvaluator,
            language=lang,
            ip_address=
            'localhost',  # replace with your code_eval_server ip_address and port
            port=5001),  # refer to https://opencompass.readthedocs.io/en/latest/advanced_guides/code_eval_service.html to launch a server
        pred_role='BOT')
    for lang in ['python', 'cpp', 'go', 'java', 'js']  # rust is not supported yet
}

# Please download the needed `xx.jsonl.gz` from
# https://github.com/THUDM/CodeGeeX2/tree/main/benchmark/humanevalx
# and move them into `data/humanevalx/` folder
humanevalx_datasets = [
    dict(
        type=HumanevalXDataset,
        abbr=f'humanevalx-{lang}',
        language=lang,
        path='./data/humanevalx',
        reader_cfg=humanevalx_reader_cfg,
        infer_cfg=humanevalx_infer_cfg,
        eval_cfg=humanevalx_eval_cfg_dict[lang],
        n=5,
        k=3)
    for lang in ['python', 'cpp', 'go', 'java', 'js']
]
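The eval_cfg above defaults to localhost:5001 and expects a running code-eval-service (see the readthedocs link in the comment); eval_codebench_full.py overrides these fields to the hosted endpoint. A minimal sketch of pointing the datasets at your own server instead, with a placeholder address and port:

for item in humanevalx_datasets:
    item['eval_cfg']['evaluator']['ip_address'] = '10.0.0.5'  # placeholder: your server
    item['eval_cfg']['evaluator']['port'] = 5001              # placeholder: your port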
opencompass/configs/datasets/livecodebench/livecodebench_time_split_gen_a4f90b.py (new file, 166 lines)
@@ -0,0 +1,166 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import (
    LCBCodeGenerationDataset,
    LCBCodeExecutionDataset,
    LCBTestOutputPredictionDataset,
    LCBCodeGenerationEvaluator,
    LCBCodeExecutionEvaluator,
    LCBTestOutputEvaluator
)
from opencompass.datasets.livecodebench import TestOutputPromptConstants


lcb_code_generation_reader_cfg = dict(
    input_columns=[
        'question_content',
        'format_prompt',
    ],
    # output_column='evaluation_sample',
    output_column='question_id',
)

SYSTEM_MESSAGE_GENERIC = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'

prompt_template = '### Question:\n{question_content}\n\n{format_prompt}' + \
    '### Answer: (use the provided format with backticks)\n\n'


# Code Generation Tasks
lcb_code_generation_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt=prompt_template
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer)
)

lcb_code_generation_eval_cfg = dict(
    evaluator=dict(
        type=LCBCodeGenerationEvaluator,
        num_process_evaluate=4,
        timeout=6,
    ),
    pred_role='BOT',
)

LCBCodeGeneration_dataset = dict(
    type=LCBCodeGenerationDataset,
    abbr='lcb_code_generation',
    path='opencompass/code_generation_lite',
    reader_cfg=lcb_code_generation_reader_cfg,
    infer_cfg=lcb_code_generation_infer_cfg,
    eval_cfg=lcb_code_generation_eval_cfg,
    n=5,
    k=3
)

# Code Execution Dataset
lcb_code_execution_reader_cfg = dict(
    input_columns=[
        'prompt',
    ],
    output_column='evaluation_sample',
)

lcb_code_execution_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            begin=[
                dict(
                    role='SYSTEM',
                    fallback_role='HUMAN',
                    prompt='You are an expert at Python programming, code execution, test case generation, and fuzzing.'
                ),
            ],
            round=[
                dict(
                    role='HUMAN',
                    prompt='{prompt}'
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer)
)

lcb_code_execution_eval_cfg = dict(
    evaluator=dict(
        type=LCBCodeExecutionEvaluator,
    ),
    pred_role='BOT',
)

LCBCodeExecution_dataset = dict(
    type=LCBCodeExecutionDataset,
    abbr='lcb_code_execution',
    path='opencompass/execution-v2',
    reader_cfg=lcb_code_execution_reader_cfg,
    infer_cfg=lcb_code_execution_infer_cfg,
    eval_cfg=lcb_code_execution_eval_cfg,
)

# Test Output Prediction Dataset
lcb_test_output_reader_cfg = dict(
    input_columns=[
        'prompt',
    ],
    output_column='evaluation_sample',
)

system_prompt = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'

lcb_test_output_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            # begin=[
            #     dict(
            #         role='SYSTEM',
            #         prompt=system_prompt
            #     ),
            # ],
            round=[
                dict(
                    role='HUMAN',
                    prompt='{prompt}'
                )
            ]
        )
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer)
)

lcb_test_output_eval_cfg = dict(
    evaluator=dict(
        type=LCBTestOutputEvaluator,
    ),
    pred_role='BOT',
)

LCBTestOutput_dataset = dict(
    type=LCBTestOutputPredictionDataset,
    abbr='lcb_test_output',
    path='opencompass/test_generation',
    reader_cfg=lcb_test_output_reader_cfg,
    infer_cfg=lcb_test_output_infer_cfg,
    eval_cfg=lcb_test_output_eval_cfg,
)

LCB_datasets = [
    LCBCodeGeneration_dataset,
    # LCBCodeExecution_dataset,
    # LCBTestOutput_dataset,
]
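Note that LCB_datasets exports only the code-generation subset; the execution and test-output entries are defined above but commented out. Re-enabling them in a downstream config is a one-list edit (a sketch, using the dataset dicts defined in this file):

LCB_datasets = [
    LCBCodeGeneration_dataset,
    LCBCodeExecution_dataset,  # off by default in this commit
    LCBTestOutput_dataset,     # off by default in this commit
]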
opencompass/configs/datasets/mbpp/mbpp_repeat_gen_18dd1b.py (new file, 44 lines)
@@ -0,0 +1,44 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import MBPPDataset, MBPPEvaluator

mbpp_reader_cfg = dict(input_columns=['text', 'test_list'], output_column='test_list_2')

mbpp_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: Write a function to find the similar elements from the given two tuple lists. Your code should pass these tests:\n\n assert similar_elements((3, 4, 5, 6),(5, 7, 4, 10)) == (4, 5)\nassert similar_elements((1, 2, 3, 4),(5, 4, 3, 7)) == (3, 4) \nassert similar_elements((11, 12, 14, 13),(17, 15, 14, 13)) == (13, 14) \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def similar_elements(test_tup1, test_tup2):\r\n res = tuple(set(test_tup1) & set(test_tup2))\r\n return (res)' \n[DONE] \n\n "),

                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: Write a python function to identify non-prime numbers. Your code should pass these tests:\n\n assert is_not_prime(2) == False \nassert is_not_prime(10) == True \nassert is_not_prime(35) == True \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'import math\r\ndef is_not_prime(n):\r\n result = False\r\n for i in range(2,int(math.sqrt(n)) + 1):\r\n if n % i == 0:\r\n result = True\r\n return result' \n[DONE] \n\n "),

                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: Write a function to find the largest integers from a given list of numbers using heap queue algorithm. Your code should pass these tests:\n\n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],3)==[85, 75, 65] \nassert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],2)==[85, 75] \nassert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],5)==[85, 75, 65, 58, 35] \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'import heapq as hq\r\ndef heap_queue_largest(nums,n):\r\n largest_nums = hq.nlargest(n, nums)\r\n return largest_nums' \n[DONE] \n\n "),

                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: {text} Your code should pass these tests:\n\n {test_list} \n'),
                dict(role='BOT', prompt='[BEGIN]\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

mbpp_eval_cfg = dict(evaluator=dict(type=MBPPEvaluator), pred_role='BOT')

mbpp_datasets = [
    dict(
        type=MBPPDataset,
        abbr='mbpp',
        path='opencompass/mbpp',
        reader_cfg=mbpp_reader_cfg,
        infer_cfg=mbpp_infer_cfg,
        eval_cfg=mbpp_eval_cfg,
        n=5,
        k=3
    )
]
opencompass/configs/summarizers/groups/multipl_e.py (new file, 6 lines)
@@ -0,0 +1,6 @@
multiple_summary_groups = []

humaneval_multiple = ['humaneval-multiple-cpp', 'humaneval-multiple-cs', 'humaneval-multiple-go', 'humaneval-multiple-java', 'humaneval-multiple-rb', 'humaneval-multiple-js', 'humaneval-multiple-php', 'humaneval-multiple-r', 'humaneval-multiple-rs', 'humaneval-multiple-sh']
mbpp_multiple = ['mbpp-multiple-cpp', 'mbpp-multiple-cs', 'mbpp-multiple-go', 'mbpp-multiple-java', 'mbpp-multiple-rb', 'mbpp-multiple-js', 'mbpp-multiple-php', 'mbpp-multiple-r', 'mbpp-multiple-rs', 'mbpp-multiple-sh']
multiple_summary_groups.append({'name': 'multiple', 'subsets': humaneval_multiple})
multiple_summary_groups.append({'name': 'multiple', 'subsets': mbpp_multiple})
@@ -188,7 +188,9 @@ class BigCodeBenchEvaluator(BaseEvaluator):
        while True:
            try:
                eval_client = Client(self.remote_execute_api,
-                                     httpx_kwargs=dict(proxies=proxies))
+                                     httpx_kwargs=dict(
+                                         proxies=proxies,
+                                         timeout=httpx.Timeout(100.0)))
                results, pass_at_k = eval_client.predict(
                    split=self.eval_type,
                    samples=handle_file(submitted_contents_path),
@@ -196,7 +198,7 @@ class BigCodeBenchEvaluator(BaseEvaluator):
                    **self.eval_kwargs)
                break
            except (httpx.ReadTimeout, CancelledError):
-                logger.info('Read timeout error. Retrying in 4s...')
+                logger.info('Read timeout error. Retrying in 10s...')
                time.sleep(10)

        if 'pass@1' in pass_at_k.keys():
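Read together, the two hunks above implement a retry loop around the remote Gradio evaluator: a 100-second httpx timeout on the client plus a 10-second sleep on read timeouts (the real code also catches CancelledError). A self-contained sketch, assuming the gradio_client API shown in the diff; the function name is illustrative, proxies handling is omitted, and like the real code it retries indefinitely:

import time
import httpx
from gradio_client import Client

def call_remote_evaluator(api_url, **predict_kwargs):
    while True:
        try:
            client = Client(api_url,
                            httpx_kwargs=dict(timeout=httpx.Timeout(100.0)))
            return client.predict(**predict_kwargs)
        except httpx.ReadTimeout:
            # matches the corrected log message: retry in 10s, not 4s
            time.sleep(10)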
@@ -183,13 +183,13 @@ def humaneval_postprocess_v2(text: str) -> str:
    blocks = re.findall(r'```\w*\n(.*?)```', text, re.DOTALL)
    if len(blocks) >= 1:
        text = blocks[0]
-    return text
+    return text.lstrip()


 def humaneval_postprocess_v3(text: str) -> str:
    blocks = re.findall(r'```\w*\n(.*?)```', text, re.DOTALL)
    if len(blocks) >= 1:
        text = blocks[-1]
-    return text
+    return text.lstrip()


 def humaneval_internal_v2_postprocess(text: str):
    if text.startswith(' ') and not text.startswith('    '):
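The regex capture in these postprocessors keeps everything between the opening fence's newline and the closing fence, so a reply that starts its code block with a blank line used to yield code with a leading newline; the new .lstrip() trims it. A quick illustration:

import re

reply = "```python\n\ndef add(a, b):\n    return a + b\n```"
blocks = re.findall(r'```\w*\n(.*?)```', reply, re.DOTALL)
print(repr(blocks[0]))           # '\ndef add(a, b):\n    return a + b\n'
print(repr(blocks[0].lstrip()))  # 'def add(a, b):\n    return a + b\n'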
@@ -248,6 +248,28 @@ class LCBCodeGenerationEvaluator(BaseEvaluator):
            end_date=end_date)['test']
        self.extractor_version = extractor_version

+    def _build_results(self, extracted_predictions, metrics, eval_results,
+                       final_metadata):
+        results = {}
+        results['pass@1'] = metrics.get('pass@1', 0.0)
+        details = []
+        # Safely get the details list from metrics
+        r = metrics.get('details', {}).get('pass@1', [])
+        for i, (ep, er, fm) in enumerate(
+                zip(extracted_predictions.values(), eval_results.values(),
+                    final_metadata)):
+            detail = {
+                'extracted_prediction':
+                ep[0] if isinstance(ep, list) and ep else ep,
+                'eval_result': er[0] if isinstance(er, list) and er else er,
+                'final_metadata': fm[0] if isinstance(fm, list) and fm else fm
+            }
+            # Use r[i] if available, otherwise fallback to False
+            detail['correct'] = bool(r[i] == 100.0) if i < len(r) else False
+            details.append(detail)
+        results['details'] = details
+        return results
+
     def score(self, predictions, references):
        if len(predictions) != len(references):
            return {
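For reference, the dict _build_results returns has roughly this shape (values illustrative); 'correct' is derived from the per-sample entries under metrics['details']['pass@1'], which are percentages, hence the == 100.0 check:

# {'pass@1': 60.0,
#  'details': [
#      {'extracted_prediction': '<first extracted code sample>',
#       'eval_result': '<first per-sample eval result>',
#       'final_metadata': '<first per-sample metadata>',
#       'correct': True},
#      ...
#  ]}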
@@ -295,13 +317,14 @@ class LCBCodeGenerationEvaluator(BaseEvaluator):
            num_process_evaluate=self.num_process_evaluate,
            timeout=self.timeout,
        )
-        results = {
-            'extracted_predictions': extracted_predictions,
-            'eval_results': eval_results
-        }
-        results.update(metrics)
+        # results = {
+        #     'extracted_predictions': extracted_predictions,
+        #     'eval_results': eval_results
+        # }
+        # results.update(metrics)

-        return results
+        return self._build_results(extracted_predictions, metrics,
+                                   eval_results, final_metadata)


 def evaluate_score(args) -> list[bool]: