Mirror of https://github.com/open-compass/opencompass.git (synced 2025-05-30 16:03:24 +08:00)
Update CIBench (#1089)
* modify requirements/runtime.txt: numpy==1.23.4 --> numpy>=1.23.4
* update cibench: dataset and evaluation
* fix cibench summarizer bug
* update cibench
* move extract_code import

Co-authored-by: zhangchuyu@pjlab.org.cn <zhangchuyu@pjlab.org.cn>
Co-authored-by: Leymore <zfz-960727@163.com>
parent e404b72c52
commit e4830a6926
@@ -1,4 +0,0 @@
-from mmengine.config import read_base
-
-with read_base():
-    from .CIBench_gen_8ab0dc import ci_datasets  # noqa: F401, F403
@@ -19,15 +19,14 @@ cibench_infer_cfg = dict(
     inferencer=dict(type=AgentInferencer, infer_mode='every'),
 )
 
-libs = ['Pandas', 'Matplotlib', 'Opencv', 'SciPy', 'Seaborn', 'PyTorch']
-
+libs = ['matplotlib', 'opencv', 'pandas', 'pytorch', 'scipy', 'seaborn']
 cibench_eval_cfg = dict(evaluator=dict(type=CIBenchEvaluator), pred_role="BOT")
 
 cibench_datasets = [
     dict(
-        abbr=f"cibench_generation_{lib}",
+        abbr=f"cibench_generation/{lib}",
         type=CIBenchDataset,
-        path=f"./data/cibench/{lib}",
+        path=f"./data/cibench_dataset/cibench_generation/{lib}",
         internet_check=False,
         reader_cfg=cibench_reader_cfg,
         infer_cfg=cibench_infer_cfg,
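Note: the renaming above matters because the new summary groups address datasets by these abbrs. Illustratively, the first entry the comprehension now yields (a sketch, with the cfg objects defined earlier in the file omitted):

    from opencompass.datasets import CIBenchDataset

    lib = 'matplotlib'
    entry = dict(
        abbr=f"cibench_generation/{lib}",  # slash-joined name the new summarizer keys expect
        type=CIBenchDataset,
        path=f"./data/cibench_dataset/cibench_generation/{lib}",
        internet_check=False,
    )
    print(entry['abbr'])  # cibench_generation/matplotlib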
@@ -0,0 +1,35 @@
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import AgentInferencer
+
+from opencompass.datasets import CIBenchDataset, CIBenchEvaluator
+
+cibench_reader_cfg = dict(
+    input_columns=["questions"],
+    output_column="references",
+    train_split='test',
+    test_split='test')
+
+cibench_infer_cfg = dict(
+    prompt_template=dict(
+        type=PromptTemplate,
+        template="""{questions}""",
+    ),
+    retriever=dict(type=ZeroRetriever),
+    inferencer=dict(type=AgentInferencer, infer_mode='every_with_gt'),
+)
+
+libs = ['matplotlib', 'opencv', 'pandas', 'pytorch', 'scipy', 'seaborn']
+cibench_eval_cfg = dict(evaluator=dict(type=CIBenchEvaluator), pred_role="BOT")
+
+cibench_datasets = [
+    dict(
+        abbr=f"cibench_generation_oracle/{lib}",
+        type=CIBenchDataset,
+        path=f"./data/cibench_dataset/cibench_generation/{lib}",
+        internet_check=False,
+        reader_cfg=cibench_reader_cfg,
+        infer_cfg=cibench_infer_cfg,
+        eval_cfg=cibench_eval_cfg,
+    ) for lib in libs
+]
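The oracle variant above differs from the plain generation config only in infer_mode='every_with_gt'. As a rough, hypothetical sketch of the distinction (an illustration, not AgentInferencer's actual code): 'every' continues each step from the model's own earlier replies, while 'every_with_gt' rebuilds the history from the gold solutions, so every step is attempted from a correct context.

    # Hypothetical illustration of the two modes; the names here are ours.
    def build_history(questions, model_answers, gold_answers, infer_mode):
        history = []
        for q, pred, gold in zip(questions, model_answers, gold_answers):
            history.append(('user', q))
            use_gold = infer_mode == 'every_with_gt'
            history.append(('assistant', gold if use_gold else pred))
        return history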
@@ -2,7 +2,7 @@ from opencompass.openicl.icl_prompt_template import PromptTemplate
 from opencompass.openicl.icl_retriever import ZeroRetriever
 from opencompass.openicl.icl_inferencer import AgentInferencer
 
-from opencompass.datasets import CIBenchTemplateDataset, CIBenchEvaluator
+from opencompass.datasets import CIBenchDataset, CIBenchEvaluator
 
 cibench_reader_cfg = dict(
     input_columns=["questions"],
@@ -26,11 +26,10 @@ libs = ['/lightgbm', '/matplotlib', '/nltk', '/opencv', '/pandas', '/pytorch',
         '_chinese/opencv', '_chinese/pandas', '_chinese/pytorch',
         '_chinese/scipy', '_chinese/seaborn', '_chinese/sklearn', '_chinese/tensorflow']
 cibench_eval_cfg = dict(evaluator=dict(type=CIBenchEvaluator), pred_role="BOT")
-
 cibench_datasets = [
     dict(
         abbr=f"cibench_template{lib}",
-        type=CIBenchTemplateDataset,
+        type=CIBenchDataset,
         path=f"./data/cibench_dataset/cibench_template{lib}",
         internet_check=False,
         reader_cfg=cibench_reader_cfg,
@@ -0,0 +1,39 @@
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import AgentInferencer
+
+from opencompass.datasets import CIBenchDataset, CIBenchEvaluator
+
+cibench_reader_cfg = dict(
+    input_columns=["questions"],
+    output_column="references",
+    train_split='test',
+    test_split='test')
+
+cibench_infer_cfg = dict(
+    prompt_template=dict(
+        type=PromptTemplate,
+        template="""{questions}""",
+    ),
+    retriever=dict(type=ZeroRetriever),
+    inferencer=dict(type=AgentInferencer, infer_mode='every_with_gt'),
+)
+
+# no tensorboard
+libs = ['/lightgbm', '/matplotlib', '/nltk', '/opencv', '/pandas', '/pytorch',
+        '/scipy', '/seaborn', '/sklearn', '/tensorflow',
+        '_chinese/lightgbm', '_chinese/matplotlib', '_chinese/nltk',
+        '_chinese/opencv', '_chinese/pandas', '_chinese/pytorch',
+        '_chinese/scipy', '_chinese/seaborn', '_chinese/sklearn', '_chinese/tensorflow']
+cibench_eval_cfg = dict(evaluator=dict(type=CIBenchEvaluator), pred_role="BOT")
+cibench_datasets = [
+    dict(
+        abbr=f"cibench_template_oracle{lib}",
+        type=CIBenchDataset,
+        path=f"./data/cibench_dataset/cibench_template{lib}",
+        internet_check=False,
+        reader_cfg=cibench_reader_cfg,
+        infer_cfg=cibench_infer_cfg,
+        eval_cfg=cibench_eval_cfg,
+    ) for lib in libs
+]
@@ -5,29 +5,58 @@ with read_base():
 
 summarizer = dict(
     dataset_abbrs=[
-        '######## CIBench Generation ########', # category
-        ['cibench', 'executable'],
-        ['cibench', 'general_correct'],
-        ['cibench', 'vis_sim'],
+        '######## CIBench Generation########', # category
+        'cibench_generation:tool_rate',
+        'cibench_generation:executable',
+        'cibench_generation:numeric_correct',
+        'cibench_generation:text_score',
+        'cibench_generation:vis_sim',
+        '######## CIBench Generation Oracle########', # category
+        'cibench_generation_oracle:tool_rate',
+        'cibench_generation_oracle:executable',
+        'cibench_generation_oracle:numeric_correct',
+        'cibench_generation_oracle:text_score',
+        'cibench_generation_oracle:vis_sim',
         '######## CIBench Template ########', # category
+        'cibench_template:tool_rate',
         'cibench_template:executable',
         'cibench_template:numeric_correct',
         'cibench_template:text_score',
         'cibench_template:vis_sim',
+        '######## CIBench Template Oracle########', # category
+        'cibench_template_oracle:tool_rate',
+        'cibench_template_oracle:executable',
+        'cibench_template_oracle:numeric_correct',
+        'cibench_template_oracle:text_score',
+        'cibench_template_oracle:vis_sim',
         '######## CIBench Template Chinese ########', # category
+        'cibench_template_cn:tool_rate',
         'cibench_template_cn:executable',
         'cibench_template_cn:numeric_correct',
         'cibench_template_cn:text_score',
         'cibench_template_cn:vis_sim',
-        '######## CIBench Template w/o NLTK ########', # category no text score because it is only for nltk
-        'cibench_template_wo_nltk:executable',
-        'cibench_template_wo_nltk:numeric_correct',
-        'cibench_template_wo_nltk:vis_sim',
-        '######## CIBench Template Chinese w/o NLTK ########', # category
-        'cibench_template_cn_wo_nltk:executable',
-        'cibench_template_cn_wo_nltk:numeric_correct',
-        'cibench_template_cn_wo_nltk:vis_sim',
+        '######## CIBench Template Chinese Oracle########', # category
+        'cibench_template_cn_oracle:tool_rate',
+        'cibench_template_cn_oracle:executable',
+        'cibench_template_cn_oracle:numeric_correct',
+        'cibench_template_cn_oracle:text_score',
+        'cibench_template_cn_oracle:vis_sim',
+        '######## CIBench Category Metric ########',
+        'cibench_data_manipulation:scores',
+        'cibench_data_visualization:scores',
+        'cibench_modeling:scores',
+        'cibench_nlp:scores',
+        'cibench_ip:scores',
+        'cibench_math:scores',
+        '######## CIBench Category Metric Oracle ########',
+        'cibench_data_manipulation_oracle:scores',
+        'cibench_data_visualization_oracle:scores',
+        'cibench_modeling_oracle:scores',
+        'cibench_nlp_oracle:scores',
+        'cibench_ip_oracle:scores',
+        'cibench_math_oracle:scores',
+
     ],
     summary_groups=sum(
         [v for k, v in locals().items() if k.endswith("_summary_groups")], [])
 )
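The summary_groups line above relies on a config-level naming convention: any module-level variable whose name ends in _summary_groups is swept up automatically. Self-contained demo of the idiom:

    # Demo of the locals() sweep used by the summarizer config.
    cibench_summary_groups = [{'name': 'cibench_generation:executable', 'subsets': []}]
    extra_summary_groups = [{'name': 'other:metric', 'subsets': []}]

    summary_groups = sum(
        [v for k, v in locals().items() if k.endswith('_summary_groups')], [])
    print(len(summary_groups))  # 2 -- both lists were concatenated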
@@ -1,11 +1,75 @@
 
-_cibench = ['Pandas', 'Matplotlib', 'Opencv', 'SciPy', 'Seaborn', 'PyTorch']
-_cibench = ['cibench_' + i for i in _cibench]
-cibench_summary_groups = [{'name': 'cibench', 'subsets': _cibench}]
+_cibench_generation_modules = ['pandas', 'matplotlib', 'opencv', 'scipy', 'seaborn', 'pytorch']
+_cibench_generation = ['cibench_generation/' + i for i in _cibench_generation_modules]
+cibench_summary_groups = []
+_cibench_generation_weight = {
+    'matplotlib': [223, 50, 1, 156],
+    'pandas': [200, 45, 45, 38],
+    'pytorch': [69, 0, 8, 11],
+    'seaborn': [130, 0, 2, 106],
+    'opencv': [177, 21, 6, 106],
+    'scipy': [161, 94, 14, 49],
+}
+cibench_summary_groups.extend([
+    {
+        'name': 'cibench_generation:tool_rate',
+        'subsets': [[i, 'tool_rate'] for i in _cibench_generation],
+        'weights': {'cibench_generation/' + k: v[0] for k, v in _cibench_generation_weight.items()},
+    },
+    {
+        'name': 'cibench_generation:executable',
+        'subsets': [[i, 'executable'] for i in _cibench_generation],
+        'weights': {'cibench_generation/' + k: v[0] for k, v in _cibench_generation_weight.items()},
+    },
+    {
+        'name': 'cibench_generation:numeric_correct',
+        'subsets': [[i, 'numeric_correct'] for i in _cibench_generation],
+        'weights': {'cibench_generation/' + k: v[1] for k, v in _cibench_generation_weight.items()},
+    },
+    {
+        'name': 'cibench_generation:text_score',
+        'subsets': [[i, 'text_score'] for i in _cibench_generation],
+        'weights': {'cibench_generation/' + k: v[2] for k, v in _cibench_generation_weight.items()},
+    },
+    {
+        'name': 'cibench_generation:vis_sim',
+        'subsets': [[i, 'vis_sim'] for i in _cibench_generation],
+        'weights': {'cibench_generation/' + k: v[3] for k, v in _cibench_generation_weight.items()},
+    },
+])
 
-_cibench_template = ['lightgbm', 'matplotlib', 'nltk', 'opencv', 'pandas', 'pytorch',
+_cibench_generation = ['cibench_generation_oracle/' + i for i in _cibench_generation_modules]
+cibench_summary_groups.extend([
+    {
+        'name': 'cibench_generation_oracle:tool_rate',
+        'subsets': [[i, 'tool_rate'] for i in _cibench_generation],
+        'weights': {'cibench_generation_oracle/' + k: v[0] for k, v in _cibench_generation_weight.items()},
+    },
+    {
+        'name': 'cibench_generation_oracle:executable',
+        'subsets': [[i, 'executable'] for i in _cibench_generation],
+        'weights': {'cibench_generation_oracle/' + k: v[0] for k, v in _cibench_generation_weight.items()},
+    },
+    {
+        'name': 'cibench_generation_oracle:numeric_correct',
+        'subsets': [[i, 'numeric_correct'] for i in _cibench_generation],
+        'weights': {'cibench_generation_oracle/' + k: v[1] for k, v in _cibench_generation_weight.items()},
+    },
+    {
+        'name': 'cibench_generation_oracle:text_score',
+        'subsets': [[i, 'text_score'] for i in _cibench_generation],
+        'weights': {'cibench_generation_oracle/' + k: v[2] for k, v in _cibench_generation_weight.items()},
+    },
+    {
+        'name': 'cibench_generation_oracle:vis_sim',
+        'subsets': [[i, 'vis_sim'] for i in _cibench_generation],
+        'weights': {'cibench_generation_oracle/' + k: v[3] for k, v in _cibench_generation_weight.items()},
+    },
+])
+
+_cibench_template_modules = ['lightgbm', 'matplotlib', 'nltk', 'opencv', 'pandas', 'pytorch',
                      'scipy', 'seaborn', 'sklearn', 'tensorflow']
-_cibench_template = ['cibench_template/' + i for i in _cibench_template]
+_cibench_template = ['cibench_template/' + i for i in _cibench_template_modules]
 # number of total exec questions in this module
 _cibench_template_weight = {
     'lightgbm': [30, 15, 0, 0],
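The four numbers attached to each module read like per-metric question counts ([executable, numeric_correct, text_score, vis_sim]), so a group score is presumably a count-weighted mean over its subsets, in the spirit of:

    # Sketch under that assumption -- not OpenCompass's actual summarizer code.
    def weighted_group_score(scores, weights):
        total = sum(weights.values())
        return sum(scores[k] * w for k, w in weights.items()) / total if total else -1

    scores = {'cibench_generation/pandas': 80.0, 'cibench_generation/scipy': 60.0}
    weights = {'cibench_generation/pandas': 200, 'cibench_generation/scipy': 161}
    print(round(weighted_group_score(scores, weights), 2))  # 71.08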
@@ -20,6 +84,11 @@ _cibench_template_weight = {
     'tensorflow': [36, 6, 0, 12],
 }
 cibench_summary_groups.extend([
+    {
+        'name': 'cibench_template:tool_rate',
+        'subsets': [[i, 'tool_rate'] for i in _cibench_template],
+        'weights': {'cibench_template/' + k: v[0] for k, v in _cibench_template_weight.items()},
+    },
     {
         'name': 'cibench_template:executable',
         'subsets': [[i, 'executable'] for i in _cibench_template],
@@ -42,12 +111,46 @@ cibench_summary_groups.extend([
     },
 ])
 
+_cibench_template_oracle = ['cibench_template_oracle/' + i for i in _cibench_template_modules]
+cibench_summary_groups.extend([
+    {
+        'name': 'cibench_template_oracle:tool_rate',
+        'subsets': [[i, 'tool_rate'] for i in _cibench_template_oracle],
+        'weights': {'cibench_template_oracle/' + k: v[0] for k, v in _cibench_template_weight.items()},
+    },
+    {
+        'name': 'cibench_template_oracle:executable',
+        'subsets': [[i, 'executable'] for i in _cibench_template_oracle],
+        'weights': {'cibench_template_oracle/' + k: v[0] for k, v in _cibench_template_weight.items()},
+    },
+    {
+        'name': 'cibench_template_oracle:numeric_correct',
+        'subsets': [[i, 'numeric_correct'] for i in _cibench_template_oracle],
+        'weights': {'cibench_template_oracle/' + k: v[1] for k, v in _cibench_template_weight.items()},
+    },
+    {
+        'name': 'cibench_template_oracle:text_score',
+        'subsets': [[i, 'text_score'] for i in _cibench_template_oracle],
+        'weights': {'cibench_template_oracle/' + k: v[2] for k, v in _cibench_template_weight.items()},
+    },
+    {
+        'name': 'cibench_template_oracle:vis_sim',
+        'subsets': [[i, 'vis_sim'] for i in _cibench_template_oracle],
+        'weights': {'cibench_template_oracle/' + k: v[3] for k, v in _cibench_template_weight.items()},
+    },
+])
+
 
 ## chinese
-_cibench_template_cn = ['lightgbm', 'matplotlib', 'nltk', 'opencv', 'pandas', 'pytorch',
+_cibench_template_cn_modules = ['lightgbm', 'matplotlib', 'nltk', 'opencv', 'pandas', 'pytorch',
                         'scipy', 'seaborn', 'sklearn', 'tensorflow']
-_cibench_template_cn = ['cibench_template_chinese/' + i for i in _cibench_template_cn]
+_cibench_template_cn = ['cibench_template_chinese/' + i for i in _cibench_template_cn_modules]
 cibench_summary_groups.extend([
+    {
+        'name': 'cibench_template_cn:tool_rate',
+        'subsets': [[i, 'tool_rate'] for i in _cibench_template_cn],
+        'weights': {'cibench_template_chinese/' + k: v[0] for k, v in _cibench_template_weight.items()},
+    },
     {
         'name': 'cibench_template_cn:executable',
         'subsets': [[i, 'executable'] for i in _cibench_template_cn],
@@ -70,40 +173,223 @@ cibench_summary_groups.extend([
     },
 ])
 
-## add more without nltk
+_cibench_template_cn_oracle = ['cibench_template_oracle_chinese/' + i for i in _cibench_template_cn_modules]
 cibench_summary_groups.extend([
     {
-        'name': 'cibench_template_wo_nltk:executable',
-        'subsets': [[i, 'executable'] for i in _cibench_template if 'nltk' not in i],
-        'weights': {'cibench_template/' + k: v[0] for k, v in _cibench_template_weight.items() if 'nltk' not in k},
+        'name': 'cibench_template_cn_oracle:tool_rate',
+        'subsets': [[i, 'tool_rate'] for i in _cibench_template_cn_oracle],
+        'weights': {'cibench_template_oracle_chinese/' + k: v[0] for k, v in _cibench_template_weight.items()},
     },
     {
-        'name': 'cibench_template_wo_nltk:numeric_correct',
-        'subsets': [[i, 'numeric_correct'] for i in _cibench_template if 'nltk' not in i],
-        'weights': {'cibench_template/' + k: v[1] for k, v in _cibench_template_weight.items() if 'nltk' not in k},
+        'name': 'cibench_template_cn_oracle:executable',
+        'subsets': [[i, 'executable'] for i in _cibench_template_cn_oracle],
+        'weights': {'cibench_template_oracle_chinese/' + k: v[0] for k, v in _cibench_template_weight.items()},
     },
     {
-        'name': 'cibench_template_wo_nltk:vis_sim',
-        'subsets': [[i, 'vis_sim'] for i in _cibench_template if 'nltk' not in i],
-        'weights': {'cibench_template/' + k: v[3] for k, v in _cibench_template_weight.items() if 'nltk' not in k},
+        'name': 'cibench_template_cn_oracle:numeric_correct',
+        'subsets': [[i, 'numeric_correct'] for i in _cibench_template_cn_oracle],
+        'weights': {'cibench_template_oracle_chinese/' + k: v[1] for k, v in _cibench_template_weight.items()},
+    },
+    {
+        'name': 'cibench_template_cn_oracle:text_score',
+        'subsets': [[i, 'text_score'] for i in _cibench_template_cn_oracle],
+        'weights': {'cibench_template_oracle_chinese/' + k: v[2] for k, v in _cibench_template_weight.items()},
+    },
+    {
+        'name': 'cibench_template_cn_oracle:vis_sim',
+        'subsets': [[i, 'vis_sim'] for i in _cibench_template_cn_oracle],
+        'weights': {'cibench_template_oracle_chinese/' + k: v[3] for k, v in _cibench_template_weight.items()},
     },
 ])
+
+
+########### New summarizer for Category metric
+
+cibench_data_manipulation = [
+    ['cibench_generation/pandas', 'numeric_correct', _cibench_generation_weight['pandas'][1]],
+    ['cibench_generation/pandas', 'text_score', _cibench_generation_weight['pandas'][2]],
+    ['cibench_generation/pandas', 'vis_sim', _cibench_generation_weight['pandas'][3]],
+    ['cibench_template/pandas', 'numeric_correct', _cibench_template_weight['pandas'][1]],
+    ['cibench_template/pandas', 'text_score', _cibench_template_weight['pandas'][2]],
+    ['cibench_template/pandas', 'vis_sim', _cibench_template_weight['pandas'][3]],
+]
+cibench_data_visualization = [
+    ['cibench_generation/matplotlib', 'numeric_correct', _cibench_generation_weight['matplotlib'][1]],
+    ['cibench_generation/matplotlib', 'text_score', _cibench_generation_weight['matplotlib'][2]],
+    ['cibench_generation/matplotlib', 'vis_sim', _cibench_generation_weight['matplotlib'][3]],
+    ['cibench_generation/seaborn', 'numeric_correct', _cibench_generation_weight['seaborn'][1]],
+    ['cibench_generation/seaborn', 'text_score', _cibench_generation_weight['seaborn'][2]],
+    ['cibench_generation/seaborn', 'vis_sim', _cibench_generation_weight['seaborn'][3]],
+    ['cibench_template/matplotlib', 'numeric_correct', _cibench_template_weight['matplotlib'][1]],
+    ['cibench_template/matplotlib', 'text_score', _cibench_template_weight['matplotlib'][2]],
+    ['cibench_template/matplotlib', 'vis_sim', _cibench_template_weight['matplotlib'][3]],
+    ['cibench_template/seaborn', 'numeric_correct', _cibench_template_weight['seaborn'][1]],
+    ['cibench_template/seaborn', 'text_score', _cibench_template_weight['seaborn'][2]],
+    ['cibench_template/seaborn', 'vis_sim', _cibench_template_weight['seaborn'][3]],
+]
+cibench_modeling = [
+    ['cibench_generation/pytorch', 'numeric_correct', _cibench_generation_weight['pytorch'][1]],
+    ['cibench_generation/pytorch', 'text_score', _cibench_generation_weight['pytorch'][2]],
+    ['cibench_generation/pytorch', 'vis_sim', _cibench_generation_weight['pytorch'][3]],
+    ['cibench_template/pytorch', 'numeric_correct', _cibench_template_weight['pytorch'][1]],
+    ['cibench_template/pytorch', 'text_score', _cibench_template_weight['pytorch'][2]],
+    ['cibench_template/pytorch', 'vis_sim', _cibench_template_weight['pytorch'][3]],
+    ['cibench_template/sklearn', 'numeric_correct', _cibench_template_weight['sklearn'][1]],
+    ['cibench_template/sklearn', 'text_score', _cibench_template_weight['sklearn'][2]],
+    ['cibench_template/sklearn', 'vis_sim', _cibench_template_weight['sklearn'][3]],
+    ['cibench_template/tensorflow', 'numeric_correct', _cibench_template_weight['tensorflow'][1]],
+    ['cibench_template/tensorflow', 'text_score', _cibench_template_weight['tensorflow'][2]],
+    ['cibench_template/tensorflow', 'vis_sim', _cibench_template_weight['tensorflow'][3]],
+    ['cibench_template/lightgbm', 'numeric_correct', _cibench_template_weight['lightgbm'][1]],
+    ['cibench_template/lightgbm', 'text_score', _cibench_template_weight['lightgbm'][2]],
+    ['cibench_template/lightgbm', 'vis_sim', _cibench_template_weight['lightgbm'][3]],
+]
+cibench_nlp = [
+    ['cibench_template/nltk', 'numeric_correct', _cibench_template_weight['nltk'][1]],
+    ['cibench_template/nltk', 'text_score', _cibench_template_weight['nltk'][2]],
+    ['cibench_template/nltk', 'vis_sim', _cibench_template_weight['nltk'][3]],
+]
+cibench_ip = [
+    ['cibench_generation/opencv', 'numeric_correct', _cibench_generation_weight['opencv'][1]],
+    ['cibench_generation/opencv', 'text_score', _cibench_generation_weight['opencv'][2]],
+    ['cibench_generation/opencv', 'vis_sim', _cibench_generation_weight['opencv'][3]],
+    ['cibench_template/opencv', 'numeric_correct', _cibench_template_weight['opencv'][1]],
+    ['cibench_template/opencv', 'text_score', _cibench_template_weight['opencv'][2]],
+    ['cibench_template/opencv', 'vis_sim', _cibench_template_weight['opencv'][3]],
+]
+cibench_math = [
+    ['cibench_generation/scipy', 'numeric_correct', _cibench_generation_weight['scipy'][1]],
+    ['cibench_generation/scipy', 'text_score', _cibench_generation_weight['scipy'][2]],
+    ['cibench_generation/scipy', 'vis_sim', _cibench_generation_weight['scipy'][3]],
+    ['cibench_template/scipy', 'numeric_correct', _cibench_template_weight['scipy'][1]],
+    ['cibench_template/scipy', 'text_score', _cibench_template_weight['scipy'][2]],
+    ['cibench_template/scipy', 'vis_sim', _cibench_template_weight['scipy'][3]],
+]
 cibench_summary_groups.extend([
     {
-        'name': 'cibench_template_cn_wo_nltk:executable',
-        'subsets': [[i, 'executable'] for i in _cibench_template_cn if 'nltk' not in i],
-        'weights': {'cibench_template_chinese/' + k: v[0] for k, v in _cibench_template_weight.items() if 'nltk' not in k},
+        'name': 'cibench_data_manipulation:scores',
+        'subsets': [i[:2] for i in cibench_data_manipulation],
+        'weights': {f'{k[0]}@{k[1]}': k[-1] for k in cibench_data_manipulation},
     },
     {
-        'name': 'cibench_template_cn_wo_nltk:numeric_correct',
-        'subsets': [[i, 'numeric_correct'] for i in _cibench_template_cn if 'nltk' not in i],
-        'weights': {'cibench_template_chinese/' + k: v[1] for k, v in _cibench_template_weight.items() if 'nltk' not in k},
+        'name': 'cibench_data_visualization:scores',
+        'subsets': [i[:2] for i in cibench_data_visualization],
+        'weights': {f'{k[0]}@{k[1]}': k[-1] for k in cibench_data_visualization},
     },
     {
-        'name': 'cibench_template_cn_wo_nltk:vis_sim',
-        'subsets': [[i, 'vis_sim'] for i in _cibench_template_cn if 'nltk' not in i],
-        'weights': {'cibench_template_chinese/' + k: v[3] for k, v in _cibench_template_weight.items() if 'nltk' not in k},
+        'name': 'cibench_modeling:scores',
+        'subsets': [i[:2] for i in cibench_modeling],
+        'weights': {f'{k[0]}@{k[1]}': k[-1] for k in cibench_modeling},
+    },
+    {
+        'name': 'cibench_nlp:scores',
+        'subsets': [i[:2] for i in cibench_nlp],
+        'weights': {f'{k[0]}@{k[1]}': k[-1] for k in cibench_nlp},
+    },
+    {
+        'name': 'cibench_ip:scores',
+        'subsets': [i[:2] for i in cibench_ip],
+        'weights': {f'{k[0]}@{k[1]}': k[-1] for k in cibench_ip},
+    },
+    {
+        'name': 'cibench_math:scores',
+        'subsets': [i[:2] for i in cibench_math],
+        'weights': {f'{k[0]}@{k[1]}': k[-1] for k in cibench_math},
     },
 ])
+
+
+########### New summarizer for Category metric oracle
+
+cibench_data_manipulation = [
+    ['cibench_generation_oracle/pandas', 'numeric_correct', _cibench_generation_weight['pandas'][1]],
+    ['cibench_generation_oracle/pandas', 'text_score', _cibench_generation_weight['pandas'][2]],
+    ['cibench_generation_oracle/pandas', 'vis_sim', _cibench_generation_weight['pandas'][3]],
+    ['cibench_template_oracle/pandas', 'numeric_correct', _cibench_template_weight['pandas'][1]],
+    ['cibench_template_oracle/pandas', 'text_score', _cibench_template_weight['pandas'][2]],
+    ['cibench_template_oracle/pandas', 'vis_sim', _cibench_template_weight['pandas'][3]],
+]
+cibench_data_visualization = [
+    ['cibench_generation_oracle/matplotlib', 'numeric_correct', _cibench_generation_weight['matplotlib'][1]],
+    ['cibench_generation_oracle/matplotlib', 'text_score', _cibench_generation_weight['matplotlib'][2]],
+    ['cibench_generation_oracle/matplotlib', 'vis_sim', _cibench_generation_weight['matplotlib'][3]],
+    ['cibench_generation_oracle/seaborn', 'numeric_correct', _cibench_generation_weight['seaborn'][1]],
+    ['cibench_generation_oracle/seaborn', 'text_score', _cibench_generation_weight['seaborn'][2]],
+    ['cibench_generation_oracle/seaborn', 'vis_sim', _cibench_generation_weight['seaborn'][3]],
+    ['cibench_template_oracle/matplotlib', 'numeric_correct', _cibench_template_weight['matplotlib'][1]],
+    ['cibench_template_oracle/matplotlib', 'text_score', _cibench_template_weight['matplotlib'][2]],
+    ['cibench_template_oracle/matplotlib', 'vis_sim', _cibench_template_weight['matplotlib'][3]],
+    ['cibench_template_oracle/seaborn', 'numeric_correct', _cibench_template_weight['seaborn'][1]],
+    ['cibench_template_oracle/seaborn', 'text_score', _cibench_template_weight['seaborn'][2]],
+    ['cibench_template_oracle/seaborn', 'vis_sim', _cibench_template_weight['seaborn'][3]],
+]
+cibench_modeling = [
+    ['cibench_generation_oracle/pytorch', 'numeric_correct', _cibench_generation_weight['pytorch'][1]],
+    ['cibench_generation_oracle/pytorch', 'text_score', _cibench_generation_weight['pytorch'][2]],
+    ['cibench_generation_oracle/pytorch', 'vis_sim', _cibench_generation_weight['pytorch'][3]],
+    ['cibench_template_oracle/pytorch', 'numeric_correct', _cibench_template_weight['pytorch'][1]],
+    ['cibench_template_oracle/pytorch', 'text_score', _cibench_template_weight['pytorch'][2]],
+    ['cibench_template_oracle/pytorch', 'vis_sim', _cibench_template_weight['pytorch'][3]],
+    ['cibench_template_oracle/sklearn', 'numeric_correct', _cibench_template_weight['sklearn'][1]],
+    ['cibench_template_oracle/sklearn', 'text_score', _cibench_template_weight['sklearn'][2]],
+    ['cibench_template_oracle/sklearn', 'vis_sim', _cibench_template_weight['sklearn'][3]],
+    ['cibench_template_oracle/tensorflow', 'numeric_correct', _cibench_template_weight['tensorflow'][1]],
+    ['cibench_template_oracle/tensorflow', 'text_score', _cibench_template_weight['tensorflow'][2]],
+    ['cibench_template_oracle/tensorflow', 'vis_sim', _cibench_template_weight['tensorflow'][3]],
+    ['cibench_template_oracle/lightgbm', 'numeric_correct', _cibench_template_weight['lightgbm'][1]],
+    ['cibench_template_oracle/lightgbm', 'text_score', _cibench_template_weight['lightgbm'][2]],
+    ['cibench_template_oracle/lightgbm', 'vis_sim', _cibench_template_weight['lightgbm'][3]],
+]
+cibench_nlp = [
+    ['cibench_template_oracle/nltk', 'numeric_correct', _cibench_template_weight['nltk'][1]],
+    ['cibench_template_oracle/nltk', 'text_score', _cibench_template_weight['nltk'][2]],
+    ['cibench_template_oracle/nltk', 'vis_sim', _cibench_template_weight['nltk'][3]],
+]
+cibench_ip = [
+    ['cibench_generation_oracle/opencv', 'numeric_correct', _cibench_generation_weight['opencv'][1]],
+    ['cibench_generation_oracle/opencv', 'text_score', _cibench_generation_weight['opencv'][2]],
+    ['cibench_generation_oracle/opencv', 'vis_sim', _cibench_generation_weight['opencv'][3]],
+    ['cibench_template_oracle/opencv', 'numeric_correct', _cibench_template_weight['opencv'][1]],
+    ['cibench_template_oracle/opencv', 'text_score', _cibench_template_weight['opencv'][2]],
+    ['cibench_template_oracle/opencv', 'vis_sim', _cibench_template_weight['opencv'][3]],
+]
+cibench_math = [
+    ['cibench_generation_oracle/scipy', 'numeric_correct', _cibench_generation_weight['scipy'][1]],
+    ['cibench_generation_oracle/scipy', 'text_score', _cibench_generation_weight['scipy'][2]],
+    ['cibench_generation_oracle/scipy', 'vis_sim', _cibench_generation_weight['scipy'][3]],
+    ['cibench_template_oracle/scipy', 'numeric_correct', _cibench_template_weight['scipy'][1]],
+    ['cibench_template_oracle/scipy', 'text_score', _cibench_template_weight['scipy'][2]],
+    ['cibench_template_oracle/scipy', 'vis_sim', _cibench_template_weight['scipy'][3]],
+]
+cibench_summary_groups.extend([
+    {
+        'name': 'cibench_data_manipulation_oracle:scores',
+        'subsets': [i[:2] for i in cibench_data_manipulation],
+        'weights': {f'{k[0]}@{k[1]}': k[-1] for k in cibench_data_manipulation},
+    },
+    {
+        'name': 'cibench_data_visualization_oracle:scores',
+        'subsets': [i[:2] for i in cibench_data_visualization],
+        'weights': {f'{k[0]}@{k[1]}': k[-1] for k in cibench_data_visualization},
+    },
+    {
+        'name': 'cibench_modeling_oracle:scores',
+        'subsets': [i[:2] for i in cibench_modeling],
+        'weights': {f'{k[0]}@{k[1]}': k[-1] for k in cibench_modeling},
+    },
+    {
+        'name': 'cibench_nlp_oracle:scores',
+        'subsets': [i[:2] for i in cibench_nlp],
+        'weights': {f'{k[0]}@{k[1]}': k[-1] for k in cibench_nlp},
+    },
+    {
+        'name': 'cibench_ip_oracle:scores',
+        'subsets': [i[:2] for i in cibench_ip],
+        'weights': {f'{k[0]}@{k[1]}': k[-1] for k in cibench_ip},
+    },
+    {
+        'name': 'cibench_math_oracle:scores',
+        'subsets': [i[:2] for i in cibench_math],
+        'weights': {f'{k[0]}@{k[1]}': k[-1] for k in cibench_math},
+    },
+])
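For the category groups just added, each triplet [dataset_abbr, metric, weight] contributes an [abbr, metric] subset plus a weight keyed as '<abbr>@<metric>':

    # Demo of the key scheme, using the pandas weights [200, 45, 45, 38] from above.
    cibench_data_manipulation = [
        ['cibench_generation/pandas', 'numeric_correct', 45],
        ['cibench_generation/pandas', 'text_score', 45],
    ]
    subsets = [i[:2] for i in cibench_data_manipulation]
    weights = {f'{k[0]}@{k[1]}': k[-1] for k in cibench_data_manipulation}
    print(subsets)  # [['cibench_generation/pandas', 'numeric_correct'], ['cibench_generation/pandas', 'text_score']]
    print(weights)  # {'cibench_generation/pandas@numeric_correct': 45, 'cibench_generation/pandas@text_score': 45}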
@@ -4,72 +4,19 @@ import os.path as osp
 import re
 import subprocess
 from collections import defaultdict
+from inspect import signature
 from typing import List, Optional
 
 import numpy as np
 from datasets import Dataset
 
+from opencompass.datasets.base import BaseDataset
+from opencompass.datasets.gsm8k import gsm8k_postprocess
 from opencompass.openicl.icl_evaluator import BaseEvaluator
 from opencompass.registry import ICL_EVALUATORS, LOAD_DATASET
 
-from .base import BaseDataset
-
 
 def load_experiment(file: str) -> dict:
-    """Load single experiment file with solutions."""
-    with open(file, 'r') as f:
-        notebook = json.load(f)
-        example = notebook['cells']
-        metadata = notebook['metadata']
-        modules = metadata.get('modules', [])
-        if modules:
-            # these two annotations should be the same
-            assert len(modules) == len(metadata.get('step_types'))
-            # reformat annotations
-            modules = [[_m.strip() for _m in _modules.split('&')]
-                       for _modules in modules]
-        questions = []
-        source_codes = []
-        outputs = []
-        tags = []
-        for cell in example:
-            if cell['cell_type'] == 'markdown':
-                text = ''.join(cell['source']).strip()
-                if modules:
-                    _modules = modules.pop(0)
-                    text += f"Please use {' and '.join(_modules)} modules."
-                text = text.strip() + '\n'
-                # append the formatted text
-                questions.append(text)
-            elif cell['cell_type'] == 'code':
-                source_codes.append(''.join(cell['source']))
-                if cell['outputs'] and 'data' in cell['outputs'][-1]:
-                    if 'image/png' in cell['outputs'][-1]['data']:
-                        # skip vis temporarily due to lack of evaluation
-                        tags.append('vis')
-                        outputs.append(
-                            cell['outputs'][-1]['data']['image/png'])
-                    elif 'text/plain' in cell['outputs'][-1]['data']:
-                        tags.append('general')
-                        outputs.append(''.join(
-                            cell['outputs'][-1]['data']['text/plain']))
-                else:
-                    tags.append('exec')
-                    outputs.append(None)
-    return dict(
-        experiment=file,
-        questions=sum(([
-            dict(role='user', content=question),
-            dict(role='assistant', content=source_code)
-        ] for question, source_code in zip(questions, source_codes)), []),
-        references=dict(outputs=outputs,
-                        tags=tags,
-                        metadata=metadata,
-                        experiment=file),
-    )
-
-
-def load_experiment_template(file: str) -> dict:
     """Load single experiment file with solutions for template experiment."""
     with open(file, 'r') as f:
         notebook = json.load(f)
@@ -84,11 +31,17 @@ def load_experiment_template(file: str) -> dict:
                        for _modules in modules]
         questions = []
         source_codes = []
+        thoughts = []
         outputs = []
         tags = []
+
         for cell in example:
             if cell['cell_type'] == 'markdown':
                 text = ''.join(cell['source']).strip()
+                try:
+                    text, thought = text.split('\n\nThought: ')
+                except ValueError:
+                    thought = ' '
                 if modules:
                     _modules = modules.pop(0)
                     if 'chinese' not in file:
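A quick demo of the split introduced above: markdown cells annotated with a "Thought:" block are divided into question text and thought, while cells without one keep their full text and fall back to a blank thought via the ValueError branch:

    text = "Plot a histogram of column A.\n\nThought: use pandas and matplotlib."
    try:
        text, thought = text.split('\n\nThought: ')
    except ValueError:
        thought = ' '
    print(text)     # Plot a histogram of column A.
    print(thought)  # use pandas and matplotlib.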
@@ -98,6 +51,7 @@ def load_experiment_template(file: str) -> dict:
                 text = text.strip() + '\n'
                 # append the formatted text
                 questions.append(text)
+                thoughts.append(thought)
             elif cell['cell_type'] == 'code':
                 source_codes.append(''.join(cell['source']))
                 output_flag = False
@@ -132,8 +86,10 @@ def load_experiment_template(file: str) -> dict:
         experiment=file,
         questions=sum(([
             dict(role='user', content=question),
-            dict(role='assistant', content=source_code)
-        ] for question, source_code in zip(questions, source_codes)), []),
+            dict(role='assistant', content=thought + '**split**' + source_code)
+        ]
+                      for question, source_code, thought in zip(
+                          questions, source_codes, thoughts)), []),
         references=dict(outputs=outputs,
                         tags=tags,
                         metadata=metadata,
@@ -156,7 +112,7 @@ def check_internet():
 
 @LOAD_DATASET.register_module()
 class CIBenchDataset(BaseDataset):
-    """Code Interpreter dataset."""
+    """Code Interpreter dataset for template dataset."""
 
     @staticmethod
     def load(path: str, internet_check: bool = False):
@@ -183,43 +139,75 @@ class CIBenchDataset(BaseDataset):
         return dataset
 
 
-@LOAD_DATASET.register_module()
-class CIBenchTemplateDataset(BaseDataset):
-    """Code Interpreter dataset for template dataset."""
-
-    @staticmethod
-    def load(path: str, internet_check: bool = False):
-        """Load whole dataset.
-
-        Args:
-            path(str): Path of cibench dataset.
-            internet_check(bool): Whether to check internet.
-                Defaults to False.
-        """
-        if internet_check:
-            check_internet()
-        assert os.path.exists(path), f'Path {path} does not exist.'
-        data_list = []
-        for cwd, dirs, files in os.walk(path):
-            dirs.sort()
-            files.sort()
-            for f in files:
-                if '.ipynb' in f:
-                    data = load_experiment_template(os.path.join(cwd, f))
-                    data_list.append(data)
-
-        dataset = Dataset.from_list(data_list)
-        return dataset
+def sklearn_ssim(pred_img, target_img):
+    import base64
+
+    import skimage
+    img2 = base64.b64decode(target_img)
+    img2 = skimage.io.imread(img2, plugin='imageio')
+    img1 = skimage.io.imread(pred_img, plugin='imageio')
+    img1 = skimage.transform.resize(img1, img2.shape[:2])
+    img1 = 255 * img1
+    # Convert to integer data type pixels.
+    img1 = img1.astype(np.uint8)
+    ssim = skimage.metrics.structural_similarity(img1, img2, channel_axis=-1)
+    return ssim
+
+
+JUDGE_PROMPT_CN = """你是一个擅长评价可视化能力的助手。
+请你以公正的评判者的身份,评估一个AI模型对可视化相关问题生成的代码所绘制图像的质量。
+我们会给您提供一个代码可视化问题,和需要你评估的AI模型生成的代码所绘制的图像。当你开始你的评估时,你需要遵守以下的流程:
+1. 针对图像,给可视化能力一个1~10的分数,仅需返回数字,无需任何其他描述。
+2. 你的打分需要尽可能严格,并且要遵守下面的评分规则:总的来说,模型回答的质量越高,则分数越高。
+
+当图像完全无法反映出所给定的指令内容时,此类评分得到1到2分。
+当图像能够部分体现出所给定的指令内容,但在具体的细节表达上有很大的缺失时,此类评分为3到4分。
+当图像基本能够符合所给定的指令,但是在图像的美观性上呈现一般,没有特别出彩的地方时,此类评分可以得到5到6分。
+当图像能够较好地匹配上所给的指令,并且在图像的美观性上有所表现,如在颜色搭配、形状设计等方面有一些新意时,此类评分可以得到7到8分。
+当图像完全匹配上所给的指令,涵盖了指令中的所有细节,并且在图像的美观性上表现出色,此类评分才能得到9到10分。
+
+[可视化问题]:{question}
+"""  # noqa
+
+JUDGE_PROMPT = """You are an assistant skilled in assessing visualization capabilities.
+In the capacity of a fair judge, you will evaluate the quality of images drawn by an AI model generating code for visualization-related problems. We will provide you with a code visualization problem and an image drawn by the code created by the AI model you need to assess. When you start your assessment, you must adhere to the following process:
+1. Rate the visualization capability with a score between 1 and 10 for the image, returning only the number without any additional descriptions.
+2. Your scoring needs to be as rigorous as possible, and it should follow the scoring rules below: Overall, the higher the quality of the model's response, the higher the score.
+
+A score of 1 to 2 is given when the image cannot reflect the given instruction content at all.
+A score of 3 to 4 is given when the image can partly reflect the given instruction content, but there is a significant lack of specific detail expression.
+If the image basically meets the given instructions, but the aesthetic quality of the image is average without any outstanding features, this kind of rating can get a score of 5 to 6.
+When the image matches the given instructions well, and shows some aesthetic appeal, such as some originality in color matching and shape design, this kind of rating can get a score of 7 to 8.
+Only when the image completely matches the given instructions, covers all the details in the instructions, and performs excellently in terms of aesthetics, can this kind of rating get a score of 9 to 10.
+
+[Visualization Problem]:{question}
+"""  # noqa
+
+
+def vl_model_score(model, pred_img, ori_prompt, judge_prompt):
+    response = model.interleave_generate(
+        [judge_prompt.format(question=ori_prompt), pred_img])
+    score = gsm8k_postprocess(response)
+    try:
+        score = int(float(score))
+        assert score <= 10 and score >= 1
+        return score / 10
+    except Exception as e:
+        raise ValueError(f'Evaluation failed {e}. Check log for details.')
 
 
 @ICL_EVALUATORS.register_module()
 class CIBenchEvaluator(BaseEvaluator):
     """Evaluator for CI dataset.
 
     Args:
         text_evaluator (optional, dict): The text evaluator for text result
-            comparison[]. Defaults to None, which use Rouge as defaults.
+            comparison[]. Defaults to None, which use rouge as defaults.
             Please notice that a extra key for `metric_name` should be set
             to get the exact metric result, such as `rouge1`.
+        vis_evaluator (optional, dict): The vis evaluator for visualization
+            score. Defaults to None, which means use skimage. Otherwise
+            provide dict from VLMEvalKit.
         output_dir (optional, str): The directory to save experiment
             files in a markdown or notebook format.
         with_ipynb (bool): Generate ipynb correspondingly.
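A minimal check of the skimage call sklearn_ssim relies on (assuming scikit-image >= 0.19, where channel_axis replaced the older multichannel flag):

    import numpy as np
    import skimage.metrics

    img = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
    # An image compared with itself scores 1.0; dissimilar images score lower.
    print(skimage.metrics.structural_similarity(img, img, channel_axis=-1))  # 1.0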
@@ -231,9 +219,12 @@ class CIBenchEvaluator(BaseEvaluator):
 
     def __init__(self,
                  text_evaluator: Optional[dict] = None,
+                 vis_evaluator: Optional[dict] = None,
                  output_dir: Optional[str] = None,
                  with_ipynb: bool = False,
+                 lang: str = 'en',
                  user_data_dir: str = 'ENV') -> None:
+        # build text evaluator
         if text_evaluator is None:
             from opencompass.openicl.icl_evaluator import RougeEvaluator
             self.text_evaluator = ICL_EVALUATORS.build(
@@ -242,6 +233,22 @@ class CIBenchEvaluator(BaseEvaluator):
         else:
             self.text_eval_metric = text_evaluator.pop('metric_name')
             self.text_evaluator = ICL_EVALUATORS.build(text_evaluator)
+        # build visual evaluator
+        if vis_evaluator is None:
+            self.vis_evaluator = None
+        else:
+            try:
+                from vlmeval.config import supported_VLM
+            except ImportError as e:
+                raise ImportError(
+                    f'{e}. Please install vlmeval following: https://github.com/open-compass/VLMEvalKit'  # noqa
+                )
+            assert vis_evaluator['type'] in supported_VLM, ''
+            self.vis_evaluator = supported_VLM[vis_evaluator.pop('type')](
+                **vis_evaluator)
+
+        assert lang in ['en', 'cn'], 'Only `en` and `cn` are supported.'
+        self.lang = lang
         # TODO: should use work dir for this task.
         self.output_dir = output_dir
         self.user_data_dir = self.check_user_data_dir(user_data_dir)
@@ -276,14 +283,14 @@ class CIBenchEvaluator(BaseEvaluator):
         for action in step[::-1]:
             if action['type'] == 'IPythonInterpreter':
                 if action['errmsg']:
-                    return False
+                    return True, False
                 else:
-                    return True
+                    return True, True
         # No code interpreter for this step, reckon as False
-        return False
+        return False, False
 
     @staticmethod
-    def correct_step(step, target):
+    def correct_step(step, target) -> dict:
         """Whether the step output is correct."""
         # Found the latest code interpreter to determine correct
         for action in step[::-1]:
@@ -310,13 +317,13 @@ class CIBenchEvaluator(BaseEvaluator):
                         out = match.group(1)
                         score = (out.strip() == target.strip()
                                  or target.strip() in out.strip())
-                        return score
+                        return {'score': score, 'gt': target, 'pred': out}
                 except Exception:
-                    return False
+                    return {'score': 0, 'gt': target}
         # Fall back to False
-        return False
+        return {'score': 0, 'gt': target}
 
-    def text_step(self, step, target):
+    def text_step(self, step, target) -> dict:
         """Whether the step output is correct."""
         # Found the latest code interpreter to determine correct
         for action in step[::-1]:
@@ -328,51 +335,56 @@ class CIBenchEvaluator(BaseEvaluator):
                     if match:
                         out = match.group(1)
                         score = self.text_evaluator.score([out], [target])
-                        return score[self.text_eval_metric] / 100
+                        score = score[self.text_eval_metric] / 100
+                        return {
+                            'score': score,
+                            'gt_text': target,
+                            'pred_text': out
+                        }
                 except Exception:
-                    return False
+                    return {'score': 0, 'gt_text': target}
         # Fall back to False
-        return False
+        return {'score': 0, 'gt_text': target}
 
-    @staticmethod
-    def vis_similarity_step(step, target):
+    def vis_similarity_step(self, step, target, ori_prompt) -> dict:
         """Whether the step output image has the same structure similarity with
         the given images."""
         # Found the latest code interpreter to determine correct
-        import base64
-
-        import skimage
-
         for action in step[::-1]:
             if action['type'] == 'IPythonInterpreter':
                 if action['result']:
                     try:
-                        pred = action['result']['text']
+                        pred = action['result']['image_path']
                         match = re.search(r'!\[fig-[0-9]*\]\((.*?)\)', pred,
                                           re.DOTALL)
                         if match:
                             img_pred = match.group(1)
-                            img2 = base64.b64decode(target)
-                            img2 = skimage.io.imread(img2, plugin='imageio')
-                            img1 = skimage.io.imread(img_pred, plugin='imageio')
-                            img1 = skimage.transform.resize(img1, img2.shape[:2])
-                            img1 = 255 * img1
-                            # Convert to integer data type pixels.
-                            img1 = img1.astype(np.uint8)
-                            ssim = skimage.metrics.structural_similarity(
-                                img1, img2, channel_axis=-1)
-                            # mse = skimage.metrics.mean_squared_error(img1, img2)
-                            # ssim greater better
-                            # mse smaller better but has no upper bound
-                            return ssim
+                            if self.vis_evaluator is None:
+                                # ssim greater better
+                                score = sklearn_ssim(img_pred, target)
+                                return {'score': score, 'pred_img': img_pred}
+                            else:
+                                # TODO: the following code will be removed later.
+                                if self.lang == 'cn':
+                                    score = vl_model_score(self.vis_evaluator,
                                                            img_pred, ori_prompt,
+                                                           JUDGE_PROMPT_CN)
+                                    return {'score': score, 'pred_img': img_pred}
+                                elif self.lang == 'en':
+                                    score = vl_model_score(self.vis_evaluator,
+                                                           img_pred, ori_prompt,
+                                                           JUDGE_PROMPT)
+                                    return {'score': score, 'pred_img': img_pred}
                     except Exception:
-                        return 0
+                        return {'score': 0}
         # Fall back to 0
-        return 0
+        return {'score': 0}
 
-    def save_results(self, origin_prompt, steps):
+    def save_results(self, origin_prompt, steps, references):
         """Save the prediction result in a markdown and notebook format."""
+
+        from opencompass.lagent.actions.ipython_interpreter import extract_code
+
         def check_jupytext():
             """Check requirements existence."""
             from shutil import which
@@ -383,11 +395,22 @@ class CIBenchEvaluator(BaseEvaluator):
 
         check_jupytext()
         p_list = []
-        from opencompass.lagent.actions.ipython_interpreter import extract_code
-        for idx, (example_origin_prompt,
-                  example_steps) in enumerate(zip(origin_prompt, steps)):
+        total_results = defaultdict(float)
+        total_scores = defaultdict(float)
+        total_nums = defaultdict(int)
+
+        for idx, (example_origin_prompt, example_steps,
+                  gold) in enumerate(zip(origin_prompt, steps, references)):
+            # get result count
+            result, exp_output = self.single_exp(gold, example_steps,
+                                                 example_origin_prompt)
+            for k, v in result.items():
+                total_scores[k] += sum(v)
+                total_nums[k] += len(v)
+
             markdown_lines = []
-            for prompt, step in zip(example_origin_prompt, example_steps):
+            for prompt, step, step_output in zip(example_origin_prompt,
+                                                 example_steps, exp_output):
                 for action in step[::-1]:
                     if action['type'] == 'IPythonInterpreter':
                         valid_action = action
@@ -401,6 +424,9 @@ class CIBenchEvaluator(BaseEvaluator):
                 code_text = '```python\n' + code_text + '\n```'
                 markdown_lines.append(code_text)
                 markdown_lines.append('\n')
+                markdown_lines.append('\n'.join(
+                    [f'{k}: {v}' for k, v in step_output.items()]))
+                markdown_lines.append('\n\n')
 
             md_file = f'experiment{idx}.md'
             with open(md_file, 'w') as f:
@ -417,10 +443,19 @@ class CIBenchEvaluator(BaseEvaluator):
|
|||||||
f"--allow-errors --stdin --stdout' {md_file}",
|
f"--allow-errors --stdin --stdout' {md_file}",
|
||||||
shell=True)
|
shell=True)
|
||||||
p_list.append(p)
|
p_list.append(p)
|
||||||
|
|
||||||
# TODO: async wait
|
# TODO: async wait
|
||||||
for p in p_list:
|
for p in p_list:
|
||||||
p.wait()
|
p.wait()
|
||||||
|
|
||||||
|
# get final scores
|
||||||
|
for k, v in total_scores.items():
|
||||||
|
if total_nums[k] > 0:
|
||||||
|
total_results[k] = total_scores[k] / total_nums[k] * 100
|
||||||
|
else:
|
||||||
|
total_results[k] = -1
|
||||||
|
return total_results
|
||||||
|
|
||||||
def set_data_dir(self, work_dir):
|
def set_data_dir(self, work_dir):
|
||||||
"""Set work directory and link data files for save notebook results."""
|
"""Set work directory and link data files for save notebook results."""
|
||||||
if self.user_data_dir:
|
if self.user_data_dir:
|
||||||
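Note on the hunks above: `save_results` now folds scoring into the markdown pass, accumulating per-step boolean scores per metric and normalizing them to percentages at the end. A minimal, self-contained sketch of that aggregation logic (the function name and sample data are illustrative, not from the repo):

```python
from collections import defaultdict

def aggregate(per_example_results):
    """per_example_results: list of dicts mapping metric -> list of 0/1 step scores."""
    total_scores = defaultdict(float)
    total_nums = defaultdict(int)
    for result in per_example_results:
        for k, v in result.items():
            total_scores[k] += sum(v)
            total_nums[k] += len(v)
    # Normalize each metric to a percentage; -1 marks metrics with no samples.
    return {
        k: total_scores[k] / total_nums[k] * 100 if total_nums[k] > 0 else -1
        for k in total_scores
    }

print(aggregate([{'executable': [1, 1, 0], 'tool_rate': [1, 0, 1]}]))
# -> both metrics come out at roughly 66.7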
@@ -435,7 +470,7 @@ class CIBenchEvaluator(BaseEvaluator):
         """Change work directory and keep the symlink."""
         os.chdir(work_dir)

-    def single_exp(self, gold, steps):
+    def single_exp(self, gold, steps, single_ori_prompt):
         tags = gold['tags']
         outputs = gold['outputs']
         metadata = gold['metadata']
@@ -458,15 +493,25 @@ class CIBenchEvaluator(BaseEvaluator):
         for tag in check_tags:
             key = self.TAG_MAPPING[tag][0]
             result[key] = []
+        result['tool_rate'] = []

-        for tag, step, output in zip(tags, steps, outputs):
+        exp_output = []
+        for tag, step, output, ori_prompt in zip(tags, steps, outputs,
+                                                 single_ori_prompt):
             # check whether this step is valid
-            result['executable'].append(self.valid_step(step))
+            tool_correct, exec_correct = self.valid_step(step)
+            result['tool_rate'].append(tool_correct)
+            result['executable'].append(exec_correct)
+            eval_output = {}
             if tag != 'exec':
                 key, func = self.TAG_MAPPING[tag]
-                result[key].append(func(step, output))
+                kwargs = dict(step=step, target=output, ori_prompt=ori_prompt)
+                kwargs = {k: kwargs[k] for k in signature(func).parameters}
+                eval_output = func(**kwargs)
+                result[key].append(eval_output['score'])
+            exp_output.append(eval_output)

-        return result
+        return result, exp_output

     def get_output_dir(self):
         """Get output dir from eval task.
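The `signature(func).parameters` line above is doing lightweight dispatch: the evaluator builds a superset of keyword arguments and trims it to whatever each tag checker declares, so checkers with different arities share one call site. A hedged sketch with stand-in checkers (the real ones live behind `TAG_MAPPING`):

```python
from inspect import signature

def check_output(step, target):            # ignores ori_prompt
    return {'score': int(step == target)}

def check_vis(step, target, ori_prompt):   # uses all three arguments
    return {'score': int(target in step and 'plot' in ori_prompt)}

for func in (check_output, check_vis):
    kwargs = dict(step='plot saved', target='plot', ori_prompt='plot a line')
    # Keep only the kwargs this particular checker declares.
    kwargs = {k: kwargs[k] for k in signature(func).parameters}
    print(func.__name__, func(**kwargs))
```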
@@ -489,23 +534,7 @@ class CIBenchEvaluator(BaseEvaluator):
         if not osp.exists(self.output_dir):
             os.makedirs(self.output_dir)
         self.set_data_dir(self.output_dir)
-        self.save_results(origin_prompt, steps)
+        total_results = self.save_results(origin_prompt, steps, references)
         self.unset_data_dir(cwd)

-        total_results = defaultdict(float)
-        total_scores = defaultdict(float)
-        total_nums = defaultdict(int)
-        for gold, single_steps in zip(references, steps):
-            result = self.single_exp(gold, single_steps)
-
-            for k, v in result.items():
-                total_scores[k] += sum(v)
-                total_nums[k] += len(v)
-
-        for k, v in total_scores.items():
-            if total_nums[k] > 0:
-                total_results[k] = total_scores[k] / total_nums[k] * 100
-            else:
-                total_results[k] = -1
-
         return total_results
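With aggregation moved into `save_results`, the scoring entry point shrinks to directory bookkeeping around a single call. A toy, self-contained stand-in for that control flow (the class, paths, and returned numbers are all illustrative; `set_data_dir`/`unset_data_dir` are simplified to plain `chdir` calls):

```python
import os
import tempfile

class ToyEvaluator:
    def __init__(self, output_dir):
        self.output_dir = output_dir

    def save_results(self, origin_prompt, steps, references):
        # Pretend we wrote notebooks and aggregated scores here.
        return {'executable': 100.0, 'tool_rate': 50.0}

    def score(self, origin_prompt, steps, references):
        cwd = os.getcwd()
        os.makedirs(self.output_dir, exist_ok=True)
        os.chdir(self.output_dir)   # set_data_dir, simplified
        total_results = self.save_results(origin_prompt, steps, references)
        os.chdir(cwd)               # unset_data_dir, simplified
        return total_results

print(ToyEvaluator(tempfile.mkdtemp()).score([], [], []))
```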
@@ -144,6 +144,7 @@ class IPythonInterpreter(BaseAction):

         def _inner_call():
             result = ''
+            image_path = ''
             succeed = True
             image_idx = 0

@@ -197,7 +198,7 @@ class IPythonInterpreter(BaseAction):
                 if text:
                     result += f'\n\n{msg_type}:\n\n```\n{text}\n```'
                 if image:
-                    result += f'\n\n{image}'
+                    image_path += f'\n\n{image}'
                 if finished:
                     # in case output text too long
                     # might need better design later
@@ -205,7 +206,7 @@ class IPythonInterpreter(BaseAction):
                     ellip = '......'
                     half_len = int((self.trim_output - len(ellip)) / 2)
                     result = result[:half_len] + ellip + result[-half_len:]
-            return succeed, result
+            return succeed, result, image_path

         try:
             if timeout:
@@ -215,7 +216,7 @@ class IPythonInterpreter(BaseAction):

                 signal.signal(signal.SIGALRM, handler)
                 signal.alarm(timeout)
-            succeed, result = _inner_call()
+            succeed, result, image_path = _inner_call()
         except TimeoutError:
             succeed = False
             text = 'The code interpreter encountered an unexpected error.'
@@ -225,7 +226,8 @@ class IPythonInterpreter(BaseAction):
             signal.alarm(0)

         result = result.lstrip('\n')
-        return succeed, result
+        image_path = image_path.lstrip('\n')
+        return succeed, result, image_path

     def __call__(self,
                  command: str,
@@ -234,11 +236,12 @@ class IPythonInterpreter(BaseAction):
         extracted_command = extract_code(command)
         tool_return.args = dict(text=command, extract_code=extracted_command)
         if extracted_command:
-            succeed, result = self._call(extracted_command, timeout)
+            succeed, result, image_path = self._call(extracted_command,
+                                                     timeout)
             if succeed:
                 if not result:
                     result = 'The code is succeed without any outputs.'
-                tool_return.result = dict(text=result)
+                tool_return.result = dict(text=result, image_path=image_path)
                 tool_return.state = ActionStatusCode.SUCCESS
             else:
                 tool_return.errmsg = repr(result)
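Across these interpreter hunks, `_inner_call` and `_call` grow from a `(succeed, result)` pair to a `(succeed, result, image_path)` triple, so rendered figures travel separately from text instead of being appended to it. A toy illustration of that accumulation pattern (`publish` is a stand-in for the kernel-message loop, and the sample messages are made up):

```python
def publish(messages):
    result, image_path = '', ''
    succeed = True
    for msg_type, text, image in messages:
        if text:
            result += f'\n\n{msg_type}:\n\n```\n{text}\n```'
        if image:
            # Collect image paths on their own channel instead of in `result`.
            image_path += f'\n\n{image}'
    return succeed, result.lstrip('\n'), image_path.lstrip('\n')

ok, text, images = publish([
    ('stream', 'hello', None),
    ('display_data', '', 'image/0.png'),
])
print(ok, repr(text), repr(images))
```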
@@ -44,9 +44,10 @@ class LagentAgent:

     def gt_response(self, prompt):
         if 'CIReAct' in str(self.agent.__class__):
-            gold = prompt
-            prompt = f"""{self.agent._protocol.action['begin']} IPythonInterpreter
-{self.agent._protocol.action_input['begin']} ```python\n{gold}\n```\n""" # noqa
+            thought, gold = prompt.split('**split**')
+            prompt = f"""{self.agent._protocol.thought['begin']} {thought}\
+\n{self.agent._protocol.action['begin']} IPythonInterpreter\n\
+{self.agent._protocol.action_input['begin']}```python\n{gold}\n```\n""" # noqa
             action_input = dict(
                 command=f"""```python\n{gold}\n```\n""",
                 timeout=120,
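The gold-response change means a CIBench reference now packs a thought and the gold code joined by the literal `**split**` marker, and `gt_response` renders both through the agent protocol. A runnable sketch with made-up protocol markers standing in for `self.agent._protocol`:

```python
# Hypothetical protocol markers; the real ones come from the agent's protocol object.
protocol = dict(thought='Thought:', action='Action:', action_input='Action Input:')

reference = ("Load the CSV with pandas.**split**"
             "import pandas as pd\ndf = pd.read_csv('data.csv')")
thought, gold = reference.split('**split**')
prompt = f"""{protocol['thought']} {thought}\
\n{protocol['action']} IPythonInterpreter\n\
{protocol['action_input']}```python\n{gold}\n```\n"""
print(prompt)
```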
@@ -17,7 +17,7 @@ from opencompass.utils import (LarkReporter, dataset_abbr_from_cfg,
 from opencompass.utils.prompt import get_prompt_hash

 METRIC_WHITELIST = ['score', 'auc_score', 'accuracy', 'humaneval_pass@1', 'rouge1', 'avg_toxicity_score', 'bleurt_diff', 'matthews_correlation', 'truth', 'f1', 'exact_match']
-METRIC_BLACKLIST = ['bp', 'sys_len', 'ref_len', 'tool_rate']
+METRIC_BLACKLIST = ['bp', 'sys_len', 'ref_len']

 def model_abbr_from_cfg_used_in_summarizer(model):
     if model.get('summarizer_abbr', None):
@@ -5,7 +5,7 @@ json5
 jupyter
 jupyter_client
 jupytext
-lagent
+lagent==0.1.2
 lightgbm==4.1.0
 networkx
 scikit-image
@@ -11,6 +11,7 @@ func_timeout
 fuzzywuzzy
 immutabledict
 jieba
+json5
 langdetect
 ltp
 mmengine-lite