[Sync] Sync with internal codes 2024.06.28 (#1279)
This commit is contained in:
  parent 842fb1cd70
  commit a32f21a356
.gitignore (vendored): 1 line changed
@@ -102,6 +102,7 @@ configs/sft_cfg/60B/*
 configs/sft_cfg/100B/*

 configs/cky/
+configs/_internal_legacy*
 # in case llama clone in the opencompass
 llama/

@@ -35,6 +35,7 @@ repos:
       exclude: |
         (?x)^(
             .*\.jsonl|
+            opencompass/datasets/subjective/mtbench101.py|
             configs/
         )
   - repo: https://gitee.com/openmmlab/mirrors-pre-commit-hooks
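The new exclusion can be checked directly, since `(?x)` enables verbose mode in Python's `re` just as it does in pre-commit. The pattern below is a hand-assembled equivalent of the YAML block above, for illustration only (not part of the commit):

import re

# Hand-assembled equivalent of the exclude pattern above (illustration only).
pat = re.compile(r'''(?x)^(
    .*\.jsonl|
    opencompass/datasets/subjective/mtbench101.py|
    configs/
)''')

assert pat.match('opencompass/datasets/subjective/mtbench101.py')
assert pat.match('configs/datasets/foo.py')
assert not pat.match('opencompass/models/base.py')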
@@ -5,8 +5,8 @@ with read_base():
     from ..datasets.cmmlu.cmmlu_gen_c13365 import cmmlu_datasets
     from ..datasets.ceval.ceval_gen_5f30c7 import ceval_datasets
     from ..datasets.GaokaoBench.GaokaoBench_no_subjective_gen_4c31db import GaokaoBench_datasets
-    from ..datasets.triviaqa.triviaqa_wiki_1shot_gen_eaf81e import triviaqa_datasets
-    from ..datasets.nq.nq_open_1shot_gen_01cf41 import nq_datasets
+    from ..datasets.triviaqa.triviaqa_wiki_1shot_gen_bc5f21 import triviaqa_datasets
+    from ..datasets.nq.nq_open_1shot_gen_2e45e5 import nq_datasets
     from ..datasets.race.race_gen_69ee4f import race_datasets
     from ..datasets.winogrande.winogrande_5shot_gen_b36770 import winogrande_datasets
     from ..datasets.hellaswag.hellaswag_10shot_gen_e42710 import hellaswag_datasets
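The 6-character suffixes in these import paths (`eaf81e` to `bc5f21`, `01cf41` to `2e45e5`) appear to be content fingerprints: when a dataset config's prompt changes, the file is regenerated under a new hash so stale imports fail loudly instead of silently picking up new prompts. A minimal sketch of the idea, assuming an md5-style digest of the config body (OpenCompass ships `tools/update_dataset_suffix.py` for the real thing; the exact hashing details here are an assumption):

import hashlib

def config_suffix(config_body: str) -> str:
    # First 6 hex chars of a digest of the config body (assumed scheme).
    return hashlib.md5(config_body.encode('utf-8')).hexdigest()[:6]

assert config_suffix('prompt v1') != config_suffix('prompt v2')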
configs/datasets/CHARM/charm_reason_ppl_3da4de.py (new file, 57 lines)
@@ -0,0 +1,57 @@
import os

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.datasets import CharmDataset
from opencompass.openicl.icl_evaluator import AccwithDetailsEvaluator

charm_tasks = [
    ['Chinese_Anachronisms_Judgment', 'AB'],
    ['Chinese_Movie_and_Music_Recommendation', 'ABCD'],
    ['Chinese_Natural_Language_Inference', 'ABC'],
    ['Chinese_Reading_Comprehension', 'ABCD'],
    ['Chinese_Sequence_Understanding', 'ABCD'],
    ['Chinese_Sport_Understanding', 'AB'],
    ['Chinese_Time_Understanding', 'ABCD'],
    ['Global_Anachronisms_Judgment', 'AB'],
    ['Global_Movie_and_Music_Recommendation', 'ABCD'],
    ['Global_Natural_Language_Inference', 'ABC'],
    ['Global_Reading_Comprehension', 'ABCD'],
    ['Global_Sequence_Understanding', 'ABCD'],
    ['Global_Sport_Understanding', 'AB'],
    ['Global_Time_Understanding', 'ABCDEF'],
]

charm_reason_datasets = []
for task_name, options in charm_tasks:

    with open(os.path.join(os.path.dirname(__file__), 'few-shot-examples', f'{task_name}_Direct.txt'), 'r') as f:
        few_shot_example = f.read()

    charm_reason_reader_cfg = dict(input_columns=['input'], output_column='target')

    charm_reason_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template={
                f'({opt})': f'{few_shot_example}\n{{input}}\nA: {opt}' for opt in options
            },
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=PPLInferencer),
    )

    charm_reason_eval_cfg = dict(evaluator=dict(type=AccwithDetailsEvaluator))

    charm_reason_datasets.append(
        dict(
            type=CharmDataset,
            abbr=f'charm-reason-{task_name}_Direct',
            path=f'data/CHARM/reasoning',
            name=task_name,
            reader_cfg=charm_reason_reader_cfg,
            infer_cfg=charm_reason_infer_cfg,
            eval_cfg=charm_reason_eval_cfg,
        )
    )
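To see what the per-option template comprehension above produces, here is a toy expansion with made-up strings (illustration only); `PPLInferencer` then scores each candidate continuation and the evaluator credits the lowest-perplexity option:

few_shot_example = 'Q: sample question\nA: (B)'
options = 'AB'
template = {f'({opt})': f'{few_shot_example}\n{{input}}\nA: {opt}' for opt in options}
# {'(A)': 'Q: sample question\nA: (B)\n{input}\nA: A',
#  '(B)': 'Q: sample question\nA: (B)\n{input}\nA: B'}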
@@ -1,7 +1,7 @@
 from opencompass.openicl.icl_prompt_template import PromptTemplate
 from opencompass.openicl.icl_retriever import ZeroRetriever
 from opencompass.openicl.icl_inferencer import GenInferencer
-from opencompass.datasets import HFDataset, HumanEvaluator, humaneval_postprocess
+from opencompass.datasets import HFDataset, HumanEvalEvaluator, humaneval_postprocess

 apps_reader_cfg = dict(
     input_columns=['question'], output_column='problem_id', train_split='test')
@@ -17,7 +17,7 @@ apps_infer_cfg = dict(
     inferencer=dict(type=GenInferencer))

 apps_eval_cfg = dict(
-    evaluator=dict(type=HumanEvaluator),
+    evaluator=dict(type=HumanEvalEvaluator),
     pred_role='BOT',
     k=[1, 10, 100],  # the parameter only for humaneval
     pred_postprocessor=dict(type=humaneval_postprocess),
@@ -1,7 +1,7 @@
 from opencompass.openicl.icl_prompt_template import PromptTemplate
 from opencompass.openicl.icl_retriever import ZeroRetriever
 from opencompass.openicl.icl_inferencer import GenInferencer
-from opencompass.datasets import HFDataset, HumanEvaluator, humaneval_postprocess
+from opencompass.datasets import HFDataset, HumanEvalEvaluator, humaneval_postprocess

 apps_reader_cfg = dict(
     input_columns=['question'], output_column='problem_id', train_split='test')
@@ -24,7 +24,7 @@ apps_infer_cfg = dict(
     inferencer=dict(type=GenInferencer))

 apps_eval_cfg = dict(
-    evaluator=dict(type=HumanEvaluator),
+    evaluator=dict(type=HumanEvalEvaluator),
     pred_role='BOT',
     k=[1, 10, 100],  # the parameter only for humaneval
     pred_postprocessor=dict(type=humaneval_postprocess),
@@ -1,7 +1,7 @@
 from opencompass.openicl.icl_prompt_template import PromptTemplate
 from opencompass.openicl.icl_retriever import ZeroRetriever
 from opencompass.openicl.icl_inferencer import GenInferencer
-from opencompass.datasets import HFDataset, HumanEvaluator, humaneval_postprocess
+from opencompass.datasets import HFDataset, HumanEvalEvaluator, humaneval_postprocess

 apps_reader_cfg = dict(
     input_columns=['question'], output_column='problem_id', train_split='test')
@@ -15,7 +15,7 @@ apps_infer_cfg = dict(
     inferencer=dict(type=GenInferencer, max_out_len=512))

 apps_eval_cfg = dict(
-    evaluator=dict(type=HumanEvaluator),
+    evaluator=dict(type=HumanEvalEvaluator),
     k=[1, 10, 100],
     pred_postprocessor=dict(type=humaneval_postprocess),
 )
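All three apps configs pick up the `HumanEvaluator` to `HumanEvalEvaluator` rename. For out-of-tree configs that still import the old name, a hypothetical compatibility shim could look like this (an illustration, not part of this commit):

try:
    from opencompass.datasets import HumanEvalEvaluator
except ImportError:  # older OpenCompass checkouts predate the rename
    from opencompass.datasets import HumanEvaluator as HumanEvalEvaluator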
@@ -14,7 +14,7 @@ with read_base():
     from ..gsm8k.gsm8k_gen_ee684f import gsm8k_datasets
     from ..math.math_evaluatorv2_gen_2f4a71 import math_datasets
     from ..TheoremQA.TheoremQA_post_v2_gen_2c2583 import TheoremQA_datasets
-    from ..humaneval.humaneval_gen_d2537e import humaneval_datasets
+    from ..humaneval.deprecated_humaneval_gen_d2537e import humaneval_datasets
     from ..mbpp.deprecated_sanitized_mbpp_gen_cb43ef import sanitized_mbpp_datasets

 datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
@@ -6,7 +6,7 @@ with read_base():
     from ..agieval.agieval_mixed_713d14 import agieval_datasets
     from ..GaokaoBench.GaokaoBench_mixed_9af5ee import GaokaoBench_datasets
     from ..bbh.bbh_gen_5b92b0 import bbh_datasets
-    from ..humaneval.humaneval_gen_a82cae import humaneval_datasets
+    from ..humaneval.deprecated_humaneval_gen_a82cae import humaneval_datasets
     from ..mbpp.deprecated_mbpp_gen_1e1056 import mbpp_datasets
     from ..CLUE_C3.CLUE_C3_ppl_e24a31 import C3_datasets
     from ..CLUE_CMRC.CLUE_CMRC_gen_1bd3c8 import CMRC_datasets
@@ -5,8 +5,8 @@ with read_base():
     from ..cmmlu.cmmlu_gen_c13365 import cmmlu_datasets
     from ..ceval.ceval_internal_gen_2daf24 import ceval_datasets
     from ..GaokaoBench.GaokaoBench_no_subjective_gen_4c31db import GaokaoBench_datasets
-    from ..triviaqa.triviaqa_wiki_1shot_gen_eaf81e import triviaqa_datasets
-    from ..nq.nq_open_1shot_gen_01cf41 import nq_datasets
+    from ..triviaqa.triviaqa_wiki_1shot_gen_bc5f21 import triviaqa_datasets
+    from ..nq.nq_open_1shot_gen_2e45e5 import nq_datasets
     from ..race.race_gen_69ee4f import race_datasets
     from ..winogrande.winogrande_5shot_gen_6447e6 import winogrande_datasets
     from ..hellaswag.hellaswag_10shot_gen_e42710 import hellaswag_datasets
@@ -42,8 +42,8 @@ with read_base():
     from ...siqa.siqa_ppl_e8d8c5 import siqa_datasets
     from ...math.math_gen_265cce import math_datasets
     from ...gsm8k.gsm8k_gen_1d7fe4 import gsm8k_datasets
-    from ...drop.drop_gen_8a9ed9 import drop_datasets
-    from ...humaneval.humaneval_gen_a82cae import humaneval_datasets
+    from ...drop.deprecated_drop_gen_8a9ed9 import drop_datasets
+    from ...humaneval.deprecated_humaneval_gen_a82cae import humaneval_datasets
     from ...mbpp.deprecated_mbpp_gen_1e1056 import mbpp_datasets
     from ...bbh.bbh_gen_5bf00b import bbh_datasets

@@ -42,8 +42,8 @@ with read_base():
     from ...siqa.siqa_ppl_e8d8c5 import siqa_datasets
     from ...math.math_gen_265cce import math_datasets
     from ...gsm8k.gsm8k_gen_1d7fe4 import gsm8k_datasets
-    from ...drop.drop_gen_8a9ed9 import drop_datasets
-    from ...humaneval.humaneval_gen_a82cae import humaneval_datasets
+    from ...drop.deprecated_drop_gen_8a9ed9 import drop_datasets
+    from ...humaneval.deprecated_humaneval_gen_a82cae import humaneval_datasets
     from ...mbpp.deprecated_mbpp_gen_1e1056 import mbpp_datasets
     from ...bbh.bbh_gen_5b92b0 import bbh_datasets

@@ -0,0 +1,57 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import AgentInferencer
from opencompass.datasets import CIBenchDataset, CIBenchEvaluator

libs = [
    '/lightgbm',
    '/matplotlib',
    '/nltk',
    '/opencv',
    '/pandas',
    '/pytorch',
    '/scipy',
    '/seaborn',
    '/sklearn',
    '/tensorflow',
    '_chinese/lightgbm',
    '_chinese/matplotlib',
    '_chinese/nltk',
    '_chinese/opencv',
    '_chinese/pandas',
    '_chinese/pytorch',
    '_chinese/scipy',
    '_chinese/seaborn',
    '_chinese/sklearn',
    '_chinese/tensorflow',
]


cibench_datasets = []
for lib in libs:
    cibench_reader_cfg = dict(
        input_columns=['questions'], output_column='references', train_split='test', test_split='test'
    )

    cibench_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template='{questions}',
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=AgentInferencer, infer_mode='every'),
    )

    cibench_eval_cfg = dict(evaluator=dict(type=CIBenchEvaluator), pred_role='BOT')

    cibench_datasets.append(
        dict(
            abbr=f'cibench_template{lib}',
            type=CIBenchDataset,
            path=f'data/compassbench_v1.1/agent-cibench/cibench_template{lib}',
            internet_check=False,
            reader_cfg=cibench_reader_cfg,
            infer_cfg=cibench_infer_cfg,
            eval_cfg=cibench_eval_cfg,
        )
    )
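A quick pure-Python check of the expansion above: the 20 entries in `libs` (10 English, 10 `_chinese`) yield abbrs such as `cibench_template/pandas` and `cibench_template_chinese/pandas`:

assert len(libs) == 20
assert [f'cibench_template{lib}' for lib in libs[:2]] == [
    'cibench_template/lightgbm', 'cibench_template/matplotlib']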
@@ -0,0 +1,56 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import ChatInferencer
from opencompass.openicl.icl_evaluator import TEvalEvaluator
from opencompass.datasets import teval_postprocess, TEvalDataset

plugin_eval_subject_mapping = {
    'instruct': ['instruct_v1'],
    'instruct_zh': ['instruct_v1_zh'],
    'plan': ['plan_json_v1', 'plan_str_v1'],
    'plan_zh': ['plan_json_v1_zh', 'plan_str_v1_zh'],
    'review': ['review_str_v1'],
    'review_zh': ['review_str_v1_zh'],
    'reason_retrieve_understand': ['reason_retrieve_understand_json_v1'],
    'reason_retrieve_understand_zh': ['reason_retrieve_understand_json_v1_zh'],
    'reason': ['reason_str_v1'],
    'reason_zh': ['reason_str_v1_zh'],
    'retrieve': ['retrieve_str_v1'],
    'retrieve_zh': ['retrieve_str_v1_zh'],
    'understand': ['understand_str_v1'],
    'understand_zh': ['understand_str_v1_zh'],
}

plugin_eval_datasets = []
for _name in plugin_eval_subject_mapping:
    plugin_eval_reader_cfg = dict(input_columns=['prompt'], output_column='ground_truth')
    plugin_eval_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(role='HUMAN', prompt='{prompt}'),
                ],
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=ChatInferencer),
    )
    plugin_eval_eval_cfg = dict(
        evaluator=dict(type=TEvalEvaluator, subset=_name),
        pred_postprocessor=dict(type=teval_postprocess),
        num_gpus=1,
    )

    for subset in plugin_eval_subject_mapping[_name]:
        plugin_eval_datasets.append(
            dict(
                abbr='plugin_eval-mus-p10-' + subset,
                type=TEvalDataset,
                path='data/compassbench_v1.1/agent-teval-p10',
                name=subset,
                reader_cfg=plugin_eval_reader_cfg,
                infer_cfg=plugin_eval_infer_cfg,
                eval_cfg=plugin_eval_eval_cfg,
            )
        )
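Since `plan` and `plan_zh` each map to two subsets, the 14 subjects expand to 16 dataset entries; a pure-Python sanity check against the mapping above:

subsets = [s for v in plugin_eval_subject_mapping.values() for s in v]
assert len(subsets) == 16
assert subsets[0] == 'instruct_v1'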
@@ -0,0 +1,291 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, HumanEvalPlusEvaluator, humaneval_postprocess_v2
from opencompass.datasets import MBPPDataset, SanitizedMBPPDataset, MBPPEvaluator
from opencompass.datasets import HumanevalXDataset, HumanevalXEvaluator
from opencompass.datasets import LCDataset, LCPassKEvaluator
from opencompass.datasets import TACODataset, TACOEvaluator

compassbench_v1_1_code_datasets = []

# --------------------------------------------------------------- HumanEval CN ---------------------------------------------------------------
humaneval_reader_cfg = dict(input_columns=['prompt'], output_column='task_id', train_split='test')

humaneval_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='完成以下Python代码任务:\n{prompt}'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvalEvaluator),
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
)

compassbench_v1_1_code_datasets.append(
    dict(
        abbr='openai_humaneval_cn',
        type=HumanevalDataset,
        path='data/compassbench_v1.1/code/humaneval_cn/human-eval-cn-v2-20210705.jsonl',
        reader_cfg=humaneval_reader_cfg,
        infer_cfg=humaneval_infer_cfg,
        eval_cfg=humaneval_eval_cfg,
    )
)

# --------------------------------------------------------------- HumanEval Plus ---------------------------------------------------------------
humaneval_plus_reader_cfg = dict(input_columns=['prompt'], output_column='task_id', train_split='test')

# TODO: allow empty output-column
humaneval_plus_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Complete the following python code:\n{prompt}'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

humaneval_plus_eval_cfg = dict(
    evaluator=dict(type=HumanEvalPlusEvaluator),
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
)

compassbench_v1_1_code_datasets.append(
    dict(
        abbr='humaneval_plus',
        type=HumanevalDataset,
        path='data/compassbench_v1.1/code/humaneval/human-eval-v2-20210705.jsonl',
        reader_cfg=humaneval_plus_reader_cfg,
        infer_cfg=humaneval_plus_infer_cfg,
        eval_cfg=humaneval_plus_eval_cfg,
    )
)

# --------------------------------------------------------------- MBPP CN ---------------------------------------------------------------
mbpp_reader_cfg = dict(input_columns=['text', 'test_list'], output_column='test_list_2')

mbpp_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,你的任务是:编写一个函数,从给定的两个元组列表中查找相似的元素。 你的代码应该通过这些测试:\n\n assert similar_elements((3, 4, 5, 6),(5, 7, 4, 10)) == (4, 5)\n assert similar_elements((1, 2, 3, 4),(5, 4, 3, 7)) == (3, 4) \n assert similar_elements((11, 12, 14, 13),(17, 15, 14, 13)) == (13, 14) \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def similar_elements(test_tup1, test_tup2):\r\n res = tuple(set(test_tup1) & set(test_tup2))\r\n return (res)' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,你的任务是:编写一个 Python 函数来识别一个整数是否不是素数。 你的代码应该通过这些测试:\n\n assert is_not_prime(2) == False \n assert is_not_prime(10) == True \n assert is_not_prime(35) == True \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'import math\r\ndef is_not_prime(n):\r\n result = False\r\n for i in range(2,int(math.sqrt(n)) + 1):\r\n if n % i == 0:\r\n result = True\r\n return result' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,你的任务是:编写一个函数,使用堆队列算法从给定的数字列表中查找最大整数。 你的代码应该通过这些测试:\n\n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],3)==[85, 75, 65] \n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],2)==[85, 75] \n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],5)==[85, 75, 65, 58, 35] \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'import heapq as hq\r\ndef heap_queue_largest(nums,n):\r\n largest_nums = hq.nlargest(n, nums)\r\n return largest_nums' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,你的任务是: {text} 你的代码应该通过这些测试:\n\n {test_list} \n'),
                dict(role='BOT', prompt='[BEGIN]\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

mbpp_eval_cfg = dict(evaluator=dict(type=MBPPEvaluator), pred_role='BOT')

compassbench_v1_1_code_datasets.append(
    dict(
        type=MBPPDataset,
        abbr='mbpp_cn',
        path='data/compassbench_v1.1/code/mbpp_cn/mbpp_cn.jsonl',
        reader_cfg=mbpp_reader_cfg,
        infer_cfg=mbpp_infer_cfg,
        eval_cfg=mbpp_eval_cfg,
    )
)

# --------------------------------------------------------------- Sanitized MBPP ---------------------------------------------------------------
sanitized_mbpp_reader_cfg = dict(input_columns=['text', 'test_list'], output_column='test_list_2')

sanitized_mbpp_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: Write a function to find the similar elements from the given two tuple lists. Your code should pass these tests:\n\n assert similar_elements((3, 4, 5, 6),(5, 7, 4, 10)) == (4, 5)\n assert similar_elements((1, 2, 3, 4),(5, 4, 3, 7)) == (3, 4) \n assert similar_elements((11, 12, 14, 13),(17, 15, 14, 13)) == (13, 14) \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def similar_elements(test_tup1, test_tup2):\r\n res = tuple(set(test_tup1) & set(test_tup2))\r\n return (res)' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: Write a python function to identify non-prime numbers. Your code should pass these tests:\n\n assert is_not_prime(2) == False \n assert is_not_prime(10) == True \n assert is_not_prime(35) == True \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'import math\r\ndef is_not_prime(n):\r\n result = False\r\n for i in range(2,int(math.sqrt(n)) + 1):\r\n if n % i == 0:\r\n result = True\r\n return result' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: Write a function to find the largest integers from a given list of numbers using heap queue algorithm. Your code should pass these tests:\n\n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],3)==[85, 75, 65] \n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],2)==[85, 75] \n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],5)==[85, 75, 65, 58, 35] \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'import heapq as hq\r\ndef heap_queue_largest(nums,n):\r\n largest_nums = hq.nlargest(n, nums)\r\n return largest_nums' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: {text} Your code should pass these tests:\n\n {test_list} \n'),
                dict(role='BOT', prompt='[BEGIN]\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

sanitized_mbpp_eval_cfg = dict(evaluator=dict(type=MBPPEvaluator), pred_role='BOT')

compassbench_v1_1_code_datasets.append(
    dict(
        type=SanitizedMBPPDataset,
        abbr='sanitized_mbpp',
        path='data/compassbench_v1.1/code/mbpp/sanitized-mbpp.jsonl',
        reader_cfg=sanitized_mbpp_reader_cfg,
        infer_cfg=sanitized_mbpp_infer_cfg,
        eval_cfg=sanitized_mbpp_eval_cfg,
    )
)

# --------------------------------------------------------------- HumanevalX ---------------------------------------------------------------
humanevalx_reader_cfg = dict(input_columns=['prompt'], output_column='declaration', train_split='test')

humanevalx_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template='{prompt}'),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024),
)

humanevalx_eval_cfg_dict = {
    lang: dict(
        evaluator=dict(
            type=HumanevalXEvaluator,
            language=lang,
            ip_address=
            'localhost',  # replace to your code_eval_server ip_address, port
            port=5001,
        ),  # refer to https://opencompass.readthedocs.io/en/latest/advanced_guides/code_eval_service.html to launch a server
    )
    for lang in ['python', 'cpp', 'go', 'java', 'js']  # do not support rust now
}

# Please download the needed `xx.jsonl.gz` from
# https://github.com/THUDM/CodeGeeX2/tree/main/benchmark/humanevalx
# and move them into `data/humanevalx/` folder
for lang in ['python', 'cpp', 'go', 'java', 'js']:
    compassbench_v1_1_code_datasets.append(
        dict(
            type=HumanevalXDataset,
            abbr=f'humanevalx-{lang}',
            language=lang,
            path='data/compassbench_v1.1/code/humanevalx',
            reader_cfg=humanevalx_reader_cfg,
            infer_cfg=humanevalx_infer_cfg,
            eval_cfg=humanevalx_eval_cfg_dict[lang],
        )
    )

# --------------------------------------------------------------- LCBench ---------------------------------------------------------------
LC_difficulties_list = ['EASY', 'MEDIUM', 'HARD']
LC_reader_cfg = dict(input_columns=['text', 'test_list'], output_column='test_column')


LC_en_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: You are given three positive integers n, x, and y.\nIn a city, there exist houses numbered 1 to n connected by n streets. There is a street connecting the house numbered i with the house numbered i + 1 for all 1 <= i <= n - 1 . An additional street connects the house numbered x with the house numbered y.\nFor each k, such that 1 <= k <= n, you need to find the number of pairs of houses (house1, house2) such that the minimum number of streets that need to be traveled to reach house2 from house1 is k.\nReturn a 1-indexed array result of length n where result[k] represents the total number of pairs of houses such that the minimum streets required to reach one house from the other is k.\nNote that x and y can be equal. Your code should pass these tests:\n\n assert countOfPairs(n = 3, x = 1, y = 3) == [6,0,0]\n assert countOfPairs(n = 5, x = 2, y = 4) == [10,8,2,0,0] \n assert countOfPairs(n = 4, x = 1, y = 1) == [6,4,2,0] \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'from itertools import accumulate\ndef countOfPairs(n, x, y):\n    x, y = min(x, y), max(x, y)\n    A = [0] * n\n    for i in range(1, n + 1):\n        A[0] += 2 \n        A[min(i - 1, abs(i - y) + x)] -= 1 \n        A[min(n - i, abs(i - x) + 1 + n - y)] -= 1 \n        A[min(abs(i - x), abs(y - i) + 1)] += 1 \n        A[min(abs(i - x) + 1, abs(y - i))] += 1 \n        r = max(x - i, 0) + max(i - y, 0)\n        A[r + (y - x + 0) // 2] -= 1 \n        A[r + (y - x + 1) // 2] -= 1 \n    return list(accumulate(A))' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: You are given a string word containing lowercase English letters.\nTelephone keypads have keys mapped with distinct collections of lowercase English letters, which can be used to form words by pushing them. For example, the key 2 is mapped with ["a","b","c"], we need to push the key one time to type "a", two times to type "b", and three times to type "c" .\nIt is allowed to remap the keys numbered 2 to 9 to distinct collections of letters. The keys can be remapped to any amount of letters, but each letter must be mapped to exactly one key. You need to find the minimum number of times the keys will be pushed to type the string word.\nReturn the minimum number of pushes needed to type word after remapping the keys.\nAn example mapping of letters to keys on a telephone keypad is given below. Note that 1, *, #, and 0 do not map to any letters. Your code should pass these tests:\n\n assert minimumPushes("abcde") == 5 \n assert minimumPushes("xyzxyzxyzxyz") == 12 \n assert minimumPushes("aabbccddeeffgghhiiiiii") == 24 \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def minimumPushes(word):\n    letter_counts = {}\n    for c in word:\n        letter_counts[c] = letter_counts.get(c, 0) + 1\n    counts = list(letter_counts.values())\n    counts.sort(reverse=True)\n    ans, row = 0, 1\n    for i in range(len(counts)):\n        if i > 7 and i % 8 == 0:\n            row += 1\n        ans += row * counts[i]\n    return ans' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt="You are an expert Python programmer, and here is your task: You are given an integer k and an integer x.\nConsider s is the 1-indexed binary representation of an integer num. The price of a number num is the number of i's such that i % x == 0 and s[i] is a set bit.\nReturn the greatest integer num such that the sum of prices of all numbers from 1 to num is less than or equal to k.\nNote:\nIn the binary representation of a number set bit is a bit of value 1.\nThe binary representation of a number will be indexed from right to left. For example, if s == 11100, s[4] == 1 and s[2] == 0. Your code should pass these tests:\n\n assert findMaximumNumber(k = 9, x = 1) == 6 \n assert findMaximumNumber(k = 7, x = 2) == 9 \n"),
                dict(role='BOT', prompt="[BEGIN]\n 'def findMaximumNumber(k, x):\n    def check(v):\n        A = list(map(int, bin(v)[2:]))\n        n = len(A)\n        res = p = 0\n        for i,v in enumerate(A):\n            if v == 1:\n                l = n - i - 1\n                res += (p << l) + ((l // x) << (l - 1) if l else 0)\n            if (n - i) % x == 0:\n                p += v\n        return res + p\n    l, r = 1, 10 ** 15\n    while l < r:\n        mid = (l + r + 1) // 2\n        if check(mid) <= k:\n            l = mid\n        else:\n            r = mid - 1\n    return l' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: {text} Your code should pass these tests:\n\n {test_list} \n'),
                dict(role='BOT', prompt='[BEGIN]\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)


LC_cn_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你三个 正整数 n 、x 和 y 。\n在城市中,存在编号从 1 到 n 的房屋,由 n 条街道相连。对所有 1 <= i < n ,都存在一条街道连接编号为 i 的房屋与编号为 i + 1 的房屋。另存在一条街道连接编号为 x 的房屋与编号为 y 的房屋。\n对于每个 k(1 <= k <= n),你需要找出所有满足要求的 房屋对 [house1, house2] ,即从 house1 到 house2 需要经过的 最少 街道数为 k 。\n返回一个下标从 1 开始且长度为 n 的数组 result ,其中 result[k] 表示所有满足要求的房屋对的数量,即从一个房屋到另一个房屋需要经过的 最少 街道数为 k 。\n注意,x 与 y 可以 相等。你的代码需要通过以下测试:\n\n assert countOfPairs(n = 3, x = 1, y = 3) == [6,0,0]\n assert countOfPairs(n = 5, x = 2, y = 4) == [10,8,2,0,0] \n assert countOfPairs(n = 4, x = 1, y = 1) == [6,4,2,0] \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'from itertools import accumulate\ndef countOfPairs(n, x, y):\n    x, y = min(x, y), max(x, y)\n    A = [0] * n\n    for i in range(1, n + 1):\n        A[0] += 2 \n        A[min(i - 1, abs(i - y) + x)] -= 1 \n        A[min(n - i, abs(i - x) + 1 + n - y)] -= 1 \n        A[min(abs(i - x), abs(y - i) + 1)] += 1 \n        A[min(abs(i - x) + 1, abs(y - i))] += 1 \n        r = max(x - i, 0) + max(i - y, 0)\n        A[r + (y - x + 0) // 2] -= 1 \n        A[r + (y - x + 1) // 2] -= 1 \n    return list(accumulate(A))' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你一个字符串 word,由 不同 小写英文字母组成。\n电话键盘上的按键与 不同 小写英文字母集合相映射,可以通过按压按键来组成单词。例如,按键 2 对应 ["a","b","c"],我们需要按一次键来输入 "a",按两次键来输入 "b",按三次键来输入 "c"。\n现在允许你将编号为 2 到 9 的按键重新映射到 不同 字母集合。每个按键可以映射到 任意数量 的字母,但每个字母 必须 恰好 映射到 一个 按键上。你需要找到输入字符串 word 所需的 最少 按键次数。\n返回重新映射按键后输入 word 所需的 最少 按键次数。\n下面给出了一种电话键盘上字母到按键的映射作为示例。注意 1,*,# 和 0 不 对应任何字母。你的代码需要通过以下测试:\n\n assert minimumPushes("abcde") == 5 \n assert minimumPushes("xyzxyzxyzxyz") == 12 \n assert minimumPushes("aabbccddeeffgghhiiiiii") == 24 \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def minimumPushes(word):\n    letter_counts = {}\n    for c in word:\n        letter_counts[c] = letter_counts.get(c, 0) + 1\n    counts = list(letter_counts.values())\n    counts.sort(reverse=True)\n    ans, row = 0, 1\n    for i in range(len(counts)):\n        if i > 7 and i % 8 == 0:\n            row += 1\n        ans += row * counts[i]\n    return ans' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你一个整数 k 和一个整数 x 。\n令 s 为整数 num 的下标从 1 开始的二进制表示。我们说一个整数 num 的 价值 是满足 i % x == 0 且 s[i] 是 设置位 的 i 的数目。\n请你返回 最大 整数 num ,满足从 1 到 num 的所有整数的 价值 和小于等于 k 。\n注意:\n一个整数二进制表示下 设置位 是值为 1 的数位。\n一个整数的二进制表示下标从右到左编号,比方说如果 s == 11100 ,那么 s[4] == 1 且 s[2] == 0。你的代码需要通过以下测试:\n\n assert findMaximumNumber(k = 9, x = 1) == 6 \n assert findMaximumNumber(k = 7, x = 2) == 9 \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def findMaximumNumber(k, x):\n    def check(v):\n        A = list(map(int, bin(v)[2:]))\n        n = len(A)\n        res = p = 0\n        for i,v in enumerate(A):\n            if v == 1:\n                l = n - i - 1\n                res += (p << l) + ((l // x) << (l - 1) if l else 0)\n            if (n - i) % x == 0:\n                p += v\n        return res + p\n    l, r = 1, 10 ** 15\n    while l < r:\n        mid = (l + r + 1) // 2\n        if check(mid) <= k:\n            l = mid\n        else:\n            r = mid - 1\n    return l' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: {text} 你的代码需要通过以下测试:\n\n {test_list} \n'),
                dict(role='BOT', prompt='[BEGIN]\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)


LC_eval_cfg = dict(evaluator=dict(type=LCPassKEvaluator), pred_role='BOT')

for difficulty in LC_difficulties_list:
    compassbench_v1_1_code_datasets.append(
        dict(
            type=LCDataset,
            abbr='lcbench_en-' + difficulty,
            path='data/compassbench_v1.1/code/LCBench2023/LCBench2023.jsonl',
            difficulty=difficulty,
            reader_cfg=LC_reader_cfg,
            infer_cfg=LC_en_infer_cfg,
            eval_cfg=LC_eval_cfg,
        )
    )
    compassbench_v1_1_code_datasets.append(
        dict(
            type=LCDataset,
            abbr='lcbench_cn-' + difficulty,
            path='data/compassbench_v1.1/code/LCBench2023/LCBench2023_cn.jsonl',
            difficulty=difficulty,
            reader_cfg=LC_reader_cfg,
            infer_cfg=LC_cn_infer_cfg,
            eval_cfg=LC_eval_cfg,
        )
    )


# --------------------------------------------------------------- TACO ---------------------------------------------------------------
TACO_difficulties_list = ['EASY', 'MEDIUM', 'MEDIUM_HARD', 'HARD', 'VERY_HARD']
TACO_reader_cfg = dict(input_columns=['question', 'starter'], output_column='problem_id', train_split='test')

TACO_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Please write a python program to address the following QUESTION. Your ANSWER should be in a code block format like this: ```python # Write your code here ```. \nQUESTION:\n{question} {starter}\nANSWER:\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024),
)

TACO_eval_cfg = dict(evaluator=dict(type=TACOEvaluator), pred_role='BOT')

for difficulty in TACO_difficulties_list:
    compassbench_v1_1_code_datasets.append(
        dict(
            type=TACODataset,
            abbr='TACO-' + difficulty,
            path='data/compassbench_v1.1/code/BAAI-TACO',
            difficulty=difficulty,
            reader_cfg=TACO_reader_cfg,
            infer_cfg=TACO_infer_cfg,
            eval_cfg=TACO_eval_cfg,
        )
    )
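The MBPP-style few-shot turns wrap reference solutions as `[BEGIN]\n '<code>' \n[DONE]`, so completions can be recovered by slicing between the markers. A rough sketch of that extraction, for illustration only (an assumption, not the actual postprocessing used by `MBPPEvaluator`):

def extract_completion(text: str) -> str:
    # Keep what comes after the last [BEGIN] and before the first [DONE].
    text = text.split('[DONE]')[0]
    text = text.split('[BEGIN]')[-1]
    return text.strip().strip("'")

print(extract_completion("[BEGIN]\n 'def f():\n    return 1' \n[DONE]"))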
@@ -0,0 +1,133 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
from opencompass.datasets import WikiBenchDataset
from opencompass.utils.text_postprocessors import first_option_postprocess

# ----------------------- Prompt Content----------------------- #
zero_shot_prompts = {
    'single_choice_prompts': [
        dict(role='HUMAN', prompt='以下是一道单项选择题,请你根据你了解的知识给出正确的答案选项。\n下面是你要回答的题目::\n{question}\n答案选项:'),
        dict(role='BOT', prompt='{answer}')
    ]
}
few_shot_prompts = {
    'single_choice_prompts': {
        'single_choice_cn': [
            dict(role='HUMAN', prompt='题目:“一丝不苟”中的“丝”的本意是( )。\nA. 计量单位\nB. 丝线\nC. 丝绸\nD. 发丝'),
            dict(role='BOT', prompt='答案:A'),
            dict(role='HUMAN', prompt='题目:五华县体育场位于哪个省?\nA. 湖南省\nB. 浙江省\nC. 广东省\nD. 江苏省'),
            dict(role='BOT', prompt='答案:C'),
            dict(role='HUMAN', prompt='题目:“西施犬的原产地是哪里?\nA. 印度\nB. 中国\nC. 西藏\nD. 台湾'),
            dict(role='BOT', prompt='答案:C'),
            dict(role='HUMAN', prompt='题目:四库全书的四库是指什么?\nA. 易、书、诗、礼\nB. 经、史、子、音\nC. 诗、书、音、律\nD. 经、史、子、集'),
            dict(role='BOT', prompt='答案:D'),
            dict(role='HUMAN', prompt='题目:{question}'),
        ]}
}


# ----------------------- Prompt Template----------------------- #

# Use Zero-Shot or not
with_few_shot = True

# Max for this dataset is 4, should be set with `with_few_shot`
few_shot_samples = 4

# Use circular evaluation or not
with_circular_eval = True

single_choice_prompts = zero_shot_prompts['single_choice_prompts'] if not with_few_shot else few_shot_prompts['single_choice_prompts']

# Set few shot prompt number
if with_few_shot:
    assert few_shot_samples > 0
    for _name in list(single_choice_prompts.keys()):
        single_choice_prompts[_name] = single_choice_prompts[_name][- few_shot_samples * 2 - 2:]

compassbench_v1_knowledge_sets = {
    'common_knowledge': ['single_choice_cn'],
    'humanity': ['single_choice_cn'],
    'natural_science': ['single_choice_cn'],
    'social_science': ['single_choice_cn'],
}


# ----------------------- Dataset Config----------------------- #
compassbench_v1_knowledge_datasets = []

for _split in list(compassbench_v1_knowledge_sets.keys()):
    for _name in compassbench_v1_knowledge_sets[_split]:
        compassbench_v1_knowledge_reader_cfg = dict(input_columns=['question'], output_column='answer')

        compassbench_v1_knowledge_infer_cfg = dict(
            ice_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin='</E>',
                    round=single_choice_prompts[_name]
                ),
                ice_token='</E>',
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        )
        compassbench_v1_knowledge_eval_cfg = dict(
            evaluator=dict(type=CircularEvaluator if with_circular_eval else AccEvaluator),
            pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'),
        )

        compassbench_v1_knowledge_datasets.append(
            dict(
                type=WikiBenchDataset,
                abbr='compassbench_v1_knowledge-' + _split + '-' + _name + '_' + 'circular' if with_circular_eval else '',
                path=f'data/compassbench_v1.1/knowledge/{_split}/{_name}.jsonl',
                name=_name + '_circular' if with_circular_eval else _name,
                reader_cfg=compassbench_v1_knowledge_reader_cfg,
                infer_cfg=compassbench_v1_knowledge_infer_cfg,
                eval_cfg=compassbench_v1_knowledge_eval_cfg,
            )
        )


from opencompass.datasets import TriviaQADataset_V3, TriviaQAEvaluator

triviaqa_and_nq_reader_cfg = dict(input_columns=['question'], output_column='answer')

triviaqa_and_nq_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Question: When do stores stop selling alcohol in indiana?'),
                dict(role='BOT', prompt='Answer: 3 a.m.'),
                dict(role='HUMAN', prompt='Question: Edinburgh of the Seven Seas is the capital of which group of islands?'),
                dict(role='BOT', prompt='Answer: Tristan da Cunha'),
                dict(role='HUMAN', prompt='Question: Which book of the Christian Bible\'s new testament comprises a letter from St Paul to members of a church that he had founded at Macedonia?'),
                dict(role='BOT', prompt='Answer: Philippians'),
                dict(role='HUMAN', prompt='Question: The Hindu deity Hanuman appears in the form of which animal?'),
                dict(role='BOT', prompt='Answer: A monkey'),
                dict(role='HUMAN', prompt='Question: Who hosts the ITV quiz show The Chase?'),
                dict(role='BOT', prompt='Answer: Bradley Walsh'),
                dict(role='HUMAN', prompt='Question: {question}'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=['Question:']),
)

triviaqa_and_nq_eval_cfg = dict(evaluator=dict(type=TriviaQAEvaluator), pred_role='BOT')

compassbench_v1_knowledge_datasets.append(
    dict(
        type=TriviaQADataset_V3,
        abbr='compassbench_v1_knowledge-mixed-cloze_en',
        path='data/compassbench_v1.1/knowledge/mixed/cloze_en.jsonl',
        reader_cfg=triviaqa_and_nq_reader_cfg,
        infer_cfg=triviaqa_and_nq_infer_cfg,
        eval_cfg=triviaqa_and_nq_eval_cfg
    )
)
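The `[- few_shot_samples * 2 - 2:]` slice keeps the tail of the turn list: the last `few_shot_samples` HUMAN/BOT example pairs plus the final query turn(s). A toy check with a made-up list of alternating turns (illustration only):

rounds = ['q1', 'a1', 'q2', 'a2', 'q3', 'a3', 'q4', 'a4', 'q_final', 'a_final']
few_shot_samples = 2
print(rounds[-few_shot_samples * 2 - 2:])
# ['q3', 'a3', 'q4', 'a4', 'q_final', 'a_final']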
@@ -0,0 +1,46 @@
from opencompass.openicl.icl_evaluator import EMEvaluator, BleuFloresEvaluator, RougeEvaluator, JiebaRougeEvaluator

compassbench_v1_language_datasets = [
    # dict(path='data/compassbench_v1.1/language/information_retrieval_en.jsonl',
    #      abbr='information_retrieval_en', data_type= 'qa', infer_method='gen', output_column='answers',
    #      human_prompt='{context}\n{origin_question}\nConcise and direct answer is',
    #      evaluator=EMEvaluator),
    # dict(path='data/compassbench_v1.1/language/information_retrieval_zh.jsonl',
    #      abbr='information_retrieval_zh', data_type= 'qa', infer_method='gen', output_column='answers',
    #      human_prompt='{context}\n{origin_question}\n简单直接的回答是',
    #      evaluator=EMEvaluator),

    dict(path='data/compassbench_v1.1/language/intention_recognition_en.jsonl',
         abbr='intention_recognition_en_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/language/intention_recognition_zh.jsonl',
         abbr='intention_recognition_zh_circular', data_type='circular-mcq', infer_method='gen'),

    dict(path='data/compassbench_v1.1/language/sentiment_analysis_en.jsonl',
         abbr='sentiment_analysis_en_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/language/sentiment_analysis_zh.jsonl',
         abbr='sentiment_analysis_zh_circular', data_type='circular-mcq', infer_method='gen'),

    dict(path='data/compassbench_v1.1/language/translation.jsonl',
         abbr='translation', data_type= 'qa', infer_method='gen',
         evaluator=BleuFloresEvaluator),

    dict(path='data/compassbench_v1.1/language/content_critic_en.jsonl',
         abbr='content_critic_en_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/language/content_critic_zh.jsonl',
         abbr='content_critic_zh_circular', data_type='circular-mcq', infer_method='gen'),

    dict(path='data/compassbench_v1.1/language/content_summarization_en.jsonl',
         abbr='content_summarization_en', data_type= 'qa', infer_method='gen', output_column='summary',
         human_prompt='{article}\nSummary of the article is:\n',
         evaluator=RougeEvaluator),
    dict(path='data/compassbench_v1.1/language/content_summarization_zh.jsonl',
         abbr='content_summarization_zh', data_type= 'qa', infer_method='gen', output_column='summary',
         human_prompt='{article}\n上述内容摘要如下:\n',
         evaluator=JiebaRougeEvaluator),

    dict(path='data/compassbench_v1.1/language/traditional_cultural_understanding_zh.jsonl',
         abbr='traditional_cultural_understanding_zh_circular', data_type='circular-mcq', infer_method='gen'),

    dict(path='data/compassbench_v1.1/language/chinese_semantic_understanding_zh.jsonl',
         abbr='chinese_semantic_understanding_zh_circular', data_type='circular-mcq', infer_method='gen'),
]
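Each entry carries its evaluation style in `data_type`, which makes downstream grouping trivial; a pure-Python example over the list above (the commented-out information_retrieval entries are excluded):

mcq = [d['abbr'] for d in compassbench_v1_language_datasets
       if d['data_type'] == 'circular-mcq']
qa = [d['abbr'] for d in compassbench_v1_language_datasets
      if d['data_type'] == 'qa']
# 8 circular-mcq tasks and 3 qa tasks (translation plus two summarization sets)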
@@ -0,0 +1,81 @@
from mmengine.config import read_base
from copy import deepcopy
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer, PPLInferencer
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
from opencompass.datasets import MathBenchDataset, mathbench_postprocess
from opencompass.utils.text_postprocessors import first_option_postprocess

with read_base():
    from .mathbench_prompt import zero_shot_prompts, few_shot_prompts, mathbench_sets

# Max for this dataset is 4
num_shot = 4
# Generate reasoning path or not, only for single choice
with_reasoning = False
# Use circular evaluation or not
with_circular_eval = True
# Use PPL mode in single choice test or not
use_ppl_single_choice = False

assert 0 <= num_shot <= 4
if num_shot == 0:
    prompts = zero_shot_prompts
else:
    prompts = {name: p[- 2 * num_shot - 2:] for name, p in few_shot_prompts.items()}

compassbench_v1_1_math_datasets = []
for _split in mathbench_sets:
    for _name in mathbench_sets[_split]:
        if 'single_choice' in _name:
            if with_reasoning:
                template_round = prompts[_name + '_with_reasoning']
            else:
                template_round = prompts[_name]
        else:
            template_round = prompts[_name]

        if 'single_choice' in _name:
            pred_postprocessor = dict(type=first_option_postprocess, options='ABCD')
        else:
            pred_postprocessor = dict(type=mathbench_postprocess, name=_name)

        if 'single_choice' in _name and with_circular_eval:
            evaluator = dict(type=CircularEvaluator)
        else:
            evaluator = dict(type=AccEvaluator)

        # assemble the final config
        mathbench_reader_cfg = dict(input_columns=['question'], output_column='answer')
        if use_ppl_single_choice and 'single_choice' in _name and not with_reasoning:
            template = {}
            for answer in ['A', 'B', 'C', 'D']:
                one_template_round = deepcopy(template_round)
                one_template_round['round'][-1]['prompt'] = one_template_round['round'][-1]['prompt'].format(answer=answer)
                template[answer] = dict(round=one_template_round)
            mathbench_infer_cfg = dict(
                prompt_template=dict(type=PromptTemplate, template=template),
                retriever=dict(type=ZeroRetriever),
                inferencer=dict(type=PPLInferencer),
            )
        else:
            mathbench_infer_cfg = dict(
                prompt_template=dict(type=PromptTemplate, template=dict(round=template_round)),
                retriever=dict(type=ZeroRetriever),
                inferencer=dict(type=GenInferencer, max_out_len=2048),
            )
        mathbench_eval_cfg = dict(evaluator=evaluator, pred_postprocessor=pred_postprocessor)

        compassbench_v1_1_math_datasets.append(
            dict(
                abbr='mathbench-' + _split + '-' + _name,
                type=MathBenchDataset,
                path=f'data/compassbench_v1.1/math/{_split}',
                name=_name,
                with_circular=with_circular_eval,
                reader_cfg=mathbench_reader_cfg,
                infer_cfg=mathbench_infer_cfg,
                eval_cfg=mathbench_eval_cfg,
            )
        )
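Circular evaluation re-asks each multiple-choice item with the options rotated and credits the item only when the model is right under every rotation, which suppresses option-position bias. A toy illustration of the idea (not OpenCompass's `CircularEvaluator` implementation):

def circular_correct(answers_per_rotation):
    # `answers_per_rotation`: (predicted, gold) pairs, one per rotation of
    # the candidate options; credit only a clean sweep.
    return all(pred == gold for pred, gold in answers_per_rotation)

assert circular_correct([('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D')])
assert not circular_correct([('A', 'A'), ('A', 'B'), ('C', 'C'), ('D', 'D')])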
configs/datasets/compassbench_20_v1_1/math/mathbench_prompt.py (new file, 103 lines)
@ -0,0 +1,103 @@
|
|||||||
|
zero_shot_prompts = {
|
||||||
|
'single_choice_cn_with_reasoning': [
|
||||||
|
dict(role='HUMAN', prompt='问题: 以下是一道关于数学的单项选择题,请你一步一步推理,并在最后用“所以答案为选项X”给出答案,其中“X”为选项A,B,C,D中你认为正确的选项。下面是你要回答的问题\n{question}\n让我们一步一步思考:'),
|
||||||
|
],
|
||||||
|
'single_choice_cn': [
|
||||||
|
dict(role='HUMAN', prompt='问题: 以下是一道关于数学的单项选择题,请你直接回答正确答案的选项序号。\n下面是你要回答的题目:\n{question}\n所以答案是:'),
|
||||||
|
],
|
||||||
|
'single_choice_en_with_reasoning': [
|
||||||
|
dict(role='HUMAN', prompt='Question: Here is a multiple-choice question about mathematics. Please reason through it step by step, and at the end, provide your answer option with "Therefore, the correct answer is option X", Where "X" is the correct option you think from A,B,C,D. Here is the question you need to answer:\n{question}\nLet\'s think step by step:'),
|
||||||
|
],
|
||||||
|
'single_choice_en': [
|
||||||
|
dict(role='HUMAN', prompt='Question: Here is a multiple-choice question about mathematics. Please provide the correct answer option directly.\nHere is the question you need to answer:\n{question}\nThe answer is:'),
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
|
few_shot_prompts = {
|
||||||
|
'single_choice_cn': [
|
||||||
|
dict(role='HUMAN', prompt='问题: 已知i是虚数单位,z为复数,$2+\\frac{1}{i}=z(3+i)$,则在复平面内z对应的点位于____?\nA. 第一象限\nB. 第二象限\nC. 第三象限\nD. 第四象限'),
|
||||||
|
dict(role='BOT', prompt='回答: D'),
|
||||||
|
dict(role='HUMAN', prompt='问题: 将函数$y=\\tan(\\omega x-1)(\\omega>0)$的图像向左平移2个单位长度后,与函数$y=\\tan(\\omega x+3)$的图象重合,则的最小值等于____\nA. $2-\\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
|
||||||
|
dict(role='BOT', prompt='回答: D'),
|
||||||
|
dict(role='HUMAN', prompt='问题: 设$(1+2i)a+b=2i$,其中a,b为实数,则( )\nA. $a=1,b=-1$\nB. $a=1,b=1$\nC. $a=-1,b=1$\nD. $a=-1,b=-1$'),
|
||||||
|
dict(role='BOT', prompt='回答: A'),
|
||||||
|
dict(role='HUMAN', prompt='问题: 圆锥的底面半径为2,高为4.一个圆柱的下底面在圆锥的底面上,上底面的圆周在圆锥的侧面上,当圆柱侧面积为$4 \\pi$时,该圆柱的体积为____\nA. $\\pi$\nB. $2\\pi$\nC. $3\\pi$\nD. $4\\pi$'),
|
||||||
|
dict(role='BOT', prompt='回答: B'),
|
||||||
|
dict(role='HUMAN', prompt='问题: {question}'),
|
||||||
|
dict(role='BOT', prompt='回答: {answer}'),
|
||||||
|
],
|
||||||
|
'single_choice_cn_with_reasoning' : [
|
||||||
|
dict(role='HUMAN', prompt='问题: 已知i是虚数单位,z为复数,$2+\\frac{1}{i}=z(3+i)$,则在复平面内z对应的点位于____\nA. 第一象限\nB. 第二象限\nC. 第三象限\nD. 第四象限'),
|
||||||
|
dict(role='BOT', prompt='回答: 因为首先,我们将等式两边同时乘以$(3 + i)$的共轭复数$(3 - i)$,以便消去分母中的虚数部分:$z = \\frac{2 - i}{3 + i} \\cdot \\frac{3 - i}{3 - i}$,这样做的目的是利用复数乘法的性质,从而消去分母中的虚数部分。我们进行乘法运算:$z = \\frac{(2 - i)(3 - i)}{(3 + i)(3 - i)}$,$z = \\frac{6 - 2i - 3i + i^2}{9 - 3i + 3i - i^2}$,由于$i^2 = -1$,我们可以进一步简化:$z = \\frac{6 - 5i - 1}{9 + 1}$ $z = \\frac{{1}{2} - \\frac{1}{2}i$,在复平面上,这个点位于第四象限,因为它的实部是正的($\\frac{1}{2}$),而虚部是负的($-\\frac{1}{2}$)\n答案选项:D'),
|
||||||
|
dict(role='HUMAN', prompt='问题: 将函数$y=\\tan(\\omega x-1)(\\omega>0)$的图像向左平移2个单位长度后,与函数$y=\\tan(\\omega x+3)$的图象重合,则的最小值等于____\nA. $2-\\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
|
||||||
|
        dict(role='BOT', prompt='回答: 将函数$y=\\tan(\\omega x-1)$(其中$\\omega>0$)的图像向左平移2个单位长度后,得到的新函数为$y=\\tan(\\omega(x+2)-1)$。为了使这个新函数的图像与$y=\\tan(\\omega x+3)$的图像重合,我们需要满足以下条件:\n$\\tan(\\omega(x+2)-1) = \\tan(\\omega x+3)$\n由于正切函数的周期性,我们知道$\\tan(\\theta) = \\tan(\\theta + k\\pi)$,其中$k$是任意整数。因此,我们可以写出:\n$\\omega(x+2)-1 = \\omega x+3 + k\\pi$\n解这个方程,我们得到:$\\omega x + 2\\omega - 1 = \\omega x + 3 + k\\pi$\n$2\\omega - 1 = 3 + k\\pi$\n$\\omega = 2 + \\frac{k\\pi}{2}$\n由于$\\omega>0$,我们需要找到满足条件的最小正整数$k$。当$k=0$时,我们得到$\\omega=2$,这是满足条件的最小正值。因此,最小值等于2。答案选项:D'),
        dict(role='HUMAN', prompt='问题: 设$(1+2i)a+b=2i$,其中a,b为实数,则( )\nA. $a=1,b=-1$\nB. $a=1,b=1$\nC. $a=-1,b=1$\nD. $a=-1,b=-1$'),
        dict(role='BOT', prompt='回答: 我们可以将这个方程写成:$a + 2ai + b = 2i$,然后将实部和虚部分开:\n$a + b = 0$\n$2a = 2$\n由于$a + b = 0$,我们可以得到$a = -b$。将这个值代入$2a = 2$,我们得到$2(-b) = 2$,因此$b = -1$。因此,$a = 1$。答案选项:A'),
        dict(role='HUMAN', prompt='问题: 一直角三角形的两边长分别为3和4.则第三边的长为____\nA. 5\nB. $\\sqrt 7$\nC. $\\sqrt 5$\nD. 5或者$\\sqrt 7$'),
        dict(role='BOT', prompt='回答: 我们可以使用勾股定理来解决这个问题。勾股定理告诉我们,如果一个三角形的两条边长分别为$a$和$b$,那么第三条边的长度为$c = \\sqrt{a^2 + b^2}$。在这个问题中,$a = 3$,$b = 4$,因此$c = \\sqrt{3^2 + 4^2} = \\sqrt{9 + 16} = \\sqrt{25} = 5$。答案选项:A'),
        dict(role='HUMAN', prompt='问题: {question}'),
        dict(role='BOT', prompt='回答: {answer}'),
    ],
    'single_choice_en': [
        dict(role='HUMAN', prompt='Question: Given that $i$ is the imaginary unit and $z$ is a complex number, $2 + \\frac{1}{i} = z(3 + i)$, the point corresponding to $z$ in the complex plane is located in the ______\nA. First quadrant\nB. Second quadrant\nC. Third quadrant\nD. Fourth quadrant'),
        dict(role='BOT', prompt='Response: D'),
        dict(role='HUMAN', prompt='Question: The graph of the function $y = \\tan(\\omega x - 1)$ is shifted 2 units to the left and coincides with the graph of the function $y = \\tan(\\omega x + 3)$. The minimum value of $\\omega$ is ______\nA. $2 - \\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
        dict(role='BOT', prompt='Response: D'),
        dict(role='HUMAN', prompt='Question: If $(1 + 2i)a + b = 2i$, where $a$ and $b$ are real numbers, then ______\nA. $a = 1, b = -1$\nB. $a = 1, b = 1$\nC. $a = -1, b = 1$\nD. $a = -1, b = -1$'),
        dict(role='BOT', prompt='Response: A'),
        dict(role='HUMAN', prompt='Question: The radius of a cone is 2 and its height is 4. A cylinder has its lower base on the base of the cone and its upper base on the lateral surface of the cone. When the lateral surface area of the cylinder is $4\\pi$, the volume of the cylinder is ______\nA. $\\pi$\nB. $2\\pi$\nC. $3\\pi$\nD. $4\\pi$'),
        dict(role='BOT', prompt='Response: B'),
        dict(role='HUMAN', prompt='Question: {question}'),
        dict(role='BOT', prompt='Response: {answer}'),
    ],
    'single_choice_en_with_reasoning': [
        dict(role='HUMAN', prompt='Question: Given that $i$ is the imaginary unit and $z$ is a complex number, $2 + \\frac{1}{i} = z(3 + i)$, the point corresponding to $z$ in the complex plane is located in the ______\nA. First quadrant\nB. Second quadrant\nC. Third quadrant\nD. Fourth quadrant'),
        dict(role='BOT', prompt='Response: First, we multiply both sides of the equation by the conjugate of $(3 + i)$: $z = \\frac{2 - i}{3 + i} \\cdot \\frac{3 - i}{3 - i}$. We perform the multiplication: $z = \\frac{(2 - i)(3 - i)}{(3 + i)(3 - i)}$, $z = \\frac{6 - 2i - 3i + i^2}{9 - 3i + 3i - i^2}$. Since $i^2 = -1$, we can simplify this further: $z = \\frac{6 - 5i - 1}{9 + 1}$ $z = \\frac{1}{2} - \\frac{1}{2}i$. In the complex plane, this point is located in the fourth quadrant, because its real part is positive ($\\frac{1}{2}$) and its imaginary part is negative ($-\\frac{1}{2}$)\nAnswer option: D'),
        dict(role='HUMAN', prompt='Question: The graph of the function $y = \\tan(\\omega x - 1)$ is shifted 2 units to the left and coincides with the graph of the function $y = \\tan(\\omega x + 3)$. The minimum value of $\\omega$ is ______\nA. $2 - \\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
        dict(role='BOT', prompt='Response: In order for the graph of this new function to coincide with the graph of $y = \\tan(\\omega x + 3)$, we need to satisfy the following condition: $\\tan(\\omega(x + 2) - 1) = \\tan(\\omega x + 3)$. Therefore, we can write: $\\omega(x + 2) - 1 = \\omega x + 3 + k\\pi$. Solving this equation, we get: $\\omega x + 2\\omega - 1 = \\omega x + 3 + k\\pi$. $2\\omega - 1 = 3 + k\\pi$. $\\omega = 2 + \\frac{k\\pi}{2}$. Since $\\omega > 0$, we need to find the smallest positive integer $k$ that satisfies the condition. When $k = 0$, we get $\\omega = 2$, which is the smallest positive value that satisfies the condition. Therefore, the minimum value is 2. Answer option: D'),
        dict(role='HUMAN', prompt='Question: If $(1 + 2i)a + b = 2i$, where $a$ and $b$ are real numbers, then ______\nA. $a = 1, b = -1$\nB. $a = 1, b = 1$\nC. $a = -1, b = 1$\nD. $a = -1, b = -1$'),
        dict(role='BOT', prompt='Response: We can write this equation as: $a + 2ai + b = 2i$, and then separate the real and imaginary parts: $a + b = 0$. $2a = 2$. Since $a + b = 0$, we can get $a = -b$. Substituting this value into $2a = 2$, we get $2(-b) = 2$, so $b = -1$. Therefore, $a = 1$. Answer option: A'),
        dict(role='HUMAN', prompt='Question: A right triangle has two sides of lengths 3 and 4. The length of the third side is ______\nA. 5\nB. $\\sqrt 7$\nC. $\\sqrt 5$\nD. 5 or $\\sqrt 7$'),
        dict(role='BOT', prompt='Response: We can use the Pythagorean theorem to solve this problem. The Pythagorean theorem tells us that if the two sides of a triangle are $a$ and $b$, then the length of the third side is $c = \\sqrt{a^2 + b^2}$. In this problem, $a = 3$ and $b = 4$, so $c = \\sqrt{3^2 + 4^2} = \\sqrt{9 + 16} = \\sqrt{25} = 5$. Answer option: A'),
        dict(role='HUMAN', prompt='Question: {question}'),
        dict(role='BOT', prompt='Response: {answer}'),
    ],
    'cloze_cn': [
        dict(role='HUMAN', prompt='问题: 莎莎有五个玩具。在圣诞节,他从他的爸爸和妈妈那里各得到了两个玩具。现在他有多少个玩具?'),
        dict(role='BOT', prompt='回答: 她有5个玩具。他从妈妈那里得到了2个,所以之后他有5 + 2 = 7个玩具。然后他从爸爸那里得到了2个,所以总共他有7 + 2 = 9个玩具。答案是 9'),
        dict(role='HUMAN', prompt='问题: 服务器房间里有九台电脑。从周一到周四每天增加五台电脑。现在服务器房里有多少台电脑?'),
        dict(role='BOT', prompt='回答: 从周一到周四有4天。每天增加5台电脑。这意味着总共增加了4 * 5 = 20台电脑。一开始有9台电脑,所以现在有9 + 20 = 29台电脑。答案是 29'),
        dict(role='HUMAN', prompt='问题: 迈克尔有58个高尔夫球。星期二,他丢失了23个高尔夫球。星期三,他又丢失了2个。星期三结束时他还剩下多少个高尔夫球?'),
        dict(role='BOT', prompt='回答: 迈克尔一开始有58个球。星期二他丢失了23个,所以之后他还剩下58 - 23 = 35个球。星期三他又丢失了2个,所以现在他还剩下35 - 2 = 33个球。答案是 33'),
        dict(role='HUMAN', prompt='问题: 奥利弗有23美元。她用每个3美元的价格买了五个百吉饼。她还剩下多少钱?'),
        dict(role='BOT', prompt='回答: 她以每个3美元的价格买了5个百吉饼。这意味着她在百吉饼上花费了5 * 3 = 15美元。她一开始有23美元,所以现在她还剩下23 - 15 = 8美元。答案是 8'),
        dict(role='HUMAN', prompt='问题: {question}'),
        dict(role='BOT', prompt='回答: {answer}'),
    ],
    'cloze_en': [
        dict(role='HUMAN', prompt='Question: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?'),
        dict(role='BOT', prompt='Response: He has 5 toys. He got 2 from mom, so after that he has 5 + 2 = 7 toys. Then he got 2 more from dad, so in total he has 7 + 2 = 9 toys. The answer is 9.'),
        dict(role='HUMAN', prompt='Question: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?'),
        dict(role='BOT', prompt='Response: There are 4 days from monday to thursday. 5 computers were added each day. That means in total 4 * 5 = 20 computers were added. There were 9 computers in the beginning, so now there are 9 + 20 = 29 computers. The answer is 29.'),
        dict(role='HUMAN', prompt='Question: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?'),
        dict(role='BOT', prompt='Response: Michael initially had 58 balls. He lost 23 on Tuesday, so after that he has 58 - 23 = 35 balls. On Wednesday he lost 2 more so now he has 35 - 2 = 33 balls. The answer is 33.'),
        dict(role='HUMAN', prompt='Question: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?'),
        dict(role='BOT', prompt='Response: She bought 5 bagels for $3 each. This means she spent 5 * $3 = $15 on the bagels. She had $23 in beginning, so now she has $23 - $15 = $8. The answer is 8.'),
        dict(role='HUMAN', prompt='Question: {question}'),
        dict(role='BOT', prompt='Response: {answer}'),
    ],
}

mathbench_sets = {
    # Practice Part
    'college': ['single_choice_cn', 'single_choice_en'],
    'high': ['single_choice_cn', 'single_choice_en'],
    'middle': ['single_choice_cn', 'single_choice_en'],
    'primary': ['cloze_cn', 'cloze_en'],
    'arithmetic': ['cloze_en'],
    # Theory part
    'college_knowledge': ['single_choice_cn', 'single_choice_en'],
    'high_knowledge': ['single_choice_cn', 'single_choice_en'],
    'middle_knowledge': ['single_choice_cn', 'single_choice_en'],
    'primary_knowledge': ['single_choice_cn', 'single_choice_en'],
}
@ -0,0 +1,28 @@
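# Note: data_type='circular-mcq' marks an entry for circular evaluation
# (OpenCompass re-asks each question with its options rotated and counts a
# sample correct only if every rotation is answered correctly).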
compassbench_v1_reason_datasets = [
    dict(path='data/compassbench_v1.1/reason/CN_Commonsense.jsonl',
         abbr='reasonbench_cn_commonsense_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/reason/CN_Abductive_Alphanlg_translated.jsonl',
         abbr='reasonbench_cn_abductive_alphanlg_translated_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/reason/CN_Deductive_BBH3obj_translated.jsonl',
         abbr='reasonbench_cn_deductive_bbh3obj_translated_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/reason/CN_Deductive_LogiQA_zh.jsonl',
         abbr='reasonbench_cn_deductive_logiqa_zh_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/reason/CN_Inductive_DEER_translated.jsonl',
         abbr='reasonbench_cn_inductive_deer_translated_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/reason/CN_Inductive_selfgenerated.jsonl',
         abbr='reasonbench_cn_inductive_selfgenerated_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/reason/EN_Commonsense.jsonl',
         abbr='reasonbench_en_commonsense_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/reason/EN_Abductive_Alphanlg.jsonl',
         abbr='reasonbench_en_abductive_alphanlg_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/reason/EN_Deductive_BBH7obj.jsonl',
         abbr='reasonbench_en_deductive_bbh7obj_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/reason/EN_Deductive_LogiQA_zh_translated.jsonl',
         abbr='reasonbench_en_deductive_logiqa_zh_translated_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/reason/EN_Deductive_OCNLI_translated.jsonl',
         abbr='reasonbench_en_deductive_ocnli_translated_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/reason/EN_Inductive_DEER.jsonl',
         abbr='reasonbench_en_inductive_deer_circular', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1/reason/EN_Inductive_selfgenerated.jsonl',
         abbr='reasonbench_en_inductive_selfgenerated_circular', data_type='circular-mcq', infer_method='gen'),
]
@ -0,0 +1,57 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import AgentInferencer
from opencompass.datasets import CIBenchDataset, CIBenchEvaluator

libs = [
    '/lightgbm',
    '/matplotlib',
    '/nltk',
    '/opencv',
    '/pandas',
    '/pytorch',
    '/scipy',
    '/seaborn',
    '/sklearn',
    '/tensorflow',
    '_chinese/lightgbm',
    '_chinese/matplotlib',
    '_chinese/nltk',
    '_chinese/opencv',
    '_chinese/pandas',
    '_chinese/pytorch',
    '_chinese/scipy',
    '_chinese/seaborn',
    '_chinese/sklearn',
    '_chinese/tensorflow',
]
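# Each entry above is a path suffix appended to the `cibench_template` path
# below: '/<lib>' selects the English task set and '_chinese/<lib>' its Chinese
# counterpart, e.g. cibench_template/pandas vs cibench_template_chinese/pandas.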

cibench_datasets = []
for lib in libs:
    cibench_reader_cfg = dict(
        input_columns=['questions'], output_column='references', train_split='test', test_split='test'
    )

    cibench_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template='{questions}',
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=AgentInferencer, infer_mode='every'),
    )

    cibench_eval_cfg = dict(evaluator=dict(type=CIBenchEvaluator), pred_role='BOT')

    cibench_datasets.append(
        dict(
            abbr=f'cibench_template{lib}_public',
            type=CIBenchDataset,
            path=f'data/compassbench_v1.1.public/agent-cibench/cibench_template{lib}',
            internet_check=False,
            reader_cfg=cibench_reader_cfg,
            infer_cfg=cibench_infer_cfg,
            eval_cfg=cibench_eval_cfg,
        )
    )
@ -0,0 +1,56 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import ChatInferencer
from opencompass.openicl.icl_evaluator import TEvalEvaluator
from opencompass.datasets import teval_postprocess, TEvalDataset

plugin_eval_subject_mapping = {
    'instruct': ['instruct_v1'],
    'instruct_zh': ['instruct_v1_zh'],
    'plan': ['plan_json_v1', 'plan_str_v1'],
    'plan_zh': ['plan_json_v1_zh', 'plan_str_v1_zh'],
    'review': ['review_str_v1'],
    'review_zh': ['review_str_v1_zh'],
    'reason_retrieve_understand': ['reason_retrieve_understand_json_v1'],
    'reason_retrieve_understand_zh': ['reason_retrieve_understand_json_v1_zh'],
    'reason': ['reason_str_v1'],
    'reason_zh': ['reason_str_v1_zh'],
    'retrieve': ['retrieve_str_v1'],
    'retrieve_zh': ['retrieve_str_v1_zh'],
    'understand': ['understand_str_v1'],
    'understand_zh': ['understand_str_v1_zh'],
}
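# In the mapping above, keys are the evaluation dimensions reported by
# TEvalEvaluator; each maps to the underlying TEval subset name(s), with the
# *_zh variants covering the Chinese split.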

plugin_eval_datasets = []
for _name in plugin_eval_subject_mapping:
    plugin_eval_reader_cfg = dict(input_columns=['prompt'], output_column='ground_truth')
    plugin_eval_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(role='HUMAN', prompt='{prompt}'),
                ],
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=ChatInferencer),
    )
    plugin_eval_eval_cfg = dict(
        evaluator=dict(type=TEvalEvaluator, subset=_name),
        pred_postprocessor=dict(type=teval_postprocess),
        num_gpus=1,
    )

    for subset in plugin_eval_subject_mapping[_name]:
        plugin_eval_datasets.append(
            dict(
                abbr='plugin_eval-mus-p10-' + subset + '_public',
                type=TEvalDataset,
                path='data/compassbench_v1.1.public/agent-teval-p10',
                name=subset,
                reader_cfg=plugin_eval_reader_cfg,
                infer_cfg=plugin_eval_infer_cfg,
                eval_cfg=plugin_eval_eval_cfg,
            )
        )
@ -0,0 +1,291 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, HumanEvalPlusEvaluator, humaneval_postprocess_v2
from opencompass.datasets import MBPPDataset, SanitizedMBPPDataset, MBPPEvaluator
from opencompass.datasets import HumanevalXDataset, HumanevalXEvaluator
from opencompass.datasets import LCDataset, LCPassKEvaluator
from opencompass.datasets import TACODataset, TACOEvaluator

compassbench_v1_1_code_datasets = []

# --------------------------------------------------------------- HumanEval CN ---------------------------------------------------------------
humaneval_reader_cfg = dict(input_columns=['prompt'], output_column='task_id', train_split='test')

humaneval_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='完成以下Python代码任务:\n{prompt}'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvalEvaluator),
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
)

compassbench_v1_1_code_datasets.append(
    dict(
        abbr='openai_humaneval_cn_public',
        type=HumanevalDataset,
        path='data/compassbench_v1.1.public/code/humaneval_cn/human-eval-cn-v2-20210705.jsonl',
        reader_cfg=humaneval_reader_cfg,
        infer_cfg=humaneval_infer_cfg,
        eval_cfg=humaneval_eval_cfg,
    )
)

# --------------------------------------------------------------- HumanEval Plus ---------------------------------------------------------------
humaneval_plus_reader_cfg = dict(input_columns=['prompt'], output_column='task_id', train_split='test')

# TODO: allow empty output-column
humaneval_plus_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Complete the following python code:\n{prompt}'),
            ]
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

humaneval_plus_eval_cfg = dict(
    evaluator=dict(type=HumanEvalPlusEvaluator),
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
)

compassbench_v1_1_code_datasets.append(
    dict(
        abbr='humaneval_plus_public',
        type=HumanevalDataset,
        path='data/compassbench_v1.1.public/code/humaneval/human-eval-v2-20210705.jsonl',
        reader_cfg=humaneval_plus_reader_cfg,
        infer_cfg=humaneval_plus_infer_cfg,
        eval_cfg=humaneval_plus_eval_cfg,
    )
)

# --------------------------------------------------------------- MBPP CN ---------------------------------------------------------------
mbpp_reader_cfg = dict(input_columns=['text', 'test_list'], output_column='test_list_2', test_range='[0:5]')

mbpp_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,你的任务是:编写一个函数,从给定的两个元组列表中查找相似的元素。 你的代码应该通过这些测试:\n\n assert similar_elements((3, 4, 5, 6),(5, 7, 4, 10)) == (4, 5)\n assert similar_elements((1, 2, 3, 4),(5, 4, 3, 7)) == (3, 4) \n assert similar_elements((11, 12, 14, 13),(17, 15, 14, 13)) == (13, 14) \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def similar_elements(test_tup1, test_tup2):\r\n res = tuple(set(test_tup1) & set(test_tup2))\r\n return (res)' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,你的任务是:编写一个 Python 函数来识别一个整数是否不是素数。 你的代码应该通过这些测试:\n\n assert is_not_prime(2) == False \n assert is_not_prime(10) == True \n assert is_not_prime(35) == True \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'import math\r\ndef is_not_prime(n):\r\n result = False\r\n for i in range(2,int(math.sqrt(n)) + 1):\r\n if n % i == 0:\r\n result = True\r\n return result' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,你的任务是:编写一个函数,使用堆队列算法从给定的数字列表中查找最大整数。 你的代码应该通过这些测试:\n\n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],3)==[85, 75, 65] \n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],2)==[85, 75] \n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],5)==[85, 75, 65, 58, 35] \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'import heapq as hq\r\ndef heap_queue_largest(nums,n):\r\n largest_nums = hq.nlargest(n, nums)\r\n return largest_nums' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,你的任务是: {text} 你的代码应该通过这些测试:\n\n {test_list} \n'),
                dict(role='BOT', prompt='[BEGIN]\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

mbpp_eval_cfg = dict(evaluator=dict(type=MBPPEvaluator), pred_role='BOT')

compassbench_v1_1_code_datasets.append(
    dict(
        type=MBPPDataset,
        abbr='mbpp_cn_public',
        path='data/compassbench_v1.1.public/code/mbpp_cn/mbpp_cn.jsonl',
        reader_cfg=mbpp_reader_cfg,
        infer_cfg=mbpp_infer_cfg,
        eval_cfg=mbpp_eval_cfg,
    )
)

# --------------------------------------------------------------- Sanitized MBPP ---------------------------------------------------------------
sanitized_mbpp_reader_cfg = dict(input_columns=['text', 'test_list'], output_column='test_list_2', test_range='[0:5]')

sanitized_mbpp_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: Write a function to find the similar elements from the given two tuple lists. Your code should pass these tests:\n\n assert similar_elements((3, 4, 5, 6),(5, 7, 4, 10)) == (4, 5)\n assert similar_elements((1, 2, 3, 4),(5, 4, 3, 7)) == (3, 4) \n assert similar_elements((11, 12, 14, 13),(17, 15, 14, 13)) == (13, 14) \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def similar_elements(test_tup1, test_tup2):\r\n res = tuple(set(test_tup1) & set(test_tup2))\r\n return (res)' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: Write a python function to identify non-prime numbers. Your code should pass these tests:\n\n assert is_not_prime(2) == False \n assert is_not_prime(10) == True \n assert is_not_prime(35) == True \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'import math\r\ndef is_not_prime(n):\r\n result = False\r\n for i in range(2,int(math.sqrt(n)) + 1):\r\n if n % i == 0:\r\n result = True\r\n return result' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: Write a function to find the largest integers from a given list of numbers using heap queue algorithm. Your code should pass these tests:\n\n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],3)==[85, 75, 65] \n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],2)==[85, 75] \n assert heap_queue_largest( [25, 35, 22, 85, 14, 65, 75, 22, 58],5)==[85, 75, 65, 58, 35] \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'import heapq as hq\r\ndef heap_queue_largest(nums,n):\r\n largest_nums = hq.nlargest(n, nums)\r\n return largest_nums' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: {text} Your code should pass these tests:\n\n {test_list} \n'),
                dict(role='BOT', prompt='[BEGIN]\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

sanitized_mbpp_eval_cfg = dict(evaluator=dict(type=MBPPEvaluator), pred_role='BOT')

compassbench_v1_1_code_datasets.append(
    dict(
        type=SanitizedMBPPDataset,
        abbr='sanitized_mbpp_public',
        path='data/compassbench_v1.1.public/code/mbpp/sanitized-mbpp.jsonl',
        reader_cfg=sanitized_mbpp_reader_cfg,
        infer_cfg=sanitized_mbpp_infer_cfg,
        eval_cfg=sanitized_mbpp_eval_cfg,
    )
)

# --------------------------------------------------------------- HumanevalX ---------------------------------------------------------------
humanevalx_reader_cfg = dict(input_columns=['prompt'], output_column='declaration', train_split='test')

humanevalx_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template='{prompt}'),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024),
)

humanevalx_eval_cfg_dict = {
    lang: dict(
        evaluator=dict(
            type=HumanevalXEvaluator,
            language=lang,
            ip_address='localhost',  # replace with your code_eval_server ip address and port
            port=5001,
        ),  # refer to https://opencompass.readthedocs.io/en/latest/advanced_guides/code_eval_service.html to launch a server
    )
    for lang in ['python', 'cpp', 'go', 'java', 'js']  # do not support rust now
}

# Please download the needed `xx.jsonl.gz` from
# https://github.com/THUDM/CodeGeeX2/tree/main/benchmark/humanevalx
# and move them into `data/humanevalx/` folder
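# Illustrative layout after download (one archive per supported language):
#   data/humanevalx/python.jsonl.gz
#   data/humanevalx/cpp.jsonl.gz
#   data/humanevalx/go.jsonl.gz
#   data/humanevalx/java.jsonl.gz
#   data/humanevalx/js.jsonl.gz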
for lang in ['python', 'cpp', 'go', 'java', 'js']:
    compassbench_v1_1_code_datasets.append(
        dict(
            type=HumanevalXDataset,
            abbr=f'humanevalx-{lang}_public',
            language=lang,
            path='data/compassbench_v1.1.public/code/humanevalx',
            reader_cfg=humanevalx_reader_cfg,
            infer_cfg=humanevalx_infer_cfg,
            eval_cfg=humanevalx_eval_cfg_dict[lang],
        )
    )

# --------------------------------------------------------------- LCBench ---------------------------------------------------------------
LC_difficulties_list = ['EASY', 'MEDIUM', 'HARD']
LC_reader_cfg = dict(input_columns=['text', 'test_list'], output_column='test_column')


LC_en_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: You are given three positive integers n, x, and y.\nIn a city, there exist houses numbered 1 to n connected by n streets. There is a street connecting the house numbered i with the house numbered i + 1 for all 1 <= i <= n - 1 . An additional street connects the house numbered x with the house numbered y.\nFor each k, such that 1 <= k <= n, you need to find the number of pairs of houses (house1, house2) such that the minimum number of streets that need to be traveled to reach house2 from house1 is k.\nReturn a 1-indexed array result of length n where result[k] represents the total number of pairs of houses such that the minimum streets required to reach one house from the other is k.\nNote that x and y can be equal. Your code should pass these tests:\n\n assert countOfPairs(n = 3, x = 1, y = 3) == [6,0,0]\n assert countOfPairs(n = 5, x = 2, y = 4) == [10,8,2,0,0] \n assert countOfPairs(n = 4, x = 1, y = 1) == [6,4,2,0] \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'from itertools import accumulate\ndef countOfPairs(n, x, y):\n    x, y = min(x, y), max(x, y)\n    A = [0] * n\n    for i in range(1, n + 1):\n        A[0] += 2 \n        A[min(i - 1, abs(i - y) + x)] -= 1 \n        A[min(n - i, abs(i - x) + 1 + n - y)] -= 1 \n        A[min(abs(i - x), abs(y - i) + 1)] += 1 \n        A[min(abs(i - x) + 1, abs(y - i))] += 1 \n        r = max(x - i, 0) + max(i - y, 0)\n        A[r + (y - x + 0) // 2] -= 1 \n        A[r + (y - x + 1) // 2] -= 1 \n    return list(accumulate(A))' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: You are given a string word containing lowercase English letters.\nTelephone keypads have keys mapped with distinct collections of lowercase English letters, which can be used to form words by pushing them. For example, the key 2 is mapped with ["a","b","c"], we need to push the key one time to type "a", two times to type "b", and three times to type "c" .\nIt is allowed to remap the keys numbered 2 to 9 to distinct collections of letters. The keys can be remapped to any amount of letters, but each letter must be mapped to exactly one key. You need to find the minimum number of times the keys will be pushed to type the string word.\nReturn the minimum number of pushes needed to type word after remapping the keys.\nAn example mapping of letters to keys on a telephone keypad is given below. Note that 1, *, #, and 0 do not map to any letters. Your code should pass these tests:\n\n assert minimumPushes("abcde") == 5 \n assert minimumPushes("xyzxyzxyzxyz") == 12 \n assert minimumPushes("aabbccddeeffgghhiiiiii") == 24 \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def minimumPushes(word):\n    letter_counts = {}\n    for c in word:\n        letter_counts[c] = letter_counts.get(c, 0) + 1\n    counts = list(letter_counts.values())\n    counts.sort(reverse=True)\n    ans, row = 0, 1\n    for i in range(len(counts)):\n        if i > 7 and i % 8 == 0:\n            row += 1\n        ans += row * counts[i]\n    return ans' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt="You are an expert Python programmer, and here is your task: You are given an integer k and an integer x.\nConsider s is the 1-indexed binary representation of an integer num. The price of a number num is the number of i's such that i % x == 0 and s[i] is a set bit.\nReturn the greatest integer num such that the sum of prices of all numbers from 1 to num is less than or equal to k.\nNote:\nIn the binary representation of a number set bit is a bit of value 1.\nThe binary representation of a number will be indexed from right to left. For example, if s == 11100, s[4] == 1 and s[2] == 0. Your code should pass these tests:\n\n assert findMaximumNumber(k = 9, x = 1) == 6 \n assert findMaximumNumber(k = 7, x = 2) == 9 \n"),
                dict(role='BOT', prompt="[BEGIN]\n 'def findMaximumNumber(k, x):\n    def check(v):\n        A = list(map(int, bin(v)[2:]))\n        n = len(A)\n        res = p = 0\n        for i,v in enumerate(A):\n            if v == 1:\n                l = n - i - 1\n                res += (p << l) + ((l // x) << (l - 1) if l else 0)\n            if (n - i) % x == 0:\n                p += v\n        return res + p\n    l, r = 1, 10 ** 15\n    while l < r:\n        mid = (l + r + 1) // 2\n        if check(mid) <= k:\n            l = mid\n        else:\n            r = mid - 1\n    return l' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='You are an expert Python programmer, and here is your task: {text} Your code should pass these tests:\n\n {test_list} \n'),
                dict(role='BOT', prompt='[BEGIN]\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)


LC_cn_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你三个 正整数 n 、x 和 y 。\n在城市中,存在编号从 1 到 n 的房屋,由 n 条街道相连。对所有 1 <= i < n ,都存在一条街道连接编号为 i 的房屋与编号为 i + 1 的房屋。另存在一条街道连接编号为 x 的房屋与编号为 y 的房屋。\n对于每个 k(1 <= k <= n),你需要找出所有满足要求的 房屋对 [house1, house2] ,即从 house1 到 house2 需要经过的 最少 街道数为 k 。\n返回一个下标从 1 开始且长度为 n 的数组 result ,其中 result[k] 表示所有满足要求的房屋对的数量,即从一个房屋到另一个房屋需要经过的 最少 街道数为 k 。\n注意,x 与 y 可以 相等。你的代码需要通过以下测试:\n\n assert countOfPairs(n = 3, x = 1, y = 3) == [6,0,0]\n assert countOfPairs(n = 5, x = 2, y = 4) == [10,8,2,0,0] \n assert countOfPairs(n = 4, x = 1, y = 1) == [6,4,2,0] \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'from itertools import accumulate\ndef countOfPairs(n, x, y):\n    x, y = min(x, y), max(x, y)\n    A = [0] * n\n    for i in range(1, n + 1):\n        A[0] += 2 \n        A[min(i - 1, abs(i - y) + x)] -= 1 \n        A[min(n - i, abs(i - x) + 1 + n - y)] -= 1 \n        A[min(abs(i - x), abs(y - i) + 1)] += 1 \n        A[min(abs(i - x) + 1, abs(y - i))] += 1 \n        r = max(x - i, 0) + max(i - y, 0)\n        A[r + (y - x + 0) // 2] -= 1 \n        A[r + (y - x + 1) // 2] -= 1 \n    return list(accumulate(A))' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你一个字符串 word,由 不同 小写英文字母组成。\n电话键盘上的按键与 不同 小写英文字母集合相映射,可以通过按压按键来组成单词。例如,按键 2 对应 ["a","b","c"],我们需要按一次键来输入 "a",按两次键来输入 "b",按三次键来输入 "c"。\n现在允许你将编号为 2 到 9 的按键重新映射到 不同 字母集合。每个按键可以映射到 任意数量 的字母,但每个字母 必须 恰好 映射到 一个 按键上。你需要找到输入字符串 word 所需的 最少 按键次数。\n返回重新映射按键后输入 word 所需的 最少 按键次数。\n下面给出了一种电话键盘上字母到按键的映射作为示例。注意 1,*,# 和 0 不 对应任何字母。你的代码需要通过以下测试:\n\n assert minimumPushes("abcde") == 5 \n assert minimumPushes("xyzxyzxyzxyz") == 12 \n assert minimumPushes("aabbccddeeffgghhiiiiii") == 24 \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def minimumPushes(word):\n    letter_counts = {}\n    for c in word:\n        letter_counts[c] = letter_counts.get(c, 0) + 1\n    counts = list(letter_counts.values())\n    counts.sort(reverse=True)\n    ans, row = 0, 1\n    for i in range(len(counts)):\n        if i > 7 and i % 8 == 0:\n            row += 1\n        ans += row * counts[i]\n    return ans' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: 给你一个整数 k 和一个整数 x 。\n令 s 为整数 num 的下标从 1 开始的二进制表示。我们说一个整数 num 的 价值 是满足 i % x == 0 且 s[i] 是 设置位 的 i 的数目。\n请你返回 最大 整数 num ,满足从 1 到 num 的所有整数的 价值 和小于等于 k 。\n注意:\n一个整数二进制表示下 设置位 是值为 1 的数位。\n一个整数的二进制表示下标从右到左编号,比方说如果 s == 11100 ,那么 s[4] == 1 且 s[2] == 0。你的代码需要通过以下测试:\n\n assert findMaximumNumber(k = 9, x = 1) == 6 \n assert findMaximumNumber(k = 7, x = 2) == 9 \n'),
                dict(role='BOT', prompt="[BEGIN]\n 'def findMaximumNumber(k, x):\n    def check(v):\n        A = list(map(int, bin(v)[2:]))\n        n = len(A)\n        res = p = 0\n        for i,v in enumerate(A):\n            if v == 1:\n                l = n - i - 1\n                res += (p << l) + ((l // x) << (l - 1) if l else 0)\n            if (n - i) % x == 0:\n                p += v\n        return res + p\n    l, r = 1, 10 ** 15\n    while l < r:\n        mid = (l + r + 1) // 2\n        if check(mid) <= k:\n            l = mid\n        else:\n            r = mid - 1\n    return l' \n[DONE] \n\n "),
                dict(role='HUMAN', prompt='你是一名专业的 Python 程序员,下面是你的任务: {text} 你的代码需要通过以下测试:\n\n {test_list} \n'),
                dict(role='BOT', prompt='[BEGIN]\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)


LC_eval_cfg = dict(evaluator=dict(type=LCPassKEvaluator), pred_role='BOT')

for difficulty in LC_difficulties_list:
    compassbench_v1_1_code_datasets.append(
        dict(
            type=LCDataset,
            abbr='lcbench_en-' + difficulty + '_public',
            path='data/compassbench_v1.1.public/code/LCBench2023/LCBench2023.jsonl',
            difficulty=difficulty,
            reader_cfg=LC_reader_cfg,
            infer_cfg=LC_en_infer_cfg,
            eval_cfg=LC_eval_cfg,
        )
    )
    compassbench_v1_1_code_datasets.append(
        dict(
            type=LCDataset,
            abbr='lcbench_cn-' + difficulty + '_public',
            path='data/compassbench_v1.1.public/code/LCBench2023/LCBench2023_cn.jsonl',
            difficulty=difficulty,
            reader_cfg=LC_reader_cfg,
            infer_cfg=LC_cn_infer_cfg,
            eval_cfg=LC_eval_cfg,
        )
    )


# --------------------------------------------------------------- TACO ---------------------------------------------------------------
TACO_difficulties_list = ['EASY', 'MEDIUM', 'MEDIUM_HARD', 'HARD', 'VERY_HARD']
TACO_reader_cfg = dict(input_columns=['question', 'starter'], output_column='problem_id', train_split='test', test_range='[0:5]')

TACO_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Please write a python program to address the following QUESTION. Your ANSWER should be in a code block format like this: ```python # Write your code here ```. \nQUESTION:\n{question} {starter}\nANSWER:\n'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024),
)

TACO_eval_cfg = dict(evaluator=dict(type=TACOEvaluator), pred_role='BOT')

for difficulty in TACO_difficulties_list:
    compassbench_v1_1_code_datasets.append(
        dict(
            type=TACODataset,
            abbr='TACO-' + difficulty + '_public',
            path='data/compassbench_v1.1.public/code/BAAI-TACO',
            difficulty=difficulty,
            reader_cfg=TACO_reader_cfg,
            infer_cfg=TACO_infer_cfg,
            eval_cfg=TACO_eval_cfg,
        )
    )
@ -0,0 +1,133 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
from opencompass.datasets import WikiBenchDataset
from opencompass.utils.text_postprocessors import first_option_postprocess

# ----------------------- Prompt Content----------------------- #
zero_shot_prompts = {
    'single_choice_prompts': [
        dict(role='HUMAN', prompt='以下是一道单项选择题,请你根据你了解的知识给出正确的答案选项。\n下面是你要回答的题目:\n{question}\n答案选项:'),
        dict(role='BOT', prompt='{answer}')
    ]
}
few_shot_prompts = {
    'single_choice_prompts': {
        'single_choice_cn': [
            dict(role='HUMAN', prompt='题目:“一丝不苟”中的“丝”的本意是( )。\nA. 计量单位\nB. 丝线\nC. 丝绸\nD. 发丝'),
            dict(role='BOT', prompt='答案:A'),
            dict(role='HUMAN', prompt='题目:五华县体育场位于哪个省?\nA. 湖南省\nB. 浙江省\nC. 广东省\nD. 江苏省'),
            dict(role='BOT', prompt='答案:C'),
            dict(role='HUMAN', prompt='题目:西施犬的原产地是哪里?\nA. 印度\nB. 中国\nC. 西藏\nD. 台湾'),
            dict(role='BOT', prompt='答案:C'),
            dict(role='HUMAN', prompt='题目:四库全书的四库是指什么?\nA. 易、书、诗、礼\nB. 经、史、子、音\nC. 诗、书、音、律\nD. 经、史、子、集'),
            dict(role='BOT', prompt='答案:D'),
            dict(role='HUMAN', prompt='题目:{question}'),
        ]}
}


# ----------------------- Prompt Template----------------------- #

# Use Zero-Shot or not
with_few_shot = True

# Max for this dataset is 4, should be set with `with_few_shot`
few_shot_samples = 4

# Use circular evaluation or not
with_circular_eval = True

single_choice_prompts = zero_shot_prompts['single_choice_prompts'] if not with_few_shot else few_shot_prompts['single_choice_prompts']

# Set few shot prompt number
if with_few_shot:
    assert few_shot_samples > 0
    for _name in list(single_choice_prompts.keys()):
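        # trim to the last `few_shot_samples` exemplar pairs (each shot is a
        # HUMAN/BOT pair, hence `* 2`) plus the trailing question template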
        single_choice_prompts[_name] = single_choice_prompts[_name][-few_shot_samples * 2 - 2:]

compassbench_v1_knowledge_sets = {
    'common_knowledge': ['single_choice_cn'],
    'humanity': ['single_choice_cn'],
    'natural_science': ['single_choice_cn'],
    'social_science': ['single_choice_cn'],
}


# ----------------------- Dataset Config----------------------- #
compassbench_v1_knowledge_datasets = []

for _split in list(compassbench_v1_knowledge_sets.keys()):
    for _name in compassbench_v1_knowledge_sets[_split]:
        compassbench_v1_knowledge_reader_cfg = dict(input_columns=['question'], output_column='answer')

        compassbench_v1_knowledge_infer_cfg = dict(
            ice_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin='</E>',
                    round=single_choice_prompts[_name]
                ),
                ice_token='</E>',
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        )
        compassbench_v1_knowledge_eval_cfg = dict(
            evaluator=dict(type=CircularEvaluator if with_circular_eval else AccEvaluator),
            pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'),
        )

        compassbench_v1_knowledge_datasets.append(
            dict(
                type=WikiBenchDataset,
                abbr='compassbench_v1_knowledge-' + _split + '-' + _name + ('_circular' if with_circular_eval else '') + '_public',
path=f'data/compassbench_v1.1.public/knowledge/{_split}/{_name}.jsonl',
|
||||||
|
name=_name + '_circular' if with_circular_eval else _name,
|
||||||
|
reader_cfg=compassbench_v1_knowledge_reader_cfg,
|
||||||
|
infer_cfg=compassbench_v1_knowledge_infer_cfg,
|
||||||
|
eval_cfg=compassbench_v1_knowledge_eval_cfg,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
from opencompass.datasets import TriviaQADataset_V3, TriviaQAEvaluator
|
||||||
|
|
||||||
|
triviaqa_and_nq_reader_cfg = dict(input_columns=['question'], output_column='answer')
|
||||||
|
|
||||||
|
triviaqa_and_nq_infer_cfg = dict(
|
||||||
|
prompt_template=dict(
|
||||||
|
type=PromptTemplate,
|
||||||
|
template=dict(
|
||||||
|
round=[
|
||||||
|
dict(role='HUMAN', prompt='Question: When do stores stop selling alcohol in indiana?'),
|
||||||
|
dict(role='BOT', prompt='Answer: 3 a.m.'),
|
||||||
|
dict(role='HUMAN', prompt='Question: Edinburgh of the Seven Seas is the capital of which group of islands?'),
|
||||||
|
dict(role='BOT', prompt='Answer: Tristan da Cunha'),
|
||||||
|
dict(role='HUMAN', prompt='Question: Which book of the Christian Bible\'s new testament comprises a letter from St Paul to members of a church that he had founded at Macedonia?'),
|
||||||
|
dict(role='BOT', prompt='Answer: Philippians'),
|
||||||
|
dict(role='HUMAN', prompt='Question: The Hindu deity Hanuman appears in the form of which animal?'),
|
||||||
|
dict(role='BOT', prompt='Answer: A monkey'),
|
||||||
|
dict(role='HUMAN', prompt='Question: Who hosts the ITV quiz show The Chase?'),
|
||||||
|
dict(role='BOT', prompt='Answer: Bradley Walsh'),
|
||||||
|
dict(role='HUMAN', prompt='Question: {question}'),
|
||||||
|
]
|
||||||
|
),
|
||||||
|
),
|
||||||
|
retriever=dict(type=ZeroRetriever),
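    # stop decoding once the model opens a new 'Question:' turn, so only the
    # direct answer is kept for scoring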
    inferencer=dict(type=GenInferencer, max_out_len=50, stopping_criteria=['Question:']),
)

triviaqa_and_nq_eval_cfg = dict(evaluator=dict(type=TriviaQAEvaluator), pred_role='BOT')

compassbench_v1_knowledge_datasets.append(
    dict(
        type=TriviaQADataset_V3,
        abbr='compassbench_v1_knowledge-mixed-cloze_en_public',
        path='data/compassbench_v1.1.public/knowledge/mixed/cloze_en.jsonl',
        reader_cfg=triviaqa_and_nq_reader_cfg,
        infer_cfg=triviaqa_and_nq_infer_cfg,
        eval_cfg=triviaqa_and_nq_eval_cfg,
    )
)
@ -0,0 +1,46 @@
from opencompass.openicl.icl_evaluator import EMEvaluator, BleuFloresEvaluator, RougeEvaluator, JiebaRougeEvaluator

compassbench_v1_language_datasets = [
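    # Compact internal schema: `data_type` selects how each JSONL is built into
    # a task ('circular-mcq' for rotated multiple choice, 'qa' for free-form QA
    # with an explicit `evaluator` and optional `human_prompt`).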
    # dict(path='data/compassbench_v1.1.public/language/information_retrieval_en.jsonl',
    #      abbr='information_retrieval_en_public', data_type='qa', infer_method='gen', output_column='answers',
    #      human_prompt='{context}\n{origin_question}\nConcise and direct answer is',
    #      evaluator=EMEvaluator),
    # dict(path='data/compassbench_v1.1.public/language/information_retrieval_zh.jsonl',
    #      abbr='information_retrieval_zh_public', data_type='qa', infer_method='gen', output_column='answers',
    #      human_prompt='{context}\n{origin_question}\n简单直接的回答是',
    #      evaluator=EMEvaluator),

    dict(path='data/compassbench_v1.1.public/language/intention_recognition_en.jsonl',
         abbr='intention_recognition_en_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/language/intention_recognition_zh.jsonl',
         abbr='intention_recognition_zh_circular_public', data_type='circular-mcq', infer_method='gen'),

    dict(path='data/compassbench_v1.1.public/language/sentiment_analysis_en.jsonl',
         abbr='sentiment_analysis_en_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/language/sentiment_analysis_zh.jsonl',
         abbr='sentiment_analysis_zh_circular_public', data_type='circular-mcq', infer_method='gen'),

    dict(path='data/compassbench_v1.1.public/language/translation.jsonl',
         abbr='translation_public', data_type='qa', infer_method='gen',
         evaluator=BleuFloresEvaluator),

    dict(path='data/compassbench_v1.1.public/language/content_critic_en.jsonl',
         abbr='content_critic_en_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/language/content_critic_zh.jsonl',
         abbr='content_critic_zh_circular_public', data_type='circular-mcq', infer_method='gen'),

    dict(path='data/compassbench_v1.1.public/language/content_summarization_en.jsonl',
         abbr='content_summarization_en_public', data_type='qa', infer_method='gen', output_column='summary',
         human_prompt='{article}\nSummary of the article is:\n',
         evaluator=RougeEvaluator),
    dict(path='data/compassbench_v1.1.public/language/content_summarization_zh.jsonl',
         abbr='content_summarization_zh_public', data_type='qa', infer_method='gen', output_column='summary',
         human_prompt='{article}\n上述内容摘要如下:\n',
         evaluator=JiebaRougeEvaluator),

    dict(path='data/compassbench_v1.1.public/language/traditional_cultural_understanding_zh.jsonl',
         abbr='traditional_cultural_understanding_zh_circular_public', data_type='circular-mcq', infer_method='gen'),

    dict(path='data/compassbench_v1.1.public/language/chinese_semantic_understanding_zh.jsonl',
         abbr='chinese_semantic_understanding_zh_circular_public', data_type='circular-mcq', infer_method='gen'),
]
@ -0,0 +1,81 @@
from mmengine.config import read_base
from copy import deepcopy
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer, PPLInferencer
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
from opencompass.datasets import MathBenchDataset, mathbench_postprocess
from opencompass.utils.text_postprocessors import first_option_postprocess

with read_base():
    from .mathbench_prompt import zero_shot_prompts, few_shot_prompts, mathbench_sets

# Max for this dataset is 4
num_shot = 4
# Generate reasoning path or not, only for single choice
with_reasoning = False
# Use circular evaluation or not
with_circular_eval = True
# Use PPL mode in single choice test or not
use_ppl_single_choice = False

assert 0 <= num_shot <= 4
if num_shot == 0:
    prompts = zero_shot_prompts
else:
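    # each shot is a HUMAN/BOT pair, so keep the last `2 * num_shot` messages
    # plus the final '{question}'/'{answer}' template pair (hence the `- 2`)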
    prompts = {name: p[-2 * num_shot - 2:] for name, p in few_shot_prompts.items()}

compassbench_v1_1_math_datasets = []
for _split in mathbench_sets:
    for _name in mathbench_sets[_split]:
        if 'single_choice' in _name:
            if with_reasoning:
                template_round = prompts[_name + '_with_reasoning']
            else:
                template_round = prompts[_name]
        else:
            template_round = prompts[_name]

        if 'single_choice' in _name:
            pred_postprocessor = dict(type=first_option_postprocess, options='ABCD')
        else:
            pred_postprocessor = dict(type=mathbench_postprocess, name=_name)

        if 'single_choice' in _name and with_circular_eval:
            evaluator = dict(type=CircularEvaluator)
        else:
            evaluator = dict(type=AccEvaluator)

        # assemble the final config
        mathbench_reader_cfg = dict(input_columns=['question'], output_column='answer')
        if use_ppl_single_choice and 'single_choice' in _name and not with_reasoning:
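            # PPL mode: build one chat template per candidate option by filling
            # the trailing '{answer}' slot; PPLInferencer then scores each
            # option by perplexity instead of free-form generation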
            template = {}
            for answer in ['A', 'B', 'C', 'D']:
                one_template_round = deepcopy(template_round)
                one_template_round[-1]['prompt'] = one_template_round[-1]['prompt'].format(answer=answer)
                template[answer] = dict(round=one_template_round)
            mathbench_infer_cfg = dict(
                prompt_template=dict(type=PromptTemplate, template=template),
                retriever=dict(type=ZeroRetriever),
                inferencer=dict(type=PPLInferencer),
            )
        else:
            mathbench_infer_cfg = dict(
                prompt_template=dict(type=PromptTemplate, template=dict(round=template_round)),
                retriever=dict(type=ZeroRetriever),
                inferencer=dict(type=GenInferencer, max_out_len=2048),
            )
        mathbench_eval_cfg = dict(evaluator=evaluator, pred_postprocessor=pred_postprocessor)

        compassbench_v1_1_math_datasets.append(
            dict(
                abbr='mathbench-' + _split + '-' + _name + '_public',
                type=MathBenchDataset,
                path=f'data/compassbench_v1.1.public/math/{_split}',
                name=_name,
                with_circular=with_circular_eval,
                reader_cfg=mathbench_reader_cfg,
                infer_cfg=mathbench_infer_cfg,
                eval_cfg=mathbench_eval_cfg,
            )
        )
@ -0,0 +1,103 @@
zero_shot_prompts = {
    'single_choice_cn_with_reasoning': [
        dict(role='HUMAN', prompt='问题: 以下是一道关于数学的单项选择题,请你一步一步推理,并在最后用“所以答案为选项X”给出答案,其中“X”为选项A,B,C,D中你认为正确的选项。下面是你要回答的问题\n{question}\n让我们一步一步思考:'),
    ],
    'single_choice_cn': [
        dict(role='HUMAN', prompt='问题: 以下是一道关于数学的单项选择题,请你直接回答正确答案的选项序号。\n下面是你要回答的题目:\n{question}\n所以答案是:'),
    ],
    'single_choice_en_with_reasoning': [
        dict(role='HUMAN', prompt='Question: Here is a multiple-choice question about mathematics. Please reason through it step by step, and at the end, provide your answer option with "Therefore, the correct answer is option X", Where "X" is the correct option you think from A,B,C,D. Here is the question you need to answer:\n{question}\nLet\'s think step by step:'),
    ],
    'single_choice_en': [
        dict(role='HUMAN', prompt='Question: Here is a multiple-choice question about mathematics. Please provide the correct answer option directly.\nHere is the question you need to answer:\n{question}\nThe answer is:'),
    ],
}

few_shot_prompts = {
    'single_choice_cn': [
        dict(role='HUMAN', prompt='问题: 已知i是虚数单位,z为复数,$2+\\frac{1}{i}=z(3+i)$,则在复平面内z对应的点位于____?\nA. 第一象限\nB. 第二象限\nC. 第三象限\nD. 第四象限'),
        dict(role='BOT', prompt='回答: D'),
        dict(role='HUMAN', prompt='问题: 将函数$y=\\tan(\\omega x-1)(\\omega>0)$的图像向左平移2个单位长度后,与函数$y=\\tan(\\omega x+3)$的图象重合,则$\\omega$的最小值等于____\nA. $2-\\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
        dict(role='BOT', prompt='回答: D'),
        dict(role='HUMAN', prompt='问题: 设$(1+2i)a+b=2i$,其中a,b为实数,则( )\nA. $a=1,b=-1$\nB. $a=1,b=1$\nC. $a=-1,b=1$\nD. $a=-1,b=-1$'),
        dict(role='BOT', prompt='回答: A'),
        dict(role='HUMAN', prompt='问题: 圆锥的底面半径为2,高为4.一个圆柱的下底面在圆锥的底面上,上底面的圆周在圆锥的侧面上,当圆柱侧面积为$4 \\pi$时,该圆柱的体积为____\nA. $\\pi$\nB. $2\\pi$\nC. $3\\pi$\nD. $4\\pi$'),
        dict(role='BOT', prompt='回答: B'),
        dict(role='HUMAN', prompt='问题: {question}'),
        dict(role='BOT', prompt='回答: {answer}'),
    ],
    'single_choice_cn_with_reasoning': [
        dict(role='HUMAN', prompt='问题: 已知i是虚数单位,z为复数,$2+\\frac{1}{i}=z(3+i)$,则在复平面内z对应的点位于____\nA. 第一象限\nB. 第二象限\nC. 第三象限\nD. 第四象限'),
        dict(role='BOT', prompt='回答: 首先,我们将等式两边同时乘以$(3 + i)$的共轭复数$(3 - i)$,以便消去分母中的虚数部分:$z = \\frac{2 - i}{3 + i} \\cdot \\frac{3 - i}{3 - i}$,这样做的目的是利用复数乘法的性质,从而消去分母中的虚数部分。我们进行乘法运算:$z = \\frac{(2 - i)(3 - i)}{(3 + i)(3 - i)}$,$z = \\frac{6 - 2i - 3i + i^2}{9 - 3i + 3i - i^2}$,由于$i^2 = -1$,我们可以进一步简化:$z = \\frac{6 - 5i - 1}{9 + 1}$ $z = \\frac{1}{2} - \\frac{1}{2}i$,在复平面上,这个点位于第四象限,因为它的实部是正的($\\frac{1}{2}$),而虚部是负的($-\\frac{1}{2}$)\n答案选项:D'),
        dict(role='HUMAN', prompt='问题: 将函数$y=\\tan(\\omega x-1)(\\omega>0)$的图像向左平移2个单位长度后,与函数$y=\\tan(\\omega x+3)$的图象重合,则$\\omega$的最小值等于____\nA. $2-\\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
dict(role='BOT', prompt='回答: 将函数$y=\\tan(\\omega x-1)$(其中$\\omega>0$)的图像向左平移2个单位长度后,得到的新函数为$y=\\tan(\\omega(x+2)-1)$。为了使这个新函数的图像与$y=\\tan(\\omega x+3)$的图像重合,我们需要满足以下条件:\n$\\tan(\\omega(x+2)-1) = \\tan(\\omega x+3)$\n由于正切函数的周期性,我们知道$\\tan(\\theta) = \\tan(\\theta + k\\pi)$,其中$k$是任意整数。因此,我们可以写出:\n$\\omega(x+2)-1 = \\omega x+3 + k\\pi$\n解这个方程,我们得到:$\\omega x + 2\\omega - 1 = \\omega x + 3 + k\\pi$\n$2\\omega - 1 = 3 + k\\pi$\n$\\omega = 2 + \\frac{k\\pi}{2}$\n由于$\\omega>0$,我们需要找到满足条件的最小正整数$k$。当$k=0$时,我们得到$\\omega=2$,这是满足条件的最小正值。因此,最小值等于2。答案选项:D'),
|
||||||
|
dict(role='HUMAN', prompt='问题: 设$(1+2i)a+b=2i$,其中a,b为实数,则( )\nA. $a=1,b=-1$\nB. $a=1,b=1$\nC. $a=-1,b=1$\nD. $a=-1,b=-1$'),
|
||||||
|
dict(role='BOT', prompt='回答: 我们可以将这个方程写成:$a + 2ai + b = 2i$,然后将实部和虚部分开:\n$a + b = 0$\n$2a = 2$\n由于$a + b = 0$,我们可以得到$a = -b$。将这个值代入$2a = 2$,我们得到$2(-b) = 2$,因此$b = -1$。因此,$a = 1$。答案选项:A'),
|
||||||
|
dict(role='HUMAN', prompt='问题: 一直角三角形的两边长分别为3和4.则第三边的长为____\nA. 5\nB. $\\sqrt 7$\nC. $\\sqrt 5$\nD. 5或者$\\sqrt 7$'),
|
||||||
|
dict(role='BOT', prompt='回答: 我们可以使用勾股定理来解决这个问题。勾股定理告诉我们,如果一个三角形的两条边长分别为$a$和$b$,那么第三条边的长度为$c = \\sqrt{a^2 + b^2}$。在这个问题中,$a = 3$,$b = 4$,因此$c = \\sqrt{3^2 + 4^2} = \\sqrt{9 + 16} = \\sqrt{25} = 5$。答案选项:A'),
|
||||||
|
dict(role='HUMAN', prompt='问题: {question}'),
|
||||||
|
dict(role='BOT', prompt='回答: {answer}'),
|
||||||
|
],
|
||||||
|
    'single_choice_en': [
        dict(role='HUMAN', prompt='Question: Given that $i$ is the imaginary unit and $z$ is a complex number, $2 + \\frac{1}{i} = z(3 + i)$, the point corresponding to $z$ in the complex plane is located in the ______\nA. First quadrant\nB. Second quadrant\nC. Third quadrant\nD. Fourth quadrant'),
        dict(role='BOT', prompt='Response: D'),
        dict(role='HUMAN', prompt='Question: The graph of the function $y = \\tan(\\omega x - 1)$ is shifted 2 units to the left and coincides with the graph of the function $y = \\tan(\\omega x + 3)$. The minimum value of $\\omega$ is ______\nA. $2 - \\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
        dict(role='BOT', prompt='Response: D'),
        dict(role='HUMAN', prompt='Question: If $(1 + 2i)a + b = 2i$, where $a$ and $b$ are real numbers, then ______\nA. $a = 1, b = -1$\nB. $a = 1, b = 1$\nC. $a = -1, b = 1$\nD. $a = -1, b = -1$'),
        dict(role='BOT', prompt='Response: A'),
        dict(role='HUMAN', prompt='Question: The radius of a cone is 2 and its height is 4. A cylinder has its lower base on the base of the cone and its upper base on the lateral surface of the cone. When the lateral surface area of the cylinder is $4\\pi$, the volume of the cylinder is ______\nA. $\\pi$\nB. $2\\pi$\nC. $3\\pi$\nD. $4\\pi$'),
        dict(role='BOT', prompt='Response: B'),
        dict(role='HUMAN', prompt='Question: {question}'),
        dict(role='BOT', prompt='Response: {answer}'),
    ],
    'single_choice_en_with_reasoning': [
        dict(role='HUMAN', prompt='Question: Given that $i$ is the imaginary unit and $z$ is a complex number, $2 + \\frac{1}{i} = z(3 + i)$, the point corresponding to $z$ in the complex plane is located in the ______\nA. First quadrant\nB. Second quadrant\nC. Third quadrant\nD. Fourth quadrant'),
        dict(role='BOT', prompt='Response: First, we multiply both sides of the equation by the conjugate of $(3 + i)$: $z = \\frac{2 - i}{3 + i} \\cdot \\frac{3 - i}{3 - i}$. We perform the multiplication: $z = \\frac{(2 - i)(3 - i)}{(3 + i)(3 - i)}$, $z = \\frac{6 - 2i - 3i + i^2}{9 - 3i + 3i - i^2}$. Since $i^2 = -1$, we can simplify this further: $z = \\frac{6 - 5i - 1}{9 + 1}$, $z = \\frac{1}{2} - \\frac{1}{2}i$. In the complex plane, this point is located in the fourth quadrant, because its real part is positive ($\\frac{1}{2}$) and its imaginary part is negative ($-\\frac{1}{2}$)\nAnswer option: D'),
        dict(role='HUMAN', prompt='Question: The graph of the function $y = \\tan(\\omega x - 1)$ is shifted 2 units to the left and coincides with the graph of the function $y = \\tan(\\omega x + 3)$. The minimum value of $\\omega$ is ______\nA. $2 - \\frac{\\pi}{2}$\nB. 1\nC. $\\pi - 2$\nD. 2'),
        dict(role='BOT', prompt='Response: Shifting the graph of $y = \\tan(\\omega x - 1)$ 2 units to the left gives the new function $y = \\tan(\\omega(x + 2) - 1)$. In order for the graph of this new function to coincide with the graph of $y = \\tan(\\omega x + 3)$, we need to satisfy the following condition: $\\tan(\\omega(x + 2) - 1) = \\tan(\\omega x + 3)$. By the periodicity of the tangent function, $\\tan(\\theta) = \\tan(\\theta + k\\pi)$ for any integer $k$, so we can write: $\\omega(x + 2) - 1 = \\omega x + 3 + k\\pi$. Solving this equation, we get: $\\omega x + 2\\omega - 1 = \\omega x + 3 + k\\pi$. $2\\omega - 1 = 3 + k\\pi$. $\\omega = 2 + \\frac{k\\pi}{2}$. Since $\\omega > 0$, we need to find the smallest positive integer $k$ that satisfies the condition. When $k = 0$, we get $\\omega = 2$, which is the smallest positive value that satisfies the condition. Therefore, the minimum value is 2. Answer option: D'),
        dict(role='HUMAN', prompt='Question: If $(1 + 2i)a + b = 2i$, where $a$ and $b$ are real numbers, then ______\nA. $a = 1, b = -1$\nB. $a = 1, b = 1$\nC. $a = -1, b = 1$\nD. $a = -1, b = -1$'),
        dict(role='BOT', prompt='Response: We can write this equation as: $a + 2ai + b = 2i$, and then separate the real and imaginary parts: $a + b = 0$. $2a = 2$. Since $a + b = 0$, we can get $a = -b$. Substituting this value into $2a = 2$, we get $2(-b) = 2$, so $b = -1$. Therefore, $a = 1$. Answer option: A'),
        dict(role='HUMAN', prompt='Question: The radius of a cone is 2 and its height is 4. A cylinder has its lower base on the base of the cone and its upper base on the lateral surface of the cone. When the lateral surface area of the cylinder is $4\\pi$, the volume of the cylinder is ______\nA. $\\pi$\nB. $2\\pi$\nC. $3\\pi$\nD. $4\\pi$'),
        dict(role='BOT', prompt='Response: Let the radius of the cylinder be $r$ and its height be $h$. Since the upper rim of the cylinder lies on the lateral surface of the cone, similar triangles give $r = 2 - \\frac{h}{2}$, i.e. $h = 4 - 2r$. The lateral surface area of the cylinder is $2\\pi r h = 4\\pi$, so $rh = 2$, and $r(4 - 2r) = 2$ gives $r^2 - 2r + 1 = 0$, hence $r = 1$ and $h = 2$. The volume of the cylinder is $\\pi r^2 h = 2\\pi$. Answer option: B'),
        dict(role='HUMAN', prompt='Question: {question}'),
        dict(role='BOT', prompt='Response: {answer}'),
    ],
    'cloze_cn': [
        dict(role='HUMAN', prompt='问题: 莎莎有五个玩具。在圣诞节,她从她的爸爸和妈妈那里各得到了两个玩具。现在她有多少个玩具?'),
        dict(role='BOT', prompt='回答: 她有5个玩具。她从妈妈那里得到了2个,所以之后她有5 + 2 = 7个玩具。然后她从爸爸那里得到了2个,所以总共她有7 + 2 = 9个玩具。答案是 9'),
        dict(role='HUMAN', prompt='问题: 服务器房间里有九台电脑。从周一到周四每天增加五台电脑。现在服务器房间里有多少台电脑?'),
        dict(role='BOT', prompt='回答: 从周一到周四有4天。每天增加5台电脑。这意味着总共增加了4 * 5 = 20台电脑。一开始有9台电脑,所以现在有9 + 20 = 29台电脑。答案是 29'),
        dict(role='HUMAN', prompt='问题: 迈克尔有58个高尔夫球。星期二,他丢失了23个高尔夫球。星期三,他又丢失了2个。星期三结束时他还剩下多少个高尔夫球?'),
        dict(role='BOT', prompt='回答: 迈克尔一开始有58个球。星期二他丢失了23个,所以之后他还剩下58 - 23 = 35个球。星期三他又丢失了2个,所以现在他还剩下35 - 2 = 33个球。答案是 33'),
        dict(role='HUMAN', prompt='问题: 奥利弗有23美元。她用每个3美元的价格买了五个百吉饼。她还剩下多少钱?'),
        dict(role='BOT', prompt='回答: 她以每个3美元的价格买了5个百吉饼。这意味着她在百吉饼上花费了5 * 3 = 15美元。她一开始有23美元,所以现在她还剩下23 - 15 = 8美元。答案是 8'),
        dict(role='HUMAN', prompt='问题: {question}'),
        dict(role='BOT', prompt='回答: {answer}'),
    ],
    'cloze_en': [
        dict(role='HUMAN', prompt='Question: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?'),
        dict(role='BOT', prompt='Response: He has 5 toys. He got 2 from mom, so after that he has 5 + 2 = 7 toys. Then he got 2 more from dad, so in total he has 7 + 2 = 9 toys. The answer is 9.'),
        dict(role='HUMAN', prompt='Question: There were nine computers in the server room. Five more computers were installed each day, from Monday to Thursday. How many computers are now in the server room?'),
        dict(role='BOT', prompt='Response: There are 4 days from Monday to Thursday. 5 computers were added each day. That means in total 4 * 5 = 20 computers were added. There were 9 computers in the beginning, so now there are 9 + 20 = 29 computers. The answer is 29.'),
        dict(role='HUMAN', prompt='Question: Michael had 58 golf balls. On Tuesday, he lost 23 golf balls. On Wednesday, he lost 2 more. How many golf balls did he have at the end of Wednesday?'),
        dict(role='BOT', prompt='Response: Michael initially had 58 balls. He lost 23 on Tuesday, so after that he has 58 - 23 = 35 balls. On Wednesday he lost 2 more, so now he has 35 - 2 = 33 balls. The answer is 33.'),
        dict(role='HUMAN', prompt='Question: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?'),
        dict(role='BOT', prompt='Response: She bought 5 bagels for $3 each. This means she spent 5 * $3 = $15 on the bagels. She had $23 in the beginning, so now she has $23 - $15 = $8. The answer is 8.'),
        dict(role='HUMAN', prompt='Question: {question}'),
        dict(role='BOT', prompt='Response: {answer}'),
    ],
}


mathbench_sets = {
    # Practice Part
    'college': ['single_choice_cn', 'single_choice_en'],
    'high': ['single_choice_cn', 'single_choice_en'],
    'middle': ['single_choice_cn', 'single_choice_en'],
    'primary': ['cloze_cn', 'cloze_en'],
    'arithmetic': ['cloze_en'],
    # Theory part
    'college_knowledge': ['single_choice_cn', 'single_choice_en'],
    'high_knowledge': ['single_choice_cn', 'single_choice_en'],
    'middle_knowledge': ['single_choice_cn', 'single_choice_en'],
    'primary_knowledge': ['single_choice_cn', 'single_choice_en'],
}
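Editor's note: the prompt dict above and `mathbench_sets` are typically expanded into one dataset config per (split, prompt type) pair. A minimal sketch of that expansion, assuming a hypothetical `MathBenchDataset` class and data layout (this is an illustration, not the actual continuation of the config file):

```python
# Hypothetical expansion of mathbench_sets into per-split dataset configs.
# 'MathBenchDataset' and the data path layout are assumptions for illustration.
mathbench_datasets = []
for split, prompt_types in mathbench_sets.items():
    for prompt_type in prompt_types:
        mathbench_datasets.append(
            dict(
                abbr=f'mathbench-{split}-{prompt_type}',
                type='MathBenchDataset',           # assumed dataset class name
                path=f'./data/mathbench/{split}',  # assumed data layout
                name=prompt_type,
            ))
```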
@ -0,0 +1,28 @@
compassbench_v1_reason_datasets = [
    dict(path='data/compassbench_v1.1.public/reason/CN_Commonsense.jsonl',
         abbr='reasonbench_cn_commonsense_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/reason/CN_Abductive_Alphanlg_translated.jsonl',
         abbr='reasonbench_cn_abductive_alphanlg_translated_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/reason/CN_Deductive_BBH3obj_translated.jsonl',
         abbr='reasonbench_cn_deductive_bbh3obj_translated_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/reason/CN_Deductive_LogiQA_zh.jsonl',
         abbr='reasonbench_cn_deductive_logiqa_zh_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/reason/CN_Inductive_DEER_translated.jsonl',
         abbr='reasonbench_cn_inductive_deer_translated_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/reason/CN_Inductive_selfgenerated.jsonl',
         abbr='reasonbench_cn_inductive_selfgenerated_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/reason/EN_Commonsense.jsonl',
         abbr='reasonbench_en_commonsense_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/reason/EN_Abductive_Alphanlg.jsonl',
         abbr='reasonbench_en_abductive_alphanlg_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/reason/EN_Deductive_BBH7obj.jsonl',
         abbr='reasonbench_en_deductive_bbh7obj_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/reason/EN_Deductive_LogiQA_zh_translated.jsonl',
         abbr='reasonbench_en_deductive_logiqa_zh_translated_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/reason/EN_Deductive_OCNLI_translated.jsonl',
         abbr='reasonbench_en_deductive_ocnli_translated_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/reason/EN_Inductive_DEER.jsonl',
         abbr='reasonbench_en_inductive_deer_circular_public', data_type='circular-mcq', infer_method='gen'),
    dict(path='data/compassbench_v1.1.public/reason/EN_Inductive_selfgenerated.jsonl',
         abbr='reasonbench_en_inductive_selfgenerated_circular_public', data_type='circular-mcq', infer_method='gen'),
]
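Editor's note: each entry above is marked `data_type='circular-mcq'`. In circular evaluation, a multiple-choice question is asked once per cyclic rotation of its options, and under the strict metric it only counts as solved if every rotation is answered correctly. A small conceptual sketch of that idea (not the opencompass implementation, whose CircularEval logic and metrics are more elaborate):

```python
# Conceptual sketch of circular MCQ scoring.
def rotations(options):
    """All cyclic rotations of an option list: ABCD -> ABCD, BCDA, CDAB, DABC."""
    return [options[k:] + options[:k] for k in range(len(options))]

def circular_correct(answers, options, gold_index):
    """answers[k] is the option *content* chosen for rotation k; strict scoring."""
    gold = options[gold_index]
    return all(ans == gold for ans in answers)

assert rotations(['A', 'B', 'C'])[1] == ['B', 'C', 'A']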
@ -1,18 +1,4 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
drop_examples = '''\
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import DropOpenAIDataset, DropOpenAIEvaluator

drop_reader_cfg = dict(
    input_columns=['prompt'],
    output_column='answers',
    train_split='validation',
    test_split='validation',
)

template = '''\
You will be asked to read a passage and answer a question. Some examples of passages and Q&A are provided below.

# Examples
---
Passage: Trunajaya rebellion or Trunajaya War was the ultimately unsuccessful rebellion waged by the Madurese prince Trunajaya and fighters from Makassar against the Mataram Sultanate and its Dutch East India Company supporters in Java during the 1670s. The rebellion was initially successful: the rebels defeated the royal army at Gegodog , captured most of the Javanese north coast, and took the Mataram capital Plered . King Amangkurat I died during the retreat of the royal court. His son and successor, Amangkurat II, requested help from the VOC in exchange for financial remuneration and geopolitical concessions. The VOC\'s subsequent involvement turned the tide of the war. VOC and Mataram forces recovered lost territories and overran Trunajaya\'s new capital at Kediri . However, the rebellion continued until the capture of Trunajaya at the end of 1679, and the defeat, death, or surrender of the other rebel leaders . Trunajaya was killed by Amangkurat II personally in 1680 while a prisoner of the VOC. After his father\'s death in 1677, Amangkurat II also faced rival claims to the throne. The most serious rival was his brother Pangeran Puger, who took the capital Plered in 1677 and did not surrender until 1681.
@ -27,29 +13,4 @@ Answer: Jacobs
---
Passage: Hoping to rebound from their road loss to the Bills, the Chargers flew to Wembley Stadium for the 2008 International Series game with the New Orleans Saints. In the first quarter, San Diego trailed early as kicker Taylor Mehlhaff got a 23-yard field goal. The \'Bolts would respond with kicker Nate Kaeding getting a 33-yard field goal. In the second quarter, New Orleans regained the lead as QB Drew Brees (a former Charger) completed a 12-yard TD pass to WR Devery Henderson (with a failed PAT) and RB Deuce McAllister getting a 1-yard TD run. San Diego answered as QB Philip Rivers completed a 12-yard TD pass to RB LaDainian Tomlinson, but the Saints replied with Brees completing a 30-yard TD pass to WR Lance Moore. The Chargers closed out the half with Rivers completing a 12-yard TD pass to TE Antonio Gates. In the third quarter, New Orleans increased its lead Brees completing a 1-yard TD pass to TE Mark Campbell, after a very controversial Pass interference call on cornerback Cletis Gordon put the Saints on the 1-yard line. The \'Bolts would answer with Kaeding getting a 24-yard field goal. In the fourth quarter, the Saints continued to build its lead as FB Mike Karney got a 1-yard TD run. San Diego tried to rally as Kaeding nailed a 31-yard field goal, Rivers completed a 14-yard TD pass to WR Vincent Jackson, and Brees giving the \'Bolts a safety via an incomplete pass thrown into the back of his own endzone. However, New Orleans\' defense stiffened for the win. With the loss, the Chargers went into their bye week at 3-5.
Question: How many total yards of touchdown passes did Drew Brees make?
Answer: 43
Answer: 43'''

# Your Task

---
{prompt}

Think step by step, then write a line of the form "Answer: $ANSWER" at the end of your response.
'''

drop_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template=template),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

drop_eval_cfg = dict(evaluator=dict(type=DropOpenAIEvaluator))

drop_datasets = [
    dict(
        abbr='drop',
        type=DropOpenAIDataset,
        path='./data/drop_simple_eval/dev.jsonl',
        reader_cfg=drop_reader_cfg,
        infer_cfg=drop_infer_cfg,
        eval_cfg=drop_eval_cfg)
]
@ -1,4 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .drop_gen_e18aaf import drop_datasets
    from .drop_openai_simple_evals_gen_3857b0 import drop_datasets
43 configs/datasets/drop/drop_gen_a2697c.py Normal file
@ -0,0 +1,43 @@
# USED IN BASE MODEL
from mmengine.config import read_base
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import DropOpenAIDataset, DropOpenAIEvaluator

with read_base():
    from .drop_examples import drop_examples  # noqa: F401, F403

drop_reader_cfg = dict(
    input_columns=['prompt'],
    output_column='answers',
    train_split='validation',
    test_split='validation',
)

template = f'''\
You will be asked to read a passage and answer a question. Think step by step, then write a line of the form "Answer: $ANSWER" at the end of your response. Some examples of passages and Q&A are provided below.

{drop_examples}

# Your Task

---
{{prompt}}'''

drop_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template=dict(round=[dict(role='HUMAN', prompt=template)])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, stopping_criteria=['---', 'Passage', 'Question', 'You will be asked']),
)

drop_eval_cfg = dict(evaluator=dict(type=DropOpenAIEvaluator))

drop_datasets = [
    dict(
        abbr='drop',
        type=DropOpenAIDataset,
        path='data/drop_simple_eval/dev.jsonl',
        reader_cfg=drop_reader_cfg,
        infer_cfg=drop_infer_cfg,
        eval_cfg=drop_eval_cfg)
]
34 configs/datasets/drop/drop_gen_eb14af.py Normal file
@ -0,0 +1,34 @@
from mmengine.config import read_base
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import DropOpenAIDataset, DropOpenAIEvaluator

with read_base():
    from .drop_examples import drop_examples  # noqa: F401, F403

drop_reader_cfg = dict(
    input_columns=['prompt'],
    output_column='answers',
    train_split='validation',
    test_split='validation',
)

template = f'You will be asked to read a passage and answer a question. Think step by step, then write a line of the form "Answer: $ANSWER" at the end of your response. Some examples of passages and Q&A are provided below.\n\n{drop_examples}\n\n# Your Task\n\n---\n{{prompt}}'

drop_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template=dict(round=[dict(role='HUMAN', prompt=template)])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

drop_eval_cfg = dict(evaluator=dict(type=DropOpenAIEvaluator))

drop_datasets = [
    dict(
        abbr='drop',
        type=DropOpenAIDataset,
        path='data/drop_simple_eval/dev.jsonl',
        reader_cfg=drop_reader_cfg,
        infer_cfg=drop_infer_cfg,
        eval_cfg=drop_eval_cfg)
]
34 configs/datasets/drop/drop_openai_simple_evals_gen_3857b0.py Normal file
@ -0,0 +1,34 @@
from mmengine.config import read_base
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import DropOpenAIDataset, DropOpenAIEvaluator

with read_base():
    from .drop_examples import drop_examples  # noqa: F401, F403

drop_reader_cfg = dict(
    input_columns=['prompt'],
    output_column='answers',
    train_split='validation',
    test_split='validation',
)

template = f'You will be asked to read a passage and answer a question. Some examples of passages and Q&A are provided below.\n\n{drop_examples}\n\n# Your Task\n\n---\n{{prompt}}\n\nThink step by step, then write a line of the form "Answer: $ANSWER" at the end of your response.'

drop_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template=dict(round=[dict(role='HUMAN', prompt=template)])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

drop_eval_cfg = dict(evaluator=dict(type=DropOpenAIEvaluator))

drop_datasets = [
    dict(
        abbr='drop',
        type=DropOpenAIDataset,
        path='data/drop_simple_eval/dev.jsonl',
        reader_cfg=drop_reader_cfg,
        infer_cfg=drop_infer_cfg,
        eval_cfg=drop_eval_cfg)
]
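Editor's note: all three drop configs end the prompt with the instruction to emit a final `Answer: $ANSWER` line, which the evaluator then compares against the gold answers. A minimal sketch of that extraction step (illustrative only; `DropOpenAIEvaluator`'s actual matching is more forgiving):

```python
# Sketch of pulling the final answer out of a completion that ends with
# an "Answer: ..." line, as the drop templates request.
def parse_answer_line(completion: str) -> str:
    """Return the text after the last 'Answer:' marker, stripped."""
    marker = 'Answer:'
    idx = completion.rfind(marker)
    return completion[idx + len(marker):].strip() if idx != -1 else ''

assert parse_answer_line('He threw two TD passes.\nAnswer: 43') == '43'
```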
37 configs/datasets/gsm8k/gsm8k_0shot_v2_gen_a58960.py Normal file
@ -0,0 +1,37 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import GSM8KDataset, gsm8k_postprocess, gsm8k_dataset_postprocess, Gsm8kEvaluator
from opencompass.datasets import MATHEvaluator, math_postprocess_v2

gsm8k_reader_cfg = dict(input_columns=['question'], output_column='answer')

gsm8k_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='{question}\nPlease reason step by step, and put your final answer within \\boxed{}.'),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512),
)

gsm8k_eval_cfg = dict(
    evaluator=dict(type=MATHEvaluator, version='v2'),
    pred_postprocessor=dict(type=math_postprocess_v2),
    dataset_postprocessor=dict(type=gsm8k_dataset_postprocess),
)

gsm8k_datasets = [
    dict(
        abbr='gsm8k',
        type=GSM8KDataset,
        path='./data/gsm8k',
        reader_cfg=gsm8k_reader_cfg,
        infer_cfg=gsm8k_infer_cfg,
        eval_cfg=gsm8k_eval_cfg,
    )
]
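Editor's note: the zero-shot prompt asks the model to put its final answer inside `\boxed{}`, and the eval side pairs `MATHEvaluator` (v2) with `math_postprocess_v2` to recover it. A minimal sketch of that kind of boxed-answer extraction, assuming behavior similar to (but not identical with) the real postprocessor:

```python
# Sketch of boxed-answer extraction; the actual math_postprocess_v2 in
# opencompass applies a larger set of fallbacks.
def extract_boxed_answer(text: str) -> str:
    """Return the content of the last \\boxed{...} span, handling nested braces."""
    start = text.rfind('\\boxed{')
    if start == -1:
        return ''
    i, depth, out = start + len('\\boxed{'), 1, []
    while i < len(text):
        c = text[i]
        if c == '{':
            depth += 1
        elif c == '}':
            depth -= 1
            if depth == 0:
                break
        out.append(c)
        i += 1
    return ''.join(out).strip()

assert extract_boxed_answer('so the answer is \\boxed{42}.') == '42'
```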
@ -1,7 +1,7 @@
# HumanEval

```bash
python3 run.py --models hf_internlm2_7b --datasets humaneval_gen_d2537e --debug
python3 run.py --models hf_internlm2_7b --datasets deprecated_humaneval_gen_d2537e --debug
python3 run.py --models hf_internlm2_chat_7b --datasets humaneval_gen_8e312c --debug
```
@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -19,7 +19,7 @@ humaneval_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    evaluator=dict(type=HumanEvalEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess),

@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -19,7 +19,7 @@ humaneval_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    evaluator=dict(type=HumanEvalEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess),
@ -1,36 +0,0 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')

# TODO: allow empty output-column
humaneval_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='Complete the following python code:\n{prompt}'),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess),
)

humaneval_datasets = [
    dict(
        abbr='openai_humaneval',
        type=HumanevalDataset,
        path='./data/humaneval/human-eval-v2-20210705.jsonl',
        reader_cfg=humaneval_reader_cfg,
        infer_cfg=humaneval_infer_cfg,
        eval_cfg=humaneval_eval_cfg)
]
@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess_v2
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v2

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -19,7 +19,7 @@ humaneval_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    evaluator=dict(type=HumanEvalEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess_v2
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v2

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -16,7 +16,7 @@ humaneval_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    evaluator=dict(type=HumanEvalEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -15,7 +15,7 @@ humaneval_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    evaluator=dict(type=HumanEvalEvaluator),
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess),
)
@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -24,7 +24,7 @@ humaneval_infer_cfg = dict(
    inferencer=dict(type=GenInferencer))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    evaluator=dict(type=HumanEvalEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess),
35 configs/datasets/humaneval/humaneval_gen_66a7f4.py Normal file
@ -0,0 +1,35 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v2

humaneval_reader_cfg = dict(input_columns=['prompt'], output_column='task_id', train_split='test')

HUMANEVAL_TEMPLATE = dict(
    round=[
        dict(role='HUMAN', prompt='You are an intelligent programming assistant to produce Python algorithmic solutions.\nCan you complete the following Python function?\n```python\n{prompt}\n```'),
    ]
)

humaneval_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template=HUMANEVAL_TEMPLATE),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024),
)

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvalEvaluator),
    k=[1, 10, 100],
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
)

humaneval_datasets = [
    dict(
        abbr='openai_humaneval',
        type=HumanevalDataset,
        path='./data/humaneval/human-eval-v2-20210705.jsonl',
        reader_cfg=humaneval_reader_cfg,
        infer_cfg=humaneval_infer_cfg,
        eval_cfg=humaneval_eval_cfg,
    )
]
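Editor's note: this chat-style template asks the model to answer with a fenced ```python block, so the postprocessor has to recover raw code from the reply. A rough sketch of that recovery, assuming much simpler behavior than the real `humaneval_postprocess_v2`:

```python
# Sketch of code-block recovery from a chat completion (illustrative).
import re

def extract_python_block(text: str) -> str:
    """Return the first ```python ...``` block's body, else the raw text."""
    m = re.search(r'```python\n(.*?)```', text, re.DOTALL)
    return m.group(1) if m else text

reply = "Sure!\n```python\ndef f(x):\n    return x + 1\n```"
assert extract_python_block(reply).startswith('def f(x):')
```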
@ -1,41 +0,0 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')

# TODO: allow empty output-column
humaneval_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            begin=[
                dict(
                    role='SYSTEM',
                    fallback_role='HUMAN',
                    prompt='Complete the following python code:'),
            ],
            round=[
                dict(role='HUMAN', prompt='{prompt}'),
            ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess),
)

humaneval_datasets = [
    dict(
        abbr='openai_humaneval',
        type=HumanevalDataset,
        path='./data/humaneval/human-eval-v2-20210705.jsonl',
        reader_cfg=humaneval_reader_cfg,
        infer_cfg=humaneval_infer_cfg,
        eval_cfg=humaneval_eval_cfg)
]
@ -1,7 +1,8 @@
# THIS SHALL ALSO BE DEPRECATED
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess_v2
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v2

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -19,7 +20,7 @@ humaneval_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    evaluator=dict(type=HumanEvalEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess_v2
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v2

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -19,7 +19,7 @@ humaneval_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    evaluator=dict(type=HumanEvalEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess_v2),

@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess_v2
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v2

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -19,7 +19,7 @@ humaneval_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    evaluator=dict(type=HumanEvalEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess_v2),

@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess_v2
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v2

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -19,7 +19,7 @@ humaneval_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    evaluator=dict(type=HumanEvalEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess_v2),

@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess_v2
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v2

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -19,7 +19,7 @@ humaneval_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    evaluator=dict(type=HumanEvalEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess_v2),

@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess_v2
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v2

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -19,7 +19,7 @@ humaneval_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    evaluator=dict(type=HumanEvalEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess_v2),

@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess_v2
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v2

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -19,7 +19,7 @@ humaneval_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    evaluator=dict(type=HumanEvalEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
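Editor's note: every one of these eval configs keeps `k=[1, 10, 100]`, i.e. pass@k scoring. For reference, the standard unbiased estimator from the HumanEval paper, given `n` generated samples of which `c` pass the tests, is `1 - C(n-c, k) / C(n, k)`; a small reference sketch (the evaluator classes compute this internally):

```python
from math import comb

def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k estimator (Chen et al., 2021): n samples, c correct."""
    if n - c < k:
        return 1.0
    return 1.0 - comb(n - c, k) / comb(n, k)

print(pass_at_k(n=200, c=20, k=1))  # -> 0.1 (up to float rounding)
```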
35 configs/datasets/humaneval_plus/humaneval_plus_gen_66a7f4.py Normal file
@ -0,0 +1,35 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvalPlusEvaluator, humaneval_postprocess_v2

humaneval_plus_reader_cfg = dict(input_columns=['prompt'], output_column='task_id', train_split='test')

HUMANEVAL_TEMPLATE = dict(
    round=[
        dict(role='HUMAN', prompt='You are an intelligent programming assistant to produce Python algorithmic solutions.\nCan you complete the following Python function?\n```python\n{prompt}\n```'),
    ]
)

humaneval_plus_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template=HUMANEVAL_TEMPLATE),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=1024),
)

humaneval_plus_eval_cfg = dict(
    evaluator=dict(type=HumanEvalPlusEvaluator),
    k=[1, 10, 100],
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
)

humaneval_plus_datasets = [
    dict(
        abbr='humaneval_plus',
        type=HumanevalDataset,
        path='./data/humaneval/human-eval-v2-20210705.jsonl',
        reader_cfg=humaneval_plus_reader_cfg,
        infer_cfg=humaneval_plus_infer_cfg,
        eval_cfg=humaneval_plus_eval_cfg,
    )
]
@ -1,7 +1,8 @@
# THIS SHALL ALSO BE DEPRECATED
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess_v2
from opencompass.datasets import HumanevalDataset, HumanEvalPlusEvaluator, humaneval_postprocess_v2

humaneval_plus_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -19,7 +20,7 @@ humaneval_plus_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_plus_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator, metric='EvalPlus'),
    evaluator=dict(type=HumanEvalPlusEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess_v2),

@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess_v2
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v2

humaneval_plus_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -19,7 +19,7 @@ humaneval_plus_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_plus_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator, metric='EvalPlus'),
    evaluator=dict(type=HumanEvalEvaluator, metric='EvalPlus'),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess_v2),

@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess_v2
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v2

humaneval_plus_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -19,7 +19,7 @@ humaneval_plus_infer_cfg = dict(
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_plus_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator, metric='EvalPlus'),
    evaluator=dict(type=HumanEvalEvaluator, metric='EvalPlus'),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess_v2),
16 configs/datasets/mmlu_pro/mmlu_pro_categories.py Normal file
@ -0,0 +1,16 @@
categories = [
    'math',
    'physics',
    'chemistry',
    'law',
    'engineering',
    'other',
    'economics',
    'health',
    'psychology',
    'business',
    'biology',
    'philosophy',
    'computer science',
    'history',
]
58 configs/datasets/mmlu_pro/mmlu_pro_gen_cdbebf.py Normal file
@ -0,0 +1,58 @@
from mmengine.config import read_base
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import FixKRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MMLUProDataset
from opencompass.utils.text_postprocessors import first_option_postprocess

with read_base():
    from .mmlu_pro_categories import categories


mmlu_pro_datasets = []

for category in categories:
    mmlu_pro_reader_cfg = dict(
        input_columns=['question', 'cot_content', 'options_str'],
        output_column='answer',
        train_split='validation',
        test_split='test',
    )

    mmlu_pro_infer_cfg = dict(
        ice_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(role='HUMAN', prompt='Question:\n{question}\nOptions:\n{options_str}'),
                dict(role='BOT', prompt="Answer: Let's think step by step. {cot_content}")
            ]),
        ),
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                begin='</E>',
                round=[
                    dict(role='HUMAN', prompt='Question:\n{question}\nOptions:\n{options_str}'),
                ],
            ),
            ice_token='</E>',
        ),
        retriever=dict(type=FixKRetriever, fix_id_list=[0, 1, 2, 3, 4]),
        inferencer=dict(type=GenInferencer),
    )

    mmlu_pro_eval_cfg = dict(
        evaluator=dict(type=AccEvaluator),
        pred_postprocessor=dict(type=first_option_postprocess, options='ABCDEFGHIJKLMNOP'),
    )

    mmlu_pro_datasets.append(
        dict(
            abbr=f'mmlu_pro_{category.replace(" ", "_")}',
            type=MMLUProDataset,
            category=category,
            reader_cfg=mmlu_pro_reader_cfg,
            infer_cfg=mmlu_pro_infer_cfg,
            eval_cfg=mmlu_pro_eval_cfg,
        ))
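A minimal sketch (hypothetical placeholder strings) of the 5-shot prompt shape this config yields: FixKRetriever with fix_id_list=[0, 1, 2, 3, 4] always prepends the same five validation items, rendered where the '</E>' ice_token sits:

ice = ('Question:\n<q_i>\nOptions:\n<opts_i>\n'
       "Answer: Let's think step by step. <cot_i>\n") * 5  # five fixed exemplars
prompt = '</E>Question:\n{question}\nOptions:\n{options_str}'.replace('</E>', ice)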
61 configs/datasets/nq/nq_open_1shot_gen_2e45e5.py Normal file
@ -0,0 +1,61 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever, FixKRetriever, RandomRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import NQOpenDataset, NQEvaluator

nq_datasets = []
for k in [1]:
    nq_reader_cfg = dict(
        input_columns=['question'], output_column='answer', train_split='train', test_split='validation')

    if k == 0:
        nq_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='Q: {question}?'),
                        dict(role='BOT', prompt='A:'),
                    ]
                )
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_out_len=50)
        )
    else:
        nq_infer_cfg = dict(
            ice_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='Q: {question}?'),
                        dict(role='BOT', prompt='A: {answer}.\n'),
                    ]
                ),
            ),
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin='</E>',
                    round=[
                        dict(role='HUMAN', prompt='Q: {question}?'),
                        dict(role='BOT', prompt='A:'),
                    ]
                ),
                ice_token='</E>',
            ),
            retriever=dict(type=FixKRetriever, fix_id_list=list(range(k))),
            inferencer=dict(type=GenInferencer, max_out_len=50),
        )

    nq_eval_cfg = dict(evaluator=dict(type=NQEvaluator), pred_role='BOT')

    nq_datasets.append(
        dict(
            type=NQOpenDataset,
            abbr=f'nq_open_{k}shot',
            path='./data/nq-open/',
            reader_cfg=nq_reader_cfg,
            infer_cfg=nq_infer_cfg,
            eval_cfg=nq_eval_cfg)
    )
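A small sketch (k values other than 1 are illustrative) of how the k loop above generalizes; this file only builds the 1-shot variant:

for k in [0, 1, 5]:
    abbr = f'nq_open_{k}shot'  # nq_open_0shot, nq_open_1shot, nq_open_5shot
    # k == 0 takes the ZeroRetriever branch; k > 0 uses FixKRetriever with ids 0..k-1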
@ -0,0 +1,64 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import LMEvaluator
from opencompass.datasets import AlignmentBenchDataset

subjective_reader_cfg = dict(
    input_columns=['question', 'capability', 'critiquellm_prefix'],
    output_column='judge',
)

subjective_all_sets = [
    'alignment_bench_v1_1',
]
data_path = 'data/subjective/alignment_bench'

alignment_bench_config_path = 'data/subjective/alignment_bench/config'
alignment_bench_config_name = 'multi-dimension'

subjective_datasets = []

for _name in subjective_all_sets:
    subjective_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{question}'
                ),
            ]),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_out_len=2048),
    )

    subjective_eval_cfg = dict(
        evaluator=dict(
            type=LMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(round=[
                    dict(
                        role='HUMAN',
                        prompt='{critiquellm_prefix}[助手的答案开始]\n{prediction}\n[助手的答案结束]\n'
                    ),
                ]),
            ),
        ),
        pred_role='BOT',
    )

    subjective_datasets.append(
        dict(
            abbr=f'{_name}',
            type=AlignmentBenchDataset,
            path=data_path,
            name=_name,
            alignment_bench_config_path=alignment_bench_config_path,
            alignment_bench_config_name=alignment_bench_config_name,
            reader_cfg=subjective_reader_cfg,
            infer_cfg=subjective_infer_cfg,
            eval_cfg=subjective_eval_cfg
        ))
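A minimal sketch (hypothetical values) of how LMEvaluator fills the judge template above, splicing the evaluated model's output in place of {prediction} between the two markers:

critiquellm_prefix = '<per-sample judging instructions>'  # hypothetical
prediction = '<the evaluated model answer>'               # hypothetical
judge_prompt = f'{critiquellm_prefix}[助手的答案开始]\n{prediction}\n[助手的答案结束]\n'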
@ -13,7 +13,7 @@ data_path ='data/subjective/compassbench'

 subjective_datasets = []

-versions = ['CompassbenchV1']
+versions = ['CompassBenchV1.1']

 for version_abbr in versions:
     subjective_infer_cfg = dict(
@ -0,0 +1,58 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import LMEvaluator
from opencompass.datasets import CompassBenchControlLengthBiasDataset

subjective_reader_cfg = dict(
    input_columns=['question', 'judge_prompt'],
    output_column='judge',
)

data_path = 'data/subjective/compassbench'

subjective_datasets = []

versions = ['CompassBenchV1.1']

for version_abbr in versions:
    subjective_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{question}'
                ),
            ]),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_seq_len=4096, max_out_len=2048),
    )

    subjective_eval_cfg = dict(
        evaluator=dict(
            type=LMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(round=[
                    dict(
                        role='HUMAN',
                        prompt='{judge_prompt}'
                    ),
                ]),
            ),
        ),
        pred_role='BOT',
    )

    subjective_datasets.append(
        dict(
            abbr=version_abbr,
            type=CompassBenchControlLengthBiasDataset,
            path=data_path,
            name=version_abbr,
            reader_cfg=subjective_reader_cfg,
            infer_cfg=subjective_infer_cfg,
            eval_cfg=subjective_eval_cfg
        ))
@ -0,0 +1,58 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import LMEvaluator
from opencompass.datasets import CompassBenchControlLengthBiasDataset

subjective_reader_cfg = dict(
    input_columns=['question', 'judge_prompt'],
    output_column='judge',
)

data_path = 'data/subjective/compassbench'

subjective_datasets = []

versions = ['CompassBenchV1.1.patch', 'CompassBenchV1.1.patch.en']

for version_abbr in versions:
    subjective_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{question}'
                ),
            ]),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_seq_len=4096, max_out_len=2048),
    )

    subjective_eval_cfg = dict(
        evaluator=dict(
            type=LMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(round=[
                    dict(
                        role='HUMAN',
                        prompt='{judge_prompt}'
                    ),
                ]),
            ),
        ),
        pred_role='BOT',
    )

    subjective_datasets.append(
        dict(
            abbr=version_abbr,
            type=CompassBenchControlLengthBiasDataset,
            path=data_path,
            name=version_abbr,
            reader_cfg=subjective_reader_cfg,
            infer_cfg=subjective_infer_cfg,
            eval_cfg=subjective_eval_cfg
        ))
@ -0,0 +1,58 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import LMEvaluator
from opencompass.datasets import CompassBenchControlLengthBiasDataset

subjective_reader_cfg = dict(
    input_columns=['question', 'judge_prompt'],
    output_column='judge',
)

data_path = 'data/subjective/compassbench'

subjective_datasets = []

versions = ['CompassBenchV1.2']

for version_abbr in versions:
    subjective_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{question}'
                ),
            ]),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_seq_len=4096, max_out_len=2048),
    )

    subjective_eval_cfg = dict(
        evaluator=dict(
            type=LMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(round=[
                    dict(
                        role='HUMAN',
                        prompt='{judge_prompt}'
                    ),
                ]),
            ),
        ),
        pred_role='BOT',
    )

    subjective_datasets.append(
        dict(
            abbr=version_abbr,
            type=CompassBenchControlLengthBiasDataset,
            path=data_path,
            name=version_abbr,
            reader_cfg=subjective_reader_cfg,
            infer_cfg=subjective_infer_cfg,
            eval_cfg=subjective_eval_cfg
        ))
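The three new CompassBench configs above differ only in their versions lists, so a sketch like the following (not part of the diff) could drive all of them from one loop:

for versions in (['CompassBenchV1.1'],
                 ['CompassBenchV1.1.patch', 'CompassBenchV1.1.patch.en'],
                 ['CompassBenchV1.2']):
    ...  # build subjective_datasets for each version list exactly as above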
62 configs/datasets/triviaqa/triviaqa_wiki_1shot_gen_bc5f21.py Normal file
@ -0,0 +1,62 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever, FixKRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import TriviaQADataset_V2, TriviaQAEvaluator


triviaqa_datasets = []
for k in [1]:
    triviaqa_reader_cfg = dict(
        input_columns=['question'], output_column='answer', train_split='train', test_split='validation')

    if k == 0:
        triviaqa_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='Q: {question}'),
                        dict(role='BOT', prompt='A:'),
                    ]
                )
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_out_len=50)
        )
    else:
        triviaqa_infer_cfg = dict(
            ice_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='Q: {question}'),
                        dict(role='BOT', prompt='A: {answer}.\n'),
                    ]
                ),
            ),
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin='</E>',
                    round=[
                        dict(role='HUMAN', prompt='Q: {question}'),
                        dict(role='BOT', prompt='A:'),
                    ]
                ),
                ice_token='</E>',
            ),
            retriever=dict(type=FixKRetriever, fix_id_list=list(range(k))),
            inferencer=dict(type=GenInferencer, max_out_len=50),
        )

    triviaqa_eval_cfg = dict(evaluator=dict(type=TriviaQAEvaluator), pred_role='BOT')

    triviaqa_datasets.append(
        dict(
            type=TriviaQADataset_V2,
            abbr=f'triviaqa_wiki_{k}shot',
            path='./data/triviaqa',
            reader_cfg=triviaqa_reader_cfg,
            infer_cfg=triviaqa_infer_cfg,
            eval_cfg=triviaqa_eval_cfg)
    )
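A minimal sketch (hypothetical Q/A text) of the 1-shot prompt this file renders: ice_template formats the retrieved exemplar with its gold answer, and the result replaces the '</E>' ice_token of prompt_template:

exemplar = 'Q: <train question 0>\nA: <gold answer>.\n'  # hypothetical exemplar
final_prompt = exemplar + 'Q: {question}\nA:'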
@ -5,7 +5,7 @@ with read_base():
     from .datasets.triviaqa.triviaqa_wiki_gen_d18bf4 import triviaqa_datasets
     from .datasets.nq.nq_open_gen_e93f8a import nq_datasets
     from .datasets.gsm8k.gsm8k_gen_3309bd import gsm8k_datasets
-    from .datasets.humaneval.humaneval_gen_a82cae import humaneval_datasets
+    from .datasets.humaneval.deprecated_humaneval_gen_a82cae import humaneval_datasets
     from .datasets.agieval.agieval_mixed_713d14 import agieval_datasets
     from .datasets.SuperGLUE_BoolQ.SuperGLUE_BoolQ_ppl_314797 import BoolQ_datasets
     from .datasets.hellaswag.hellaswag_ppl_a6e128 import hellaswag_datasets
@ -6,7 +6,7 @@ with read_base():
     from .datasets.bbh.bbh_gen_5b92b0 import bbh_datasets
     from .datasets.gsm8k.gsm8k_gen_1d7fe4 import gsm8k_datasets
     from .datasets.math.math_gen_265cce import math_datasets
-    from .datasets.humaneval.humaneval_gen_a82cae import humaneval_datasets
+    from .datasets.humaneval.deprecated_humaneval_gen_a82cae import humaneval_datasets
     from .datasets.mbpp.deprecated_sanitized_mbpp_gen_1e1056 import sanitized_mbpp_datasets

     from .models.hf_internlm.hf_internlm2_7b import models as hf_internlm2_7b_model
@ -6,7 +6,7 @@ from opencompass.tasks import OpenICLInferTask

 with read_base():
     from .summarizers.leaderboard import summarizer
-    from .datasets.humaneval.humaneval_gen_a82cae import humaneval_datasets
+    from .datasets.humaneval.deprecated_humaneval_gen_a82cae import humaneval_datasets

 datasets = [*humaneval_datasets]
@ -1,8 +0,0 @@
-from mmengine.config import read_base
-
-with read_base():
-    from .datasets.collections.base_medium_llama import piqa_datasets, siqa_datasets
-    from .models.mixtral.mixtral_8x7b_32k import models
-
-
-datasets = [*piqa_datasets, *siqa_datasets]
34 configs/eval_mmlu_pro.py Normal file
@ -0,0 +1,34 @@
from mmengine.config import read_base

with read_base():
    from .datasets.mmlu_pro.mmlu_pro_gen_cdbebf import mmlu_pro_datasets

    from .models.qwen.lmdeploy_qwen2_7b_instruct import models as lmdeploy_qwen2_7b_instruct_model
    from .models.hf_llama.lmdeploy_llama3_8b_instruct import models as lmdeploy_llama3_8b_instruct_model

    from .summarizers.mmlu_pro import summarizer
    from .internal.clusters.local import infer_num_worker as infer
    from .internal.clusters.local import eval

datasets = sum([v for k, v in locals().items() if k.endswith('_datasets') or k == 'datasets'], [])
models = sum([v for k, v in locals().items() if k.endswith('_model')], [])

work_dir = 'outputs/debug/mmlu_pro'

# dataset                    version    metric         mode    qwen2-7b-instruct-turbomind    llama-3-8b-instruct-turbomind
# -------------------------  ---------  -------------  ------  -----------------------------  -------------------------------
# mmlu_pro                   -          naive_average  gen     46.18                          43.92
# mmlu_pro_biology           736233     accuracy       gen     63.74                          64.02
# mmlu_pro_business          736233     accuracy       gen     53.23                          46.01
# mmlu_pro_chemistry         736233     accuracy       gen     35.25                          32.42
# mmlu_pro_computer_science  736233     accuracy       gen     47.07                          44.88
# mmlu_pro_economics         736233     accuracy       gen     59.00                          53.79
# mmlu_pro_engineering       736233     accuracy       gen     26.73                          33.54
# mmlu_pro_health            736233     accuracy       gen     47.31                          51.34
# mmlu_pro_history           736233     accuracy       gen     42.78                          42.26
# mmlu_pro_law               736233     accuracy       gen     28.07                          26.98
# mmlu_pro_math              736233     accuracy       gen     53.59                          37.53
# mmlu_pro_philosophy        736233     accuracy       gen     42.28                          42.48
# mmlu_pro_physics           736233     accuracy       gen     39.11                          33.64
# mmlu_pro_psychology        736233     accuracy       gen     60.90                          59.65
# mmlu_pro_other             736233     accuracy       gen     47.40                          46.32
13 configs/models/chatglm/hf_glm4_9b_chat.py Normal file
@ -0,0 +1,13 @@
from opencompass.models import HuggingFacewithChatTemplate

models = [
    dict(
        type=HuggingFacewithChatTemplate,
        abbr='glm-4-9b-chat-hf',
        path='THUDM/glm-4-9b-chat',
        max_out_len=1024,
        batch_size=8,
        run_cfg=dict(num_gpus=1),
        stop_words=['<|endoftext|>', '<|user|>', '<|observation|>'],
    )
]
13 configs/models/chatglm/vllm_chatglm3_6b.py Normal file
@ -0,0 +1,13 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='chatglm3-6b-vllm',
        path='THUDM/chatglm3-6b',
        max_out_len=1024,
        batch_size=16,
        model_kwargs=dict(tensor_parallel_size=1),
        run_cfg=dict(num_gpus=1),
    )
]
14 configs/models/chatglm/vllm_glm4_9b_chat.py Normal file
@ -0,0 +1,14 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='glm-4-9b-chat-vllm',
        path='THUDM/glm-4-9b-chat',
        max_out_len=1024,
        batch_size=16,
        model_kwargs=dict(tensor_parallel_size=1),
        run_cfg=dict(num_gpus=1),
        stop_words=['<|endoftext|>', '<|user|>', '<|observation|>'],
    )
]
@ -6,7 +6,7 @@ models = [
         abbr='deepseek-v2-chat-hf',
         path='deepseek-ai/DeepSeek-V2-Chat',
         max_out_len=1024,
-        batch_size=4,
+        batch_size=2,
         model_kwargs=dict(
             device_map='sequential',
             torch_dtype='torch.bfloat16',
17 configs/models/deepseek/hf_deepseek_v2_lite.py Normal file
@ -0,0 +1,17 @@
from opencompass.models import HuggingFaceBaseModel

models = [
    dict(
        type=HuggingFaceBaseModel,
        abbr='deepseek-v2-lite-hf',
        path='deepseek-ai/DeepSeek-V2-Lite',
        max_out_len=1024,
        batch_size=4,
        model_kwargs=dict(
            device_map='sequential',
            torch_dtype='torch.bfloat16',
            attn_implementation='eager'
        ),
        run_cfg=dict(num_gpus=2),
    )
]
17 configs/models/deepseek/hf_deepseek_v2_lite_chat.py Normal file
@ -0,0 +1,17 @@
from opencompass.models import HuggingFacewithChatTemplate

models = [
    dict(
        type=HuggingFacewithChatTemplate,
        abbr='deepseek-v2-lite-chat-hf',
        path='deepseek-ai/DeepSeek-V2-Lite-Chat',
        max_out_len=1024,
        batch_size=4,
        model_kwargs=dict(
            device_map='sequential',
            torch_dtype='torch.bfloat16',
            attn_implementation='eager'
        ),
        run_cfg=dict(num_gpus=2),
    )
]
15 configs/models/deepseek/lmdeploy_deepseek_67b_base.py Normal file
@ -0,0 +1,15 @@
from opencompass.models import TurboMindModel

models = [
    dict(
        type=TurboMindModel,
        abbr='deepseek-67b-base-turbomind',
        path='deepseek-ai/deepseek-llm-67b-base',
        engine_config=dict(session_len=7168, max_batch_size=16, tp=4),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=4),
    )
]
15 configs/models/deepseek/lmdeploy_deepseek_67b_chat.py Normal file
@ -0,0 +1,15 @@
from opencompass.models import TurboMindModelwithChatTemplate

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='deepseek-67b-chat-turbomind',
        path='deepseek-ai/deepseek-llm-67b-chat',
        engine_config=dict(max_batch_size=16, tp=4),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=4),
    )
]
15 configs/models/deepseek/lmdeploy_deepseek_7b_base.py Normal file
@ -0,0 +1,15 @@
from opencompass.models import TurboMindModel

models = [
    dict(
        type=TurboMindModel,
        abbr='deepseek-7b-base-turbomind',
        path='deepseek-ai/deepseek-llm-7b-base',
        engine_config=dict(session_len=7168, max_batch_size=16, tp=1),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    )
]
15 configs/models/deepseek/lmdeploy_deepseek_7b_chat.py Normal file
@ -0,0 +1,15 @@
from opencompass.models import TurboMindModelwithChatTemplate

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='deepseek-7b-chat-turbomind',
        path='deepseek-ai/deepseek-llm-7b-chat',
        engine_config=dict(max_batch_size=16, tp=1),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    )
]
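One note on the decoding settings repeated across the new TurboMind configs above, as a small illustrative sketch: top_k=1 with a near-zero temperature makes sampling effectively greedy, so benchmark runs are (near-)deterministic:

gen_config = dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024)
# top_k=1 keeps only the most likely token at each step; temperature=1e-6 is a
# numerically safe stand-in for 0 in samplers that divide logits by temperature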
13 configs/models/deepseek/vllm_deepseek_67b_chat.py Normal file
@ -0,0 +1,13 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='deepseek-67b-chat-vllm',
        path='deepseek-ai/deepseek-llm-67b-chat',
        max_out_len=1024,
        batch_size=16,
        model_kwargs=dict(tensor_parallel_size=4),
        run_cfg=dict(num_gpus=4),
    )
]
13 configs/models/deepseek/vllm_deepseek_7b_chat.py Normal file
@ -0,0 +1,13 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='deepseek-7b-chat-vllm',
        path='deepseek-ai/deepseek-llm-7b-chat',
        max_out_len=1024,
        batch_size=16,
        model_kwargs=dict(tensor_parallel_size=1),
        run_cfg=dict(num_gpus=1),
    )
]
15 configs/models/deepseek/vllm_deepseek_moe_16b_base.py Normal file
@ -0,0 +1,15 @@
from opencompass.models import VLLM

models = [
    dict(
        type=VLLM,
        abbr='deepseek-moe-16b-base-hf',
        path='deepseek-ai/deepseek-moe-16b-base',
        model_kwargs=dict(tensor_parallel_size=1, gpu_memory_utilization=0.6),
        max_out_len=1024,
        max_seq_len=8192,
        batch_size=16,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=1),
    )
]
13 configs/models/deepseek/vllm_deepseek_moe_16b_chat.py Normal file
@ -0,0 +1,13 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='deepseek-moe-16b-chat-vllm',
        path='deepseek-ai/deepseek-moe-16b-chat',
        model_kwargs=dict(tensor_parallel_size=1, gpu_memory_utilization=0.6),
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    )
]
15 configs/models/gemma/vllm_gemma_2b.py Normal file
@ -0,0 +1,15 @@
from opencompass.models import VLLM

models = [
    dict(
        type=VLLM,
        abbr='gemma-2b-vllm',
        path='google/gemma-2b',
        model_kwargs=dict(tensor_parallel_size=1, gpu_memory_utilization=0.5),
        max_out_len=1024,
        max_seq_len=8192,
        batch_size=16,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=1),
    )
]
14 configs/models/gemma/vllm_gemma_2b_it.py Normal file
@ -0,0 +1,14 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='gemma-2b-it-vllm',
        path='google/gemma-2b-it',
        model_kwargs=dict(tensor_parallel_size=1, gpu_memory_utilization=0.5),
        max_out_len=1024,
        batch_size=16,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=1),
    )
]
15 configs/models/gemma/vllm_gemma_7b.py Normal file
@ -0,0 +1,15 @@
from opencompass.models import VLLM

models = [
    dict(
        type=VLLM,
        abbr='gemma-7b-vllm',
        path='google/gemma-7b',
        model_kwargs=dict(tensor_parallel_size=1, gpu_memory_utilization=0.5),
        max_out_len=1024,
        max_seq_len=8192,
        batch_size=16,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=1),
    )
]
14 configs/models/gemma/vllm_gemma_7b_it.py Normal file
@ -0,0 +1,14 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='gemma-7b-it-vllm',
        path='google/gemma-7b-it',
        model_kwargs=dict(tensor_parallel_size=1, gpu_memory_utilization=0.5),
        max_out_len=1024,
        batch_size=16,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=1),
    )
]
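For the vLLM-backed configs above, the analogous knobs look like the following sketch (illustrative): temperature=0 requests greedy decoding, matching the intent of the TurboMind top_k=1 settings:

model_kwargs = dict(tensor_parallel_size=1, gpu_memory_utilization=0.5)  # engine-side args
generation_kwargs = dict(temperature=0)  # sampling-side args; temperature=0 -> greedy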
@ -26,7 +26,7 @@ python3 run.py --models hf_internlm2_7b --datasets bbh_gen_98fba6 --debug
 python3 run.py --models hf_internlm2_7b --datasets gsm8k_gen_17d0dc --debug
 python3 run.py --models hf_internlm2_7b --datasets math_4shot_base_gen_db136b --debug
 python3 run.py --models hf_internlm2_7b --datasets TheoremQA_5shot_gen_6f0af8 --debug
-python3 run.py --models hf_internlm2_7b --datasets humaneval_gen_d2537e --debug
+python3 run.py --models hf_internlm2_7b --datasets deprecated_humaneval_gen_d2537e --debug
 python3 run.py --models hf_internlm2_7b --datasets sanitized_mbpp_gen_742f0c --debug
 python3 run.py --models hf_internlm2_7b --datasets lcbench_gen_5ff288 --debug
 python3 run.py --models hf_internlm2_7b --datasets gpqa_ppl_6bf57a --debug
15 configs/models/hf_internlm/lmdeploy_internlm2_1_8b.py Normal file
@ -0,0 +1,15 @@
from opencompass.models import TurboMindModel

models = [
    dict(
        type=TurboMindModel,
        abbr='internlm2-1.8b-turbomind',
        path='internlm/internlm2-1_8b',
        engine_config=dict(session_len=7168, max_batch_size=16, tp=1),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    )
]
@ -1,27 +1,15 @@
-from opencompass.models.turbomind import TurboMindModel
+from opencompass.models import TurboMindModel

 models = [
     dict(
         type=TurboMindModel,
         abbr='internlm2-20b-turbomind',
         path='internlm/internlm2-20b',
-        engine_config=dict(
-            session_len=32768,
-            max_batch_size=32,
-            model_name='internlm2-20b',
-            tp=2,
-        ),
-        gen_config=dict(
-            top_k=1,
-            top_p=0.8,
-            temperature=1.0,
-            max_new_tokens=2000,
-        ),
-        max_out_len=2000,
-        max_seq_len=32768,
-        batch_size=32,
-        concurrency=8,
-        run_cfg=dict(num_gpus=2, num_procs=1),
+        engine_config=dict(session_len=7168, max_batch_size=16, tp=2),
+        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
+        max_seq_len=7168,
+        max_out_len=1024,
+        batch_size=16,
+        run_cfg=dict(num_gpus=2),
     )
 ]
15 configs/models/hf_internlm/lmdeploy_internlm2_7b.py Normal file
@ -0,0 +1,15 @@
from opencompass.models import TurboMindModel

models = [
    dict(
        type=TurboMindModel,
        abbr='internlm2-7b-turbomind',
        path='internlm/internlm2-7b',
        engine_config=dict(session_len=7168, max_batch_size=16, tp=1),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    )
]
15 configs/models/hf_internlm/lmdeploy_internlm2_base_20b.py Normal file
@ -0,0 +1,15 @@
from opencompass.models import TurboMindModel

models = [
    dict(
        type=TurboMindModel,
        abbr='internlm2-base-20b-turbomind',
        path='internlm/internlm2-base-20b',
        engine_config=dict(session_len=7168, max_batch_size=16, tp=2),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=2),
    )
]
15 configs/models/hf_internlm/lmdeploy_internlm2_base_7b.py Normal file
@ -0,0 +1,15 @@
from opencompass.models import TurboMindModel

models = [
    dict(
        type=TurboMindModel,
        abbr='internlm2-base-7b-turbomind',
        path='internlm/internlm2-base-7b',
        engine_config=dict(session_len=7168, max_batch_size=16, tp=1),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    )
]
@ -5,19 +5,11 @@ models = [
         type=TurboMindModelwithChatTemplate,
         abbr='internlm2-chat-1.8b-turbomind',
         path='internlm/internlm2-chat-1_8b',
-        engine_config=dict(
-            max_batch_size=16,
-            tp=1,
-        ),
-        gen_config=dict(
-            top_k=1,
-            temperature=1e-6,
-            top_p=0.9,
-        ),
-        max_seq_len=2048,
+        engine_config=dict(session_len=7168, max_batch_size=16, tp=1),
+        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
+        max_seq_len=7168,
         max_out_len=1024,
-        batch_size=32768,
+        batch_size=16,
         run_cfg=dict(num_gpus=1),
-        stop_words=['</s>', '<|im_end|>'],
     )
 ]
@ -0,0 +1,15 @@
from opencompass.models import TurboMindModelwithChatTemplate

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='internlm2-chat-1.8b-sft-turbomind',
        path='internlm/internlm2-chat-1_8b-sft',
        engine_config=dict(session_len=7168, max_batch_size=16, tp=1),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    )
]
@ -1,36 +1,15 @@
-from opencompass.models.turbomind import TurboMindModel
+from opencompass.models import TurboMindModelwithChatTemplate

-_meta_template = dict(
-    round=[
-        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
-        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
-    ],
-)
-
 models = [
     dict(
-        type=TurboMindModel,
+        type=TurboMindModelwithChatTemplate,
         abbr='internlm2-chat-20b-turbomind',
         path='internlm/internlm2-chat-20b',
-        meta_template=_meta_template,
-        engine_config=dict(
-            session_len=32768,
-            max_batch_size=32,
-            model_name='internlm2-chat-20b',
-            tp=2,
-            stop_words=[2, 92542],
-        ),
-        gen_config=dict(
-            top_k=1,
-            top_p=0.8,
-            temperature=1.0,
-            max_new_tokens=2000,
-        ),
-        max_out_len=2000,
-        max_seq_len=32768,
-        batch_size=32,
-        concurrency=8,
-        run_cfg=dict(num_gpus=2, num_procs=1),
+        engine_config=dict(session_len=7168, max_batch_size=16, tp=2),
+        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
+        max_seq_len=7168,
+        max_out_len=1024,
+        batch_size=16,
+        run_cfg=dict(num_gpus=2),
     )
 ]
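A minimal sketch (hypothetical user message) of the role wrapping the removed _meta_template used to spell out by hand; TurboMindModelwithChatTemplate now derives the same wrapping from the model's own chat template:

human = '<|im_start|>user\nHello<|im_end|>\n'  # role markers from the removed template
assistant_begin = '<|im_start|>assistant\n'    # generation continues from here
prompt = human + assistant_begin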
@ -0,0 +1,15 @@
from opencompass.models import TurboMindModelwithChatTemplate

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='internlm2-chat-20b-sft-turbomind',
        path='internlm/internlm2-chat-20b-sft',
        engine_config=dict(session_len=7168, max_batch_size=16, tp=2),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=2),
    )
]
@ -5,19 +5,11 @@ models = [
         type=TurboMindModelwithChatTemplate,
         abbr='internlm2-chat-7b-turbomind',
         path='internlm/internlm2-chat-7b',
-        engine_config=dict(
-            max_batch_size=16,
-            tp=1,
-        ),
-        gen_config=dict(
-            top_k=1,
-            temperature=1e-6,
-            top_p=0.9,
-        ),
-        max_seq_len=2048,
+        engine_config=dict(session_len=7168, max_batch_size=16, tp=1),
+        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
+        max_seq_len=7168,
         max_out_len=1024,
-        batch_size=32768,
+        batch_size=16,
         run_cfg=dict(num_gpus=1),
-        stop_words=['</s>', '<|im_end|>'],
     )
 ]
Some files were not shown because too many files have changed in this diff.