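# OpenCompass dataset config for FollowBench with LLM-as-judge ('llmeval')
# scoring: one subjective dataset entry is built per language split below.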
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import LMEvaluator
from opencompass.datasets import FollowBenchDataset, followbench_postprocess

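# Each converted example supplies the instruction for the model under test
# and a pre-built judge prompt; the 'judge' column carries the per-example
# info consumed in the judge stage.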
subjective_reader_cfg = dict(
    input_columns=['instruction', 'judge_prompt'],
    output_column='judge',
)

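# Chinese and English LLM-judged FollowBench splits, stored in converted
# form under data_path.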
subjective_all_sets = [
    'followbench_llmeval_cn', 'followbench_llmeval_en',
]
data_path = 'data/subjective/followbench/converted_data'

followbench_llmeval_datasets = []

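# Build one dataset config per split: the model under test answers the raw
# instruction, then an LLM judge scores the response via the judge prompt.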
for _name in subjective_all_sets:
    subjective_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt='{instruction}'
                ),
            ]),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_out_len=2048),
    )

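    # Judge stage: LMEvaluator sends {judge_prompt} to the judge model and
    # followbench_postprocess turns its verdicts into scores.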
    subjective_eval_cfg = dict(
        evaluator=dict(
            type=LMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(round=[
                    dict(
                        role='HUMAN',
                        prompt='{judge_prompt}'
                    ),
                ]),
            ),
            dict_postprocessor=dict(type=followbench_postprocess),
        ),
        pred_role='BOT',
    )

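    # Register the per-split dataset config.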
    followbench_llmeval_datasets.append(
        dict(
            abbr=f'{_name}',
            type=FollowBenchDataset,
            path=data_path,
            name=_name,
            mode='singlescore',
            cate='llm',
            reader_cfg=subjective_reader_cfg,
            infer_cfg=subjective_infer_cfg,
            eval_cfg=subjective_eval_cfg,
        ))
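
# Usage sketch (an assumption, not part of this file): a top-level run
# config would typically pull the list in via mmengine's read_base() and
# expose it as `datasets`, roughly:
#
#     from mmengine.config import read_base
#
#     with read_base():
#         # hypothetical relative path; adjust to this file's location
#         from .followbench_llmeval import followbench_llmeval_datasets
#
#     datasets = [*followbench_llmeval_datasets]
#
# A judge model must also be set up in the run config for LMEvaluator to
# have something to score with.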