OpenCompass/configs/datasets/obqa/obqa_ppl_1defe8.py
Xingjun.Wang edab1c07ba
[Feature] Support ModelScope datasets (#1289)
* add ceval, gsm8k modelscope support

* update race, mmlu, arc, cmmlu, commonsenseqa, humaneval and unittest

* update bbh, flores, obqa, siqa, storycloze, summedits, winogrande, xsum datasets

* format file

* format file

* update dataset format

* support ms_dataset

* update dataset for modelscope support

* merge myl_dev and update test_ms_dataset

* update dataset for modelscope support

* update readme

* update eval_api_zhipu_v2

* remove unused code

* add get_data_path function

* update readme

* remove tydiqa japanese subset

* add ceval, gsm8k modelscope support

* update race, mmlu, arc, cmmlu, commonsenseqa, humaneval and unittest

* update bbh, flores, obqa, siqa, storycloze, summedits, winogrande, xsum datasets

* format file

* format file

* update dataset format

* support ms_dataset

* update dataset for modelscope support

* merge myl_dev and update test_ms_dataset

* update readme

* update dataset for modelscope support

* update eval_api_zhipu_v2

* remove unused code

* add get_data_path function

* remove tydiqa japanese subset

* update util

* remove .DS_Store

* fix md format

* move util into package

* update docs/get_started.md

* restore eval_api_zhipu_v2.py, add environment setting

* Update dataset

* Update

* Update

* Update

* Update

---------

Co-authored-by: Yun lin <yunlin@U-Q9X2K4QV-1904.local>
Co-authored-by: Yunnglin <mao.looper@qq.com>
Co-authored-by: Yun lin <yunlin@laptop.local>
Co-authored-by: Yunnglin <maoyl@smail.nju.edu.cn>
Co-authored-by: zhangsongyang <zhangsongyang@pjlab.org.cn>
2024-07-29 13:48:32 +08:00

52 lines
1.7 KiB
Python

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import OBQADataset
_input_columns = [
['question_stem', 'A', 'B', 'C', 'D'],
['question_stem', 'A', 'B', 'C', 'D', 'fact1'],
]
_template = [{
'A': '{question_stem} {A}',
'B': '{question_stem} {B}',
'C': '{question_stem} {C}',
'D': '{question_stem} {D}',
}, {
'A': 'Given the fact {fact1}, we know that {question_stem} {A}',
'B': 'Given the fact {fact1}, we know that {question_stem} {B}',
'C': 'Given the fact {fact1}, we know that {question_stem} {C}',
'D': 'Given the fact {fact1}, we know that {question_stem} {D}',
}]
# Two dataset entries: the main test split and the fact-augmented split.
# Both load through OBQADataset; only abbr/path/name differ.
obqa_datasets = [
    dict(abbr=_abbr, type=OBQADataset, path=_path, name=_name)
    for _abbr, _path, _name in [
        ('openbookqa', 'opencompass/openbookqa_test', 'main'),
        ('openbookqa_fact', 'opencompass/openbookqa_fact', 'additional'),
    ]
]
# Attach reader/infer/eval configs to each dataset entry. The two entries
# share everything except their input columns and prompt template.
# NOTE: obqa_reader_cfg / obqa_infer_cfg / obqa_eval_cfg are deliberately
# kept as module-level (non-underscore) names, matching the original layout.
for _i, _entry in enumerate(obqa_datasets):
    obqa_reader_cfg = dict(
        input_columns=_input_columns[_i], output_column='answerKey')
    obqa_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=_template[_i],
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=PPLInferencer),
    )
    obqa_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
    _entry.update(
        reader_cfg=obqa_reader_cfg,
        infer_cfg=obqa_infer_cfg,
        eval_cfg=obqa_eval_cfg,
    )