import json

from datasets import Dataset

from opencompass.registry import LOAD_DATASET
from opencompass.utils import get_data_path

from .base import BaseDataset


@LOAD_DATASET.register_module()
class MaxminDataset(BaseDataset):

    @staticmethod
    def load(test_path, answer_path=None):
        """Load the test set and, when an answer file is given, the gold
        answers."""
        test_path = get_data_path(test_path)
        if answer_path is not None:
            answer_path = get_data_path(answer_path)
            # Each answer line has the form 'idx<CODESPLIT>label'.
            with open(answer_path, 'r', encoding='utf-8') as answer_f:
                answers = {}
                for line in answer_f.readlines():
                    line = line.strip()
                    answers[line.split('<CODESPLIT>')[0]] = line.split(
                        '<CODESPLIT>')[1]
        datasets = []
        with open(test_path, 'r') as test_f:
            test_data = json.load(test_f)
            for item in test_data:
                dataset = dict()
                # Rejoin the natural-language and program-language token
                # lists into whitespace-separated strings.
                dataset['nl_tokens'] = ' '.join(item['nl_tokens'])
                dataset['pl_tokens'] = ' '.join(item['pl_tokens'])
                if answer_path is not None:
                    # A label of 'max' maps to option 'A'; otherwise 'B'.
                    dataset['answer'] = 'A' if answers[
                        item['idx']] == 'max' else 'B'
                else:
                    dataset['answer'] = ''
                datasets.append(dataset)
        return Dataset.from_list(datasets)
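A minimal usage sketch, assuming the test file is a JSON list of items with 'idx', 'nl_tokens' and 'pl_tokens' fields and the answer file holds one 'idx<CODESPLIT>max' or 'idx<CODESPLIT>min' line per example; the file paths below are hypothetical, not taken from this repository:

# Illustrative only: the paths are assumptions, not the repo's actual data layout.
ds = MaxminDataset.load(
    test_path='data/clozeTest-maxmin/python/clozeTest.json',   # hypothetical path
    answer_path='data/clozeTest-maxmin/python/answers.txt',    # hypothetical path
)
print(len(ds), ds[0]['answer'])  # e.g. 'A' when the reference label is 'max'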