OpenCompass/opencompass/datasets/triviaqarc.py
Xingjun.Wang edab1c07ba
[Feature] Support ModelScope datasets (#1289)
* add ceval, gsm8k modelscope support

* update race, mmlu, arc, cmmlu, commonsenseqa, humaneval and unittest

* update bbh, flores, obqa, siqa, storycloze, summedits, winogrande, xsum datasets

* format file

* format file

* update dataset format

* support ms_dataset

* update dataset for modelscope support

* merge myl_dev and update test_ms_dataset

* update dataset for modelscope support

* update readme

* update eval_api_zhipu_v2

* remove unused code

* add get_data_path function

* update readme

* remove tydiqa japanese subset

* update util

* remove .DS_Store

* fix md format

* move util into package

* update docs/get_started.md

* restore eval_api_zhipu_v2.py, add environment setting

* Update dataset

* Update

---------

Co-authored-by: Yun lin <yunlin@U-Q9X2K4QV-1904.local>
Co-authored-by: Yunnglin <mao.looper@qq.com>
Co-authored-by: Yun lin <yunlin@laptop.local>
Co-authored-by: Yunnglin <maoyl@smail.nju.edu.cn>
Co-authored-by: zhangsongyang <zhangsongyang@pjlab.org.cn>
2024-07-29 13:48:32 +08:00

61 lines · 2.0 KiB · Python

import json
import os

from datasets import Dataset, DatasetDict

from opencompass.registry import LOAD_DATASET
from opencompass.utils import get_data_path

from .base import BaseDataset


@LOAD_DATASET.register_module()
class TriviaQArcDataset(BaseDataset):

    @staticmethod
    def load(path: str):
        # Resolve the dataset root on the local filesystem.
        path = get_data_path(path, local_mode=True)
        dataset_dict = DatasetDict()
        split = 'dev'
        dev_list = []

        # Verified web dev split: candidate answers are the aliases plus the
        # human answers, and the evidence is the first web search result,
        # truncated to 100,000 characters.
        web_dev = os.path.join(path, 'qa', 'verified-web-dev.json')
        with open(web_dev, 'r') as f:
            web_dev_json = json.load(f)
        for x in web_dev_json['Data']:
            cand_answers = x['Answer']['Aliases'] + x['Answer']['HumanAnswers']
            question = x['Question']
            evidence = ''
            if x['SearchResults']:
                x_path = os.path.join(path, 'evidence', 'web',
                                      x['SearchResults'][0]['Filename'])
                with open(x_path, 'r') as f:
                    evidence = f.read(100000)
            dev_list.append({
                'answer': cand_answers,
                'question': question,
                'evidence': evidence,
            })

        # Verified Wikipedia dev split: candidate answers are the aliases
        # only, and the evidence is the first linked entity page.
        wiki_dev = os.path.join(path, 'qa', 'verified-wikipedia-dev.json')
        with open(wiki_dev, 'r') as f:
            wiki_dev_json = json.load(f)
        for x in wiki_dev_json['Data']:
            cand_answers = x['Answer']['Aliases']
            question = x['Question']
            evidence = ''
            if x['EntityPages']:
                x_path = os.path.join(path, 'evidence', 'wikipedia',
                                      x['EntityPages'][0]['Filename'])
                with open(x_path, 'r') as f:
                    evidence = f.read(100000)
            dev_list.append({
                'answer': cand_answers,
                'question': question,
                'evidence': evidence,
            })

        # Both verified dev sets are merged into a single 'dev' split.
        dataset_dict[split] = Dataset.from_list(dev_list)
        return dataset_dict
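
The loader can be exercised directly to sanity-check a local copy of the data before wiring it into an evaluation config. A minimal sketch follows; the ./data/triviaqa-rc path is an assumed example of a directory containing the qa/ and evidence/ folders read above, not something prescribed by this file.

# Illustrative sketch only; the local data location is an assumption.
from opencompass.datasets.triviaqarc import TriviaQArcDataset

ds = TriviaQArcDataset.load(path='./data/triviaqa-rc')
sample = ds['dev'][0]
print(sample['question'])        # question text
print(sample['answer'][:3])      # first few candidate answer strings
print(len(sample['evidence']))   # evidence document, capped at 100,000 chars

Each record exposes exactly the three fields built by load() ('question', 'evidence', 'answer'), so a reader config would treat 'question' and 'evidence' as inputs and 'answer' as the reference column.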