OpenCompass/opencompass/datasets/piqa.py
Xingjun.Wang edab1c07ba
[Feature] Support ModelScope datasets (#1289)
* add ceval, gsm8k modelscope support

* update race, mmlu, arc, cmmlu, commonsenseqa, humaneval and unittest

* update bbh, flores, obqa, siqa, storycloze, summedits, winogrande, xsum datasets

* format file

* format file

* update dataset format

* support ms_dataset

* update dataset for modelscope support

* merge myl_dev and update test_ms_dataset

* update dataset for modelscope support

* update readme

* update eval_api_zhipu_v2

* remove unused code

* add get_data_path function

* update readme

* remove tydiqa japanese subset

* update util

* remove .DS_Store

* fix md format

* move util into package

* update docs/get_started.md

* restore eval_api_zhipu_v2.py, add environment setting

* Update dataset

* Update

---------

Co-authored-by: Yun lin <yunlin@U-Q9X2K4QV-1904.local>
Co-authored-by: Yunnglin <mao.looper@qq.com>
Co-authored-by: Yun lin <yunlin@laptop.local>
Co-authored-by: Yunnglin <maoyl@smail.nju.edu.cn>
Co-authored-by: zhangsongyang <zhangsongyang@pjlab.org.cn>
2024-07-29 13:48:32 +08:00

179 lines · 6.7 KiB · Python

import json
import os
from os import environ

from datasets import Dataset, DatasetDict

from opencompass.registry import LOAD_DATASET
from opencompass.utils import get_data_path

from .base import BaseDataset


@LOAD_DATASET.register_module()
class PIQADataset(BaseDataset):

    @staticmethod
    def load_single(path, data_filename, label_filename):
        data_path = os.path.join(path, data_filename)
        label_path = os.path.join(path, label_filename)
        dataset = []
        with open(data_path, 'r', encoding='utf-8') as f:
            data_lines = f.readlines()
        with open(label_path, 'r', encoding='utf-8') as f:
            label_lines = f.readlines()
        # Every JSONL record must have a matching label line.
        assert len(data_lines) == len(label_lines)
        for data, label in zip(data_lines, label_lines):
            i = json.loads(data.strip())
            i['label'] = int(label.strip())
            del i['id']
            dataset.append(i)

        return Dataset.from_list(dataset)

    @staticmethod
    def load(path):
        path = get_data_path(path)
        if environ.get('DATASET_SOURCE') == 'ModelScope':
            from modelscope import MsDataset
            ms_dataset = MsDataset.load(path)
            dataset = DatasetDict({
                'train': ms_dataset['train'],
                'validation': ms_dataset['validation']
            })
        else:
            train_dataset = PIQADataset.load_single(path, 'train.jsonl',
                                                    'train-labels.lst')
            val_dataset = PIQADataset.load_single(path, 'dev.jsonl',
                                                  'dev-labels.lst')
            dataset = DatasetDict({
                'train': train_dataset,
                'validation': val_dataset
            })
        return dataset
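

# Illustrative note (not part of the upstream file): `load_single` above
# assumes the standard local PIQA layout of two parallel files per split,
# e.g. for the train split:
#
#   train.jsonl       one JSON record per line:
#                     {"goal": "...", "sol1": "...", "sol2": "...", "id": "..."}
#   train-labels.lst  one integer per line, where line i is the gold label
#                     for record i of train.jsonl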


@LOAD_DATASET.register_module()
class PIQADatasetV2(BaseDataset):

    @staticmethod
    def load_single(path, data_filename, label_filename):
        data_path = os.path.join(path, data_filename)
        label_path = os.path.join(path, label_filename)
        dataset = []
        with open(data_path, 'r', encoding='utf-8') as f:
            data_lines = f.readlines()
        with open(label_path, 'r', encoding='utf-8') as f:
            label_lines = f.readlines()
        assert len(data_lines) == len(label_lines)
        for data, label in zip(data_lines, label_lines):
            i = json.loads(data.strip())
            label = int(label.strip())
            # A negative label marks an unlabelled example; otherwise map
            # 0 -> 'A' and 1 -> 'B'.
            if label < 0:
                i['answer'] = 'NULL'
            else:
                i['answer'] = 'AB'[label]
            del i['id']
            dataset.append(i)

        return Dataset.from_list(dataset)

    @staticmethod
    def load(path):
        path = get_data_path(path)
        if environ.get('DATASET_SOURCE') == 'ModelScope':
            from modelscope import MsDataset
            dataset = DatasetDict()
            for split in ['train', 'validation']:
                ms_dataset = MsDataset.load(path, split=split)
                dataset_list = []
                for item in ms_dataset:
                    label = item['label']
                    dataset_list.append({
                        'goal': item['goal'],
                        'sol1': item['sol1'],
                        'sol2': item['sol2'],
                        'answer': 'NULL' if label < 0 else 'AB'[label]
                    })
                dataset[split] = Dataset.from_list(dataset_list)
        else:
            train_dataset = PIQADatasetV2.load_single(path, 'train.jsonl',
                                                      'train-labels.lst')
            val_dataset = PIQADatasetV2.load_single(path, 'dev.jsonl',
                                                    'dev-labels.lst')
            dataset = DatasetDict({
                'train': train_dataset,
                'validation': val_dataset
            })
        return dataset
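

# Note on the answer mapping used by PIQADatasetV2 (illustrative): indexing
# into the string 'AB' turns an integer label into an answer letter, since
# 'AB'[0] == 'A' and 'AB'[1] == 'B'; a negative label (e.g. an unlabelled
# test item) falls back to the sentinel string 'NULL'.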


@LOAD_DATASET.register_module()
class PIQADatasetV3(BaseDataset):

    @staticmethod
    def load_single(path, data_filename, label_filename):
        data_path = os.path.join(path, data_filename)
        label_path = os.path.join(path, label_filename)
        dataset = []
        with open(data_path, 'r', encoding='utf-8') as f:
            data_lines = f.readlines()
        with open(label_path, 'r', encoding='utf-8') as f:
            label_lines = f.readlines()
        assert len(data_lines) == len(label_lines)
        for data, label in zip(data_lines, label_lines):
            i = json.loads(data.strip())
            i['label'] = int(label.strip())
            # Some preprocessing: capitalise the goal, then match the case
            # of the solutions to whether the goal ends a full sentence.
            i['goal'] = i['goal'][0].upper() + i['goal'][1:]
            if i['goal'].endswith('?') or i['goal'].endswith('.'):
                i['sol1'] = i['sol1'][0].upper() + i['sol1'][1:]
                i['sol2'] = i['sol2'][0].upper() + i['sol2'][1:]
            else:
                i['sol1'] = i['sol1'][0].lower() + i['sol1'][1:]
                i['sol2'] = i['sol2'][0].lower() + i['sol2'][1:]
            del i['id']
            dataset.append(i)

        return Dataset.from_list(dataset)

    @staticmethod
    def load(path):
        path = get_data_path(path)
        if environ.get('DATASET_SOURCE') == 'ModelScope':
            from modelscope import MsDataset
            dataset = DatasetDict()
            for split in ['train', 'validation']:
                ms_dataset = MsDataset.load(path, split=split)
                dataset_list = []
                for item in ms_dataset:
                    label = item['label']
                    # Apply the same case normalisation as load_single.
                    goal = item['goal'][0].upper() + item['goal'][1:]
                    if goal.endswith('?') or goal.endswith('.'):
                        sol1 = item['sol1'][0].upper() + item['sol1'][1:]
                        sol2 = item['sol2'][0].upper() + item['sol2'][1:]
                    else:
                        sol1 = item['sol1'][0].lower() + item['sol1'][1:]
                        sol2 = item['sol2'][0].lower() + item['sol2'][1:]
                    dataset_list.append({
                        'goal': goal,
                        'sol1': sol1,
                        'sol2': sol2,
                        'label': label
                    })
                dataset[split] = Dataset.from_list(dataset_list)
        else:
            train_dataset = PIQADatasetV3.load_single(path, 'train.jsonl',
                                                      'train-labels.lst')
            val_dataset = PIQADatasetV3.load_single(path, 'dev.jsonl',
                                                    'dev-labels.lst')
            dataset = DatasetDict({
                'train': train_dataset,
                'validation': val_dataset
            })
        return dataset
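

# Minimal usage sketch (an assumption, not part of the upstream file): the
# local path below is hypothetical and would normally come from a dataset
# config; with DATASET_SOURCE unset, the local-file branch is exercised.
if __name__ == '__main__':
    piqa = PIQADatasetV2.load(path='./data/piqa')
    print(piqa)  # DatasetDict with 'train' and 'validation' splits
    # Each validation row carries 'goal', 'sol1', 'sol2' and 'answer'.
    print(piqa['validation'][0])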