import csv
import json
import os.path as osp

from datasets import Dataset, DatasetDict

from opencompass.registry import LOAD_DATASET

from .base import BaseDataset

|
@LOAD_DATASET.register_module()
class CEvalDataset(BaseDataset):
    """C-Eval dataset loader.

    Reads ``{name}_{split}.csv`` for the ``dev``, ``val`` and ``test``
    splits from ``path`` and wraps them in a ``DatasetDict``.
    """

    @staticmethod
    def load(path: str, name: str):
        """Load one C-Eval subject.

        Args:
            path: Root directory containing ``dev``/``val``/``test``
                sub-directories.
            name: Subject name, e.g. ``computer_network``.

        Returns:
            DatasetDict with one Dataset per split that has at least
            one data row.
        """
        splits = {}
        for subset in ('dev', 'val', 'test'):
            csv_path = osp.join(path, subset, f'{name}_{subset}.csv')
            rows = []
            with open(csv_path, encoding='utf-8') as fp:
                csv_rows = csv.reader(fp)
                columns = next(csv_rows)  # first line is the header
                for record in csv_rows:
                    entry = dict(zip(columns, record))
                    # The test split lacks these columns; fill with empty
                    # strings so every split shares one schema.
                    entry.setdefault('explanation', '')
                    entry.setdefault('answer', '')
                    rows.append(entry)
            # Matches the original behavior: a split whose file holds
            # only a header contributes no entry at all.
            if rows:
                splits[subset] = Dataset.from_list(rows)
        return DatasetDict(splits)


class CEvalDatasetClean(BaseDataset):
    """C-Eval loader that tags each ``val`` row with a contamination
    label (``is_clean``) taken from the Contamination_Detector project.
    """

    # load the contamination annotations of CEval from
    # https://github.com/liyucheng09/Contamination_Detector
    @staticmethod
    def load_contamination_annotations(path, split='val'):
        """Return the contamination annotations for *split*.

        The annotations are downloaded from GitHub on first use and
        cached as JSON under ``path/split`` so later calls are offline.

        Args:
            path: Dataset root directory (must contain a ``split``
                sub-directory where the cache file is written).
            split: Only ``'val'`` is supported.

        Returns:
            dict mapping ``'{subject}-{row_index}'`` to an annotation
            entry whose element 0 is the clean/contaminated label.

        Raises:
            AssertionError: If *split* is not ``'val'``.
            requests.HTTPError: If the download fails.
        """
        import requests

        assert split == 'val', 'Now we only have annotations for val set'
        annotation_cache_path = osp.join(
            path, split, 'ceval_contamination_annotations.json')
        if osp.exists(annotation_cache_path):
            # Cache hit: no network access needed.
            with open(annotation_cache_path, 'r', encoding='utf-8') as f:
                annotations = json.load(f)
            return annotations
        link_of_annotations = 'https://github.com/liyucheng09/Contamination_Detector/releases/download/v0.1.1rc/ceval_annotations.json'  # noqa
        # Bound the request so a dead network cannot hang the loader, and
        # fail fast on HTTP errors instead of caching an error page.
        response = requests.get(link_of_annotations, timeout=60)
        response.raise_for_status()
        annotations = json.loads(response.text)
        with open(annotation_cache_path, 'w', encoding='utf-8') as f:
            json.dump(annotations, f)
        return annotations

    @staticmethod
    def load(path: str, name: str):
        """Load one C-Eval subject, annotating the ``val`` split.

        Args:
            path: Root directory containing ``dev``/``val``/``test``
                sub-directories.
            name: Subject name, e.g. ``computer_network``.

        Returns:
            DatasetDict; each ``val`` item carries an extra
            ``is_clean`` field (``'not labeled'`` when no annotation
            exists for that row).
        """
        dataset = {}
        for split in ['dev', 'val', 'test']:
            annotations = None  # only populated for the val split
            if split == 'val':
                annotations = CEvalDatasetClean.load_contamination_annotations(
                    path, split)
            filename = osp.join(path, split, f'{name}_{split}.csv')
            with open(filename, encoding='utf-8') as f:
                reader = csv.reader(f)
                header = next(reader)
                for row_index, row in enumerate(reader):
                    item = dict(zip(header, row))
                    # The test split lacks these columns; keep one schema
                    # across splits.
                    item.setdefault('explanation', '')
                    item.setdefault('answer', '')
                    if split == 'val':
                        # Annotation keys follow '{subject}-{row_index}'.
                        row_id = f'{name}-{row_index}'
                        if row_id in annotations:
                            item['is_clean'] = annotations[row_id][0]
                        else:
                            item['is_clean'] = 'not labeled'
                    dataset.setdefault(split, []).append(item)
        dataset = {i: Dataset.from_list(dataset[i]) for i in dataset}
        return DatasetDict(dataset)