mirror of https://github.com/open-compass/opencompass.git
synced 2025-05-30 16:03:24 +08:00

* add ceval, gsm8k ModelScope support
* update race, mmlu, arc, cmmlu, commonsenseqa, humaneval and unittest
* update bbh, flores, obqa, siqa, storycloze, summedits, winogrande, xsum datasets
* format files
* update dataset format
* support ms_dataset
* update datasets for ModelScope support
* merge myl_dev and update test_ms_dataset
* update readme
* update eval_api_zhipu_v2
* remove unused code
* add get_data_path function
* remove tydiqa japanese subset
* update util
* remove .DS_Store
* fix md format
* move util into package
* update docs/get_started.md
* restore eval_api_zhipu_v2.py, add environment setting
* update dataset

---------

Co-authored-by: Yun lin <yunlin@U-Q9X2K4QV-1904.local>
Co-authored-by: Yunnglin <mao.looper@qq.com>
Co-authored-by: Yun lin <yunlin@laptop.local>
Co-authored-by: Yunnglin <maoyl@smail.nju.edu.cn>
Co-authored-by: zhangsongyang <zhangsongyang@pjlab.org.cn>
54 lines
1.6 KiB
Python
import json
import re
import string
from os import environ

from datasets import Dataset, DatasetDict

from opencompass.openicl.icl_evaluator import BaseEvaluator
from opencompass.registry import ICL_EVALUATORS, LOAD_DATASET
from opencompass.utils import get_data_path
from opencompass.utils.text_postprocessors import general_postprocess

from .base import BaseDataset


@LOAD_DATASET.register_module()
class lambadaDataset(BaseDataset):

    @staticmethod
    def load(path):
        path = get_data_path(path)
        if environ.get('DATASET_SOURCE') == 'ModelScope':
            # Load from the ModelScope hub; MsDataset.load already returns
            # a split-indexed dataset, so it can be handed back directly.
            from modelscope import MsDataset
            dataset = MsDataset.load(path)
            return dataset
        else:
            # Otherwise read a local JSON-lines file: one JSON object per
            # line, collected into a single 'test' split.
            dataset = []
            with open(path, 'r', encoding='utf-8') as f:
                for line in f:
                    dataset.append(json.loads(line))
            dataset = Dataset.from_list(dataset)
            return DatasetDict({'test': dataset})
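
# A minimal usage sketch (the local path below is hypothetical; in an
# OpenCompass config the registered path is resolved via get_data_path).
# Setting the environment variable DATASET_SOURCE=ModelScope routes loading
# through the ModelScope hub instead of a local JSON-lines file:
#
#     data = lambadaDataset.load('./data/lambada/test.jsonl')
#     print(data['test'][0])

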
@ICL_EVALUATORS.register_module()
class LambadaEvaluator(BaseEvaluator):

    def __init__(self) -> None:
        super().__init__()

    def score(self, predictions, references):
        if len(predictions) != len(references):
            return {
                'error': 'predictions and references have different '
                'length'
            }
        score = 0.0
        for pred, refer in zip(predictions, references):
            # Keep only the first whitespace-separated token of the
            # prediction, then cut it at the first punctuation character.
            # (string.punctuation happens to form a valid character class
            # here: its '\' escapes the ']' that follows it.)
            pred = pred.strip().split(' ')[0]
            pred = re.split(f'[{string.punctuation}]', pred)[0]
            # Count a hit when the normalized prediction matches the
            # normalized reference exactly.
            score += general_postprocess(pred) == general_postprocess(refer)
        score = 100.0 * score / len(predictions)
        return dict(accuracy=score)
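

# A minimal sketch of how the evaluator behaves (illustrative strings, not
# real model output): the first token of each prediction is compared with
# the reference after both pass through general_postprocess.
if __name__ == '__main__':
    evaluator = LambadaEvaluator()
    print(evaluator.score(predictions=['plane. It was'], references=['plane']))
    # -> {'accuracy': 100.0}
    print(evaluator.score(predictions=['dog'], references=['cat']))
    # -> {'accuracy': 0.0}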