
* add lveval benchmark
* add LVEval readme file
* update LVEval readme file
* Update configs/eval_bluelm_32k_lveval.py
* Update configs/eval_llama2_7b_lveval.py

Co-authored-by: yuantao <yuantao@infini-ai.com>
Co-authored-by: Mo Li <82895469+DseidLi@users.noreply.github.com>
32 lines | 1.0 KiB | Python
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LVEvalmultifieldqazhDataset(BaseDataset):
    """Loader for the LVEval multifieldqa_zh subset."""

    @staticmethod
    def load(**kwargs):
        # All keyword arguments are forwarded to datasets.load_dataset.
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            confusing_facts = dataset[split]['confusing_facts'][i]
            answer_keywords = dataset[split]['answer_keywords'][i]
            # Append the answer keywords to the gold answers so they are
            # also available during answer matching.
            answers_with_ak = answers + [answer_keywords]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers_with_ak,
                'confusing_facts': confusing_facts,
                'answer_keywords': answer_keywords,
            })
        # Replace the test split with the reshaped records.
        dataset[split] = Dataset.from_list(raw_data)
        return dataset
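Below is a minimal usage sketch. The path and data_files arguments are hypothetical placeholders for illustration; in practice load() receives its keyword arguments from the LVEval dataset configs (e.g. the config files referenced in the commit above) and simply forwards them to datasets.load_dataset.

# Usage sketch (assumed arguments; real values come from the LVEval configs).
# The JSON file name below is hypothetical and would need to provide the fields
# 'input', 'context', 'answers', 'confusing_facts' and 'answer_keywords'.
lveval = LVEvalmultifieldqazhDataset.load(
    path='json',
    data_files={'test': 'multifieldqa_zh.jsonl'},
)
print(lveval['test'][0]['answers'])  # gold answers plus the answer keywords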