Mirror of https://github.com/open-compass/opencompass.git
[Feature] BABILong Dataset added (#1684)
* update * update * update * update
This commit is contained in:
parent
2fee63f537
commit
e92a5d4230
65
configs/eval_babilong.py
Normal file
@@ -0,0 +1,65 @@
from mmengine.config import read_base

with read_base():
    # Models
    from opencompass.configs.models.hf_internlm.lmdeploy_internlm2_5_7b_chat import (
        models as lmdeploy_internlm2_5_7b_chat_model,
    )
    from opencompass.configs.models.qwen2_5.lmdeploy_qwen2_5_7b_instruct import (
        models as lmdeploy_qwen2_5_7b_instruct_model,
    )
    from opencompass.configs.models.hf_llama.lmdeploy_llama3_1_8b_instruct import (
        models as lmdeploy_llama3_1_8b_instruct_model,
    )
    from opencompass.configs.models.mistral.lmdeploy_ministral_8b_instruct_2410 import (
        models as lmdeploy_ministral_8b_instruct_2410_model,
    )

    # Datasets
    from opencompass.configs.datasets.babilong.babilong_0k_gen import (
        babiLong_0k_datasets,
    )
    from opencompass.configs.datasets.babilong.babilong_4k_gen import (
        babiLong_4k_datasets,
    )
    from opencompass.configs.datasets.babilong.babilong_16k_gen import (
        babiLong_16k_datasets,
    )
    from opencompass.configs.datasets.babilong.babilong_32k_gen import (
        babiLong_32k_datasets,
    )
    from opencompass.configs.datasets.babilong.babilong_128k_gen import (
        babiLong_128k_datasets,
    )
    from opencompass.configs.datasets.babilong.babilong_256k_gen import (
        babiLong_256k_datasets,
    )
    from opencompass.configs.summarizers.groups.babilong import (
        babilong_summary_groups,
    )

datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])

models = sum([v for k, v in locals().items() if k.endswith('_model')], [])
for model in models:
    model['engine_config']['session_len'] = 1024 * 1024
    model['max_seq_len'] = 1024 * 1024
    model['engine_config']['tp'] = 4
    model['run_cfg']['num_gpus'] = 4


summarizer = dict(
    dataset_abbrs=[
        'babilong_0k',
        'babilong_4k',
        'babilong_16k',
        'babilong_32k',
        'babilong_128k',
        'babilong_256k',
    ],
    summary_groups=sum(
        [v for k, v in locals().items() if k.endswith('_summary_groups')], []
    ),
)

work_dir = './outputs/babilong'
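A note on the aggregation idiom in this config: `datasets` and `models` are collected by scanning `locals()` for names with the matching suffix, which is why every import above is aliased to end in `_datasets` or `_model`. A minimal, self-contained sketch of the mechanism (the `alpha`/`beta` names are hypothetical):

```python
# Any module-level list whose name ends in '_datasets' is concatenated,
# in definition order, into the final `datasets` list.
alpha_datasets = [{'abbr': 'a'}]
beta_datasets = [{'abbr': 'b1'}, {'abbr': 'b2'}]

datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
assert [d['abbr'] for d in datasets] == ['a', 'b1', 'b2']
```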
37
opencompass/configs/datasets/babilong/README.md
Normal file
@@ -0,0 +1,37 @@
# BABILong

OpenCompass now supports [BABILong](https://arxiv.org/pdf/2406.10149), a new benchmark for evaluating long-context language models. BABILong measures long-context reasoning over extremely long documents with a diverse set of 20 reasoning tasks, including fact chaining, simple induction, deduction, counting, and handling lists/sets. The benchmark tests a model's ability to reason over facts distributed through long natural text, and tasks of almost arbitrary length can be constructed, so it adapts to the evaluation of new, more powerful models in an extensible and controllable way.

## How to Use

The BABILong dataset is available on Hugging Face: [RMT-team/babilong](https://huggingface.co/datasets/RMT-team/babilong). OpenCompass downloads BABILong automatically. Due to the dataset size, only the data up to 1M tokens is provided; for longer contexts, download the dataset from Hugging Face directly.

The BABILong paper provides 20 tasks in total; OpenCompass ships configurations for 10 of them, organized by context size. You can create your own configurations by following the examples in `opencompass/configs/datasets/babilong/babilong_1m_gen.py`, as in the sketch below.
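For instance, a minimal configuration for the 64k split might look like this (`64k` is a split name `BabiLongDataset` accepts but no ready-made config ships for; the variable names here are hypothetical):

```python
from opencompass.datasets.babilong.babilong import BabiLongDataset, BabiLongEvaluator
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer

babiLong_64k_datasets = []
for task in ['qa1', 'qa2']:  # any subset of qa1..qa10
    babiLong_64k_datasets.append({
        'abbr': f'babilong_{task}_64k',
        'type': BabiLongDataset,
        'path': 'opencompass/babilong',
        'task': task,
        'split_name': '64k',  # must be a split name BabiLongDataset accepts
        'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(round=[
                    dict(role='HUMAN', prompt='{prompt}'),
                    dict(role='BOT', prompt='{answer}\n'),
                ]),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_seq_len=64 * 1024),
        ),
        'eval_cfg': dict(evaluator=dict(type=BabiLongEvaluator)),
    })
```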
OpenCompass provides a demo for evaluating language models on the BABILong dataset:

```bash
opencompass configs/eval_babilong.py
```

OpenCompass also provides reference results for some models on BABILong. The evaluations were run with LMDeploy using default model settings.

| dataset | version | metric | mode | internlm2_5-7b-chat-turbomind | qwen2.5-7b-instruct-turbomind | llama-3_1-8b-instruct-turbomind | ministral-8B-instruct-2410-turbomind |
| ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- |
| babilong_0k | - | naive_average | gen | 76.51 | 80.25 | 76.44 | 76.40 |
| babilong_4k | - | naive_average | gen | 67.55 | 70.35 | 67.41 | 67.92 |
| babilong_16k | - | naive_average | gen | 53.78 | 65.83 | 60.26 | 56.58 |
| babilong_32k | - | naive_average | gen | 50.86 | 62.66 | 59.56 | 53.52 |
| babilong_128k | - | naive_average | gen | 39.33 | 27.79 | 52.01 | 3.20 |
| babilong_256k | - | naive_average | gen | 17.31 | 7.30 | 23.35 | 9.50 |

## Citation

```bibtex
@misc{kuratov2024babilong,
    title={BABILong: Testing the Limits of LLMs with Long Context Reasoning-in-a-Haystack},
    author={Yuri Kuratov and Aydar Bulatov and Petr Anokhin and Ivan Rodkin and Dmitry Sorokin and Artyom Sorokin and Mikhail Burtsev},
    year={2024},
    eprint={2406.10149},
    archivePrefix={arXiv}
}
```
37
opencompass/configs/datasets/babilong/babilong_0k_gen.py
Normal file
@@ -0,0 +1,37 @@
from opencompass.datasets.babilong.babilong import BabiLongDataset, BabiLongEvaluator
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer


babiLong_0k_datasets = []
split_name = '0k'
tasks = ['qa1', 'qa2', 'qa3', 'qa4', 'qa5', 'qa6', 'qa7', 'qa8', 'qa9', 'qa10']


for task in tasks:
    tmp_dataset = {
        'abbr': f'babilong_{task}_{split_name}',
        'type': BabiLongDataset,
        'path': 'opencompass/babilong',
        'task': task,
        'split_name': split_name,
        'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='{prompt}'),
                        dict(role='BOT', prompt='{answer}\n'),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        ),
        'eval_cfg': dict(
            evaluator=dict(type=BabiLongEvaluator),
        ),
    }
    babiLong_0k_datasets.append(tmp_dataset)
38
opencompass/configs/datasets/babilong/babilong_128k_gen.py
Normal file
@@ -0,0 +1,38 @@
from opencompass.datasets.babilong.babilong import BabiLongDataset, BabiLongEvaluator
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer


babiLong_128k_datasets = []
split_name = '128k'
max_seq_len = 128 * 1024
tasks = ['qa1', 'qa2', 'qa3', 'qa4', 'qa5', 'qa6', 'qa7', 'qa8', 'qa9', 'qa10']


for task in tasks:
    tmp_dataset = {
        'abbr': f'babilong_{task}_{split_name}',
        'type': BabiLongDataset,
        'path': 'opencompass/babilong',
        'task': task,
        'split_name': split_name,
        'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='{prompt}'),
                        dict(role='BOT', prompt='{answer}\n'),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_seq_len=max_seq_len),
        ),
        'eval_cfg': dict(
            evaluator=dict(type=BabiLongEvaluator),
        ),
    }
    babiLong_128k_datasets.append(tmp_dataset)
38
opencompass/configs/datasets/babilong/babilong_16k_gen.py
Normal file
@@ -0,0 +1,38 @@
from opencompass.datasets.babilong.babilong import BabiLongDataset, BabiLongEvaluator
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer


babiLong_16k_datasets = []
split_name = '16k'
max_seq_len = 16 * 1024
tasks = ['qa1', 'qa2', 'qa3', 'qa4', 'qa5', 'qa6', 'qa7', 'qa8', 'qa9', 'qa10']


for task in tasks:
    tmp_dataset = {
        'abbr': f'babilong_{task}_{split_name}',
        'type': BabiLongDataset,
        'path': 'opencompass/babilong',
        'task': task,
        'split_name': split_name,
        'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='{prompt}'),
                        dict(role='BOT', prompt='{answer}\n'),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_seq_len=max_seq_len),
        ),
        'eval_cfg': dict(
            evaluator=dict(type=BabiLongEvaluator),
        ),
    }
    babiLong_16k_datasets.append(tmp_dataset)
37
opencompass/configs/datasets/babilong/babilong_1m_gen.py
Normal file
@@ -0,0 +1,37 @@
from opencompass.datasets.babilong.babilong import BabiLongDataset, BabiLongEvaluator
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer


babiLong_1m_datasets = []
split_name = '1m'
tasks = ['qa1', 'qa2', 'qa3', 'qa4', 'qa5', 'qa6', 'qa7', 'qa8', 'qa9', 'qa10']


for task in tasks:
    tmp_dataset = {
        'abbr': f'babilong_{task}_{split_name}',
        'type': BabiLongDataset,
        'path': 'opencompass/babilong',
        'task': task,
        'split_name': split_name,
        'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='{prompt}'),
                        dict(role='BOT', prompt='{answer}\n'),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        ),
        'eval_cfg': dict(
            evaluator=dict(type=BabiLongEvaluator),
        ),
    }
    babiLong_1m_datasets.append(tmp_dataset)
38
opencompass/configs/datasets/babilong/babilong_256k_gen.py
Normal file
@@ -0,0 +1,38 @@
from opencompass.datasets.babilong.babilong import BabiLongDataset, BabiLongEvaluator
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer


babiLong_256k_datasets = []
split_name = '256k'
max_seq_len = 256 * 1024
tasks = ['qa1', 'qa2', 'qa3', 'qa4', 'qa5', 'qa6', 'qa7', 'qa8', 'qa9', 'qa10']


for task in tasks:
    tmp_dataset = {
        'abbr': f'babilong_{task}_{split_name}',
        'type': BabiLongDataset,
        'path': 'opencompass/babilong',
        'task': task,
        'split_name': split_name,
        'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='{prompt}'),
                        dict(role='BOT', prompt='{answer}\n'),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_seq_len=max_seq_len),
        ),
        'eval_cfg': dict(
            evaluator=dict(type=BabiLongEvaluator),
        ),
    }
    babiLong_256k_datasets.append(tmp_dataset)
38
opencompass/configs/datasets/babilong/babilong_2k_gen.py
Normal file
@@ -0,0 +1,38 @@
from opencompass.datasets.babilong.babilong import BabiLongDataset, BabiLongEvaluator
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer


babiLong_2k_datasets = []
split_name = '2k'
max_seq_len = 2 * 1024
tasks = ['qa1', 'qa2', 'qa3', 'qa4', 'qa5', 'qa6', 'qa7', 'qa8', 'qa9', 'qa10']


for task in tasks:
    tmp_dataset = {
        'abbr': f'babilong_{task}_{split_name}',
        'type': BabiLongDataset,
        'path': 'opencompass/babilong',
        'task': task,
        'split_name': split_name,
        'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='{prompt}'),
                        dict(role='BOT', prompt='{answer}\n'),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_seq_len=max_seq_len),
        ),
        'eval_cfg': dict(
            evaluator=dict(type=BabiLongEvaluator),
        ),
    }
    babiLong_2k_datasets.append(tmp_dataset)
38
opencompass/configs/datasets/babilong/babilong_32k_gen.py
Normal file
@@ -0,0 +1,38 @@
from opencompass.datasets.babilong.babilong import BabiLongDataset, BabiLongEvaluator
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer


babiLong_32k_datasets = []
split_name = '32k'
max_seq_len = 32 * 1024
tasks = ['qa1', 'qa2', 'qa3', 'qa4', 'qa5', 'qa6', 'qa7', 'qa8', 'qa9', 'qa10']


for task in tasks:
    tmp_dataset = {
        'abbr': f'babilong_{task}_{split_name}',
        'type': BabiLongDataset,
        'path': 'opencompass/babilong',
        'task': task,
        'split_name': split_name,
        'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='{prompt}'),
                        dict(role='BOT', prompt='{answer}\n'),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_seq_len=max_seq_len),
        ),
        'eval_cfg': dict(
            evaluator=dict(type=BabiLongEvaluator),
        ),
    }
    babiLong_32k_datasets.append(tmp_dataset)
38
opencompass/configs/datasets/babilong/babilong_4k_gen.py
Normal file
@@ -0,0 +1,38 @@
from opencompass.datasets.babilong.babilong import BabiLongDataset, BabiLongEvaluator
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer


babiLong_4k_datasets = []
split_name = '4k'
max_seq_len = 4 * 1024
tasks = ['qa1', 'qa2', 'qa3', 'qa4', 'qa5', 'qa6', 'qa7', 'qa8', 'qa9', 'qa10']


for task in tasks:
    tmp_dataset = {
        'abbr': f'babilong_{task}_{split_name}',
        'type': BabiLongDataset,
        'path': 'opencompass/babilong',
        'task': task,
        'split_name': split_name,
        'reader_cfg': dict(input_columns=['prompt'], output_column='answer'),
        'infer_cfg': dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(role='HUMAN', prompt='{prompt}'),
                        dict(role='BOT', prompt='{answer}\n'),
                    ]
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_seq_len=max_seq_len),
        ),
        'eval_cfg': dict(
            evaluator=dict(type=BabiLongEvaluator),
        ),
    }
    babiLong_4k_datasets.append(tmp_dataset)
37
opencompass/configs/summarizers/groups/babilong.py
Normal file
@@ -0,0 +1,37 @@
default_babilong_tasks = [
    'qa1',
    'qa2',
    'qa3',
    'qa4',
    'qa5',
    'qa6',
    'qa7',
    'qa8',
    'qa9',
    'qa10',
]
context_window_sizes = [
    '0k',
    '1k',
    '2k',
    '4k',
    '8k',
    '16k',
    '32k',
    '64k',
    '128k',
    '256k',
    '512k',
    '1m',
]
babilong_summary_groups = []
for context_window_size in context_window_sizes:
    babilong_summary_groups.append(
        {
            'name': f'babilong_{context_window_size}',
            'subsets': [
                f'babilong_{task}_{context_window_size}'
                for task in default_babilong_tasks
            ],
        }
    )
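To make the grouping concrete, here is a sketch of the dict the loop produces for the '4k' window. The subset names line up with the `abbr` fields in the dataset configs, which is how the summarizer ties per-task scores back to a group average:

```python
default_babilong_tasks = ['qa1', 'qa2', 'qa3', 'qa4', 'qa5',
                          'qa6', 'qa7', 'qa8', 'qa9', 'qa10']
group_4k = {
    'name': 'babilong_4k',
    'subsets': [f'babilong_{task}_4k' for task in default_babilong_tasks],
}
# e.g. the first subset matches the 'abbr' of the qa1 dataset at 4k:
assert group_4k['subsets'][0] == 'babilong_qa1_4k'
```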
@@ -7,6 +7,7 @@ from .anthropics_evals import *  # noqa: F401, F403
from .apps import *  # noqa: F401, F403
from .arc import *  # noqa: F401, F403
from .ax import *  # noqa: F401, F403
from .babilong import *  # noqa: F401, F403
from .bbh import *  # noqa: F401, F403
from .boolq import *  # noqa: F401, F403
from .bustum import *  # noqa: F401, F403
1
opencompass/datasets/babilong/__init__.py
Normal file
@@ -0,0 +1 @@
from .babilong import *  # noqa: F401, F403
106
opencompass/datasets/babilong/babilong.py
Normal file
@@ -0,0 +1,106 @@
# flake8: noqa: F401, E501
import json
import os

from datasets import Dataset

from opencompass.datasets.babilong.babilong_utils import compare_answers
from opencompass.datasets.babilong.prompts import (DEFAULT_PROMPTS,
                                                   DEFAULT_TEMPLATE,
                                                   get_formatted_input)
from opencompass.datasets.base import BaseDataset
from opencompass.openicl import BaseEvaluator
from opencompass.registry import LOAD_DATASET
from opencompass.utils import get_data_path


@LOAD_DATASET.register_module()
class BabiLongDataset(BaseDataset):

    @staticmethod
    def load(
        path,
        task,
        split_name,
        use_instruction=True,
        use_examples=True,
        use_post_prompt=True,
    ) -> Dataset:

        assert task in [
            'qa1', 'qa2', 'qa3', 'qa4', 'qa5',
            'qa6', 'qa7', 'qa8', 'qa9', 'qa10',
        ], "Task must be in ['qa1', 'qa2', 'qa3', 'qa4', 'qa5', 'qa6', 'qa7', 'qa8', 'qa9', 'qa10']"
        assert split_name in [
            '0k', '1k', '2k', '4k', '8k', '16k',
            '32k', '64k', '128k', '256k', '512k', '1m',
        ], "Split name must be in ['0k', '1k', '2k', '4k', '8k', '16k', '32k', '64k', '128k', '256k', '512k', '1m']"

        # configure the prompt
        prompt_cfg = {
            'instruction':
            (DEFAULT_PROMPTS[task]['instruction'] if use_instruction else ''),
            'examples':
            (DEFAULT_PROMPTS[task]['examples'] if use_examples else ''),
            'post_prompt':
            (DEFAULT_PROMPTS[task]['post_prompt'] if use_post_prompt else ''),
            'template':
            DEFAULT_TEMPLATE,
        }

        path = get_data_path(path)
        file = os.path.join(path, task, f'{split_name}.json')

        with open(file, 'r') as f:
            task_data = json.load(f)

        data = []
        for sample in task_data:
            tmp_data = {'prompt': [], 'answer': []}
            target = sample['target']
            context = sample['input']
            question = sample['question']

            input_text = get_formatted_input(
                context,
                question,
                prompt_cfg['examples'],
                prompt_cfg['instruction'],
                prompt_cfg['post_prompt'],
                template=DEFAULT_TEMPLATE,
            )

            tmp_data['prompt'].append(input_text)
            tmp_data['answer'].append(target)
            data.append(tmp_data)
        return Dataset.from_list(data)


class BabiLongEvaluator(BaseEvaluator):

    def score(self, predictions, gold):
        assert len(predictions) == len(gold)
        score = (sum([
            compare_answers(str(ref[0]), pred)
            for pred, ref in zip(predictions, gold)
        ]) / len(predictions) * 100)
        result = {'score': round(score, 2)}
        return result
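For reference, a minimal sketch of how the evaluator scores a batch. The strings here are hypothetical; note that each `gold` entry is a single-element list, matching how `load()` builds the `answer` column:

```python
from opencompass.datasets.babilong.babilong import BabiLongEvaluator

evaluator = BabiLongEvaluator()
predictions = [
    'Answer: The most recent location of Charlie is balcony.',
    'Answer: The bottle is in the kitchen.',
]
gold = [['balcony'], ['garden']]  # one reference per sample, as produced by load()

# compare_answers() lowercases both sides, keeps only the first sentence of the
# prediction, and counts a hit when the target appears as a substring, so this
# batch yields 1 correct out of 2 -> {'score': 50.0}.
print(evaluator.score(predictions, gold))
```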
293
opencompass/datasets/babilong/babilong_utils.py
Normal file
@@ -0,0 +1,293 @@
# flake8: noqa: E501
# Modified from https://github.com/booydar/babilong/blob/main/babilong/babilong_utils.py
import re

import nltk
import numpy as np
import pandas as pd
from torch.utils.data import Dataset


def compare_answers(target, output):
    """Compare target and output answers.

    Takes only the first sentence from output and filters responses when model
    tries to generate examples. We consider prediction correct if target is in
    output.
    """
    target = target.lower()
    output = output.lower()
    # take only the first sentence from output
    output = output.split('.')[0]
    # filter responses when model tries to generate examples
    output = output.split('<context>')[0]
    output = output.split('<example>')[0]

    # we consider prediction correct if target is in output
    if target in output:
        return True

    return False
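# Example behavior (illustrative calls with hypothetical strings):
#   compare_answers('balcony', 'The location is balcony. Extra text.')  -> True
#   compare_answers('garden', 'The location is balcony.')               -> False
#   compare_answers('yes', 'yes <example> John went to...')             -> True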

def get_dataset_df(dataset_path, max_n_facts=None):
    """Preprocess babi text files."""
    with open(dataset_path, 'r') as f:
        texts = f.read().strip()
    texts = texts.split('\n')
    df = pd.DataFrame(texts, columns=['text'])

    # parse samples
    df['phrase_num'] = df.text.apply(lambda x: int(x.split(' ')[0]))
    df.text = df.text.apply(lambda x: x[x.index(' ') + 1:])
    df['answer'] = df.text.apply(lambda x: x[x.index('\t') + 1:]
                                 if '\t' in x else None)
    df['reference_num'] = df.answer.apply(
        lambda x: x
        if x is None else [int(n) for n in re.split('\t| ', x)[1:]])
    df.answer = df.answer.apply(lambda x: x if x is None else x.split('\t')[0])
    df.text = df.text.apply(lambda x: x.split('\t')[0] if '\t' in x else x)

    # mark each sample
    sample_start_inds = list(np.where(df.phrase_num == 1)[0]) + [df.shape[0]]
    for i, (start,
            end) in enumerate(zip(sample_start_inds, sample_start_inds[1:])):
        df.loc[start:end, 'initial_sample_num'] = i

    df.initial_sample_num = df.initial_sample_num.astype(int)

    # multiple questions in sample -> samples with single question
    initial_samples = [
        df[df.initial_sample_num == sn]
        for sn in df.initial_sample_num.unique()
    ]

    single_question_slices = []
    for sample in initial_samples:
        answer_positions = sample[~sample.answer.isna()].index
        slices = [sample.loc[:ans_pos].copy() for ans_pos in answer_positions]
        for i, slc in enumerate(slices):
            slices[i] = slc[(slc.answer.isna()) | (slc.index == slc.index[-1])]
        if max_n_facts is not None:  # drop samples with too many facts
            slices = [slc for slc in slices if slc.shape[0] <= max_n_facts]
        single_question_slices += slices

    df = pd.concat(single_question_slices).reset_index(drop=True)

    # mark each sample again
    sample_start_inds = list(np.where(df.phrase_num == 1)[0]) + [df.shape[0]]
    for i, (start,
            end) in enumerate(zip(sample_start_inds, sample_start_inds[1:])):
        df.loc[start:end, 'sample_num'] = i

    df.sample_num = df.sample_num.astype(int)

    return df


class TaskDataset(Dataset):
    """Babi task loader dataset."""

    def __init__(self, dataset_path, max_n_facts=None):
        self.fact_dataset = get_dataset_df(dataset_path,
                                           max_n_facts=max_n_facts)

    def __getitem__(self, ind):
        slc = self.fact_dataset[self.fact_dataset.sample_num == ind]
        references = slc[slc.phrase_num.isin(
            slc.reference_num.values[-1])].text.values
        sample = {
            'facts': slc.text.values[:-1],
            'question': slc.text.values[-1],
            'answer': slc.answer.values[-1],
            'references': references,
        }
        return sample

    def __len__(self):
        return self.fact_dataset.sample_num.max()


def sum_lengths(sentences):
    return sum([len(s) for s in sentences])


class SentenceSampler:
    """Sampler of background text."""

    def __init__(
        self,
        dataset,
        tokenizer,
        min_sentence_len=10,
        max_sentence_len=None,
        shuffle=False,
        random_seed=42,
    ):
        self.sample_ind = 0
        self.dataset = dataset
        self.sentences = []
        self.tokenizer = tokenizer
        self.min_sentence_len = min_sentence_len
        self.max_sentence_len = max_sentence_len
        self.sentence_tokenizer = nltk.PunktSentenceTokenizer()
        self.shuffle = shuffle
        self.gen = np.random.default_rng(seed=random_seed)

    def get_sample(self, sample_size):
        sample = []
        total_len = 0
        while True:
            sentences = list(self.sentences)
            # add new sentences until sample_size is reached
            for i, sent in enumerate(sentences):
                tokenized = self.tokenizer.encode(sent,
                                                  add_special_tokens=False)
                if not self.length_is_ok(tokenized):
                    continue
                total_len += len(tokenized)
                sample.append(tokenized)
                if total_len >= sample_size:
                    self.sentences = self.sentences[i + 1:]
                    cutoff = total_len - sample_size
                    if cutoff > 0:
                        sample[-1] = sample[-1][:-cutoff]
                    return sample

            self.sentences = []
            # appends new sentences, can be updated to just return new sentences
            self.sample_sentences_(sample_size)

    def sample_sentences_(self, sample_size):
        sentences = []
        while len(sentences) == 0:
            text = self.next_sample_()
            if self.shuffle:
                if len(text) == 0:
                    continue
                # start from a random position in the text
                text = text[self.gen.choice(len(text)):]
                # cut too long texts to speed up tokenization
                text = text[:sample_size * 10]
            sentences += self.sentence_tokenizer.tokenize(text)
            if self.shuffle:
                sentences = sentences[1:-1]
        self.sentences += sentences

    def next_sample_(self):
        if self.shuffle:
            self.total_tokens = 0
            sample_ind = self.gen.choice(len(self.dataset))
            sample = self.dataset[int(sample_ind)]['text']
        else:
            sample = self.dataset[int(self.sample_ind)]['text']
            self.sample_ind += 1
            self.sample_ind = self.sample_ind % len(self.dataset)
        return sample

    def length_is_ok(self, tokenized):
        if (self.max_sentence_len is not None
                and len(tokenized) > self.max_sentence_len):
            return False
        if (self.min_sentence_len is not None
                and len(tokenized) < self.min_sentence_len):
            return False
        return True


class NoiseInjectionDataset(Dataset):
    """Combined dataset for noisy babi QA.

    It's recommended to use sample_size >= 1024 and
    task_end_pct - task_start_pct >= 0.2.
    """

    def __init__(
        self,
        task_dataset,
        noise_sampler,
        tokenizer,
        task_start_pct=None,  # left border of facts in sample, between 0 and 1
        task_end_pct=None,  # right border of facts in sample, between task_start_pct and 1
        sample_size=1024,
        mixed_length_ratio=0.0,  # used for mixed length curriculum, prob for shorter samples
        random_seed=42,
    ):
        self.task_dataset = task_dataset
        self.noise_sampler = noise_sampler
        self.sample_size = sample_size
        self.mixed_length_ratio = mixed_length_ratio
        self.tokenizer = tokenizer
        self.task_start_pct = task_start_pct
        self.task_end_pct = task_end_pct
        if random_seed:
            self.gen = np.random.default_rng(seed=random_seed)

    def __getitem__(self, ind):
        sample = self.task_dataset[ind]
        facts_tok = self.tokenizer(list(sample['facts']))['input_ids']
        question_tok = self.tokenizer(sample['question'])['input_ids']
        answer_tok = self.tokenizer(sample['answer'])['input_ids']

        sample_size = self.get_sample_size()
        task_len = sum_lengths(facts_tok)
        background_text_len = sample_size - task_len
        background_text = self.noise_sampler.get_sample(background_text_len)
        sample['background_text'] = background_text

        if (self.task_start_pct is None
                and self.task_end_pct is None):  # if fact position unspecified
            possible_positions = range(len(background_text) + 1)
        else:
            task_start_ind = int(sample_size * self.task_start_pct)
            task_end_ind = int(sample_size * self.task_end_pct)
            total_facts_len = sum_lengths(facts_tok)

            possible_positions = []  # where can we insert facts?
            current_length = 0
            for i, text in enumerate(background_text):
                if (current_length >= task_start_ind) and (
                        current_length < task_end_ind - total_facts_len):
                    possible_positions.append(i)
                current_length += len(text)

            if len(possible_positions) == 0:
                raise IndexError(
                    f'Unable to insert facts in specified place: {self.task_start_pct, self.task_end_pct}.'
                    f'Total fact length: {total_facts_len}, '
                    f'sentences length: {[len(t) for t in background_text]}. '
                    f'Make the range wider or increase the sample size.')

        fact_positions = self.gen.choice(possible_positions, len(facts_tok))
        fact_positions.sort()
        # positions of facts between noise sentences
        sample['fact_positions'] = fact_positions

        updated_sample = [[] for _ in range(len(background_text) + 1)]
        for fact, pos in zip(facts_tok, fact_positions):
            updated_sample[pos].append(fact)

        for i, s in enumerate(background_text):
            updated_sample[i].append(s)

        flat = [i for s in updated_sample for i in s]
        tokens = [i for s in flat for i in s]

        sample['input_tokens'] = tokens
        sample['question_tokens'] = question_tok
        sample['target_tokens'] = answer_tok

        return sample

    def __len__(self):
        return len(self.task_dataset)

    def get_sample_size(self):
        if isinstance(self.sample_size, list):
            if self.gen.random() > self.mixed_length_ratio:
                return self.gen.choice(self.sample_size)
            return max(self.sample_size)
        else:
            return self.sample_size
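Putting the helpers together, a hedged sketch of how noisy samples can be assembled. The bAbI file path, the GPT-2 tokenizer, and the PG19 background corpus are assumptions for illustration (the BABILong paper uses bAbI task files plus a long-text corpus such as PG19):

```python
from datasets import load_dataset
from transformers import AutoTokenizer

from opencompass.datasets.babilong.babilong_utils import (
    NoiseInjectionDataset, SentenceSampler, TaskDataset)

tokenizer = AutoTokenizer.from_pretrained('gpt2')  # any HF tokenizer

# Facts and questions come from a bAbI task file (hypothetical local path).
task_dataset = TaskDataset('data/tasks_1-20_v1-2/en/qa1_single-supporting-fact_test.txt')

# Background "haystack" text comes from any long-text corpus.
noise_dataset = load_dataset('pg19', split='test')  # assumption: PG19, as in the paper
noise_sampler = SentenceSampler(noise_dataset, tokenizer=tokenizer, shuffle=True)

# Interleave each sample's facts with background sentences up to ~4k tokens.
dataset = NoiseInjectionDataset(task_dataset=task_dataset,
                                noise_sampler=noise_sampler,
                                tokenizer=tokenizer,
                                sample_size=4 * 1024)

sample = dataset[0]
print(tokenizer.decode(sample['input_tokens'])[:200])
```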
516
opencompass/datasets/babilong/prompts.py
Normal file
@@ -0,0 +1,516 @@
# flake8: noqa: E501
SYSTEM_TEMPLATE = '{instruction}\n\n{examples}\n\n{post_prompt}'
USER_TEMPLATE = '<context>\n{context}\n</context>\n\nQuestion: {question}'
DEFAULT_TEMPLATE = f'{SYSTEM_TEMPLATE}\n\n{USER_TEMPLATE}'

CUSTOM_SYSTEM_PROMPTS = {
    # https://github.com/dvlab-research/LongLoRA/blob/2345c6d030f61ac3a031906386a103a5b05e0e6f/inference.py#L18
    'LONGLORA_LLAMA2':
    'You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. '
    'Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. '
    'Please ensure that your responses are socially unbiased and positive in nature.\n\n'
    'If a question does not make any sense, or is not factually coherent, explain why instead of answering '
    'something not correct. If you don\'t know the answer to a question, please don\'t share false information.'
}


def get_formatted_input(
    context,
    question,
    examples,
    instruction,
    post_prompt,
    template=DEFAULT_TEMPLATE,
):
    # instruction - general instruction
    # examples - in-context examples
    # post_prompt - any additional instructions after examples
    # context - text to use for qa
    # question - question to answer based on context
    formatted_input = template.format(
        instruction=instruction,
        examples=examples,
        post_prompt=post_prompt,
        context=context.strip(),
        question=question,
    )
    return formatted_input.strip()


DEFAULT_PROMPTS = {
    'qa1': {
        'instruction':
        'I will give you context with the facts about positions of different persons hidden in some random text '
        'and a question. You need to answer the question based only on the information from the facts. '
        'If a person was in different locations, use the latest location to answer the question.',
        'examples':
        '<example>\n'
        'Charlie went to the hallway. Judith come back to the kitchen. Charlie travelled to balcony. '
        'Where is Charlie?\n'
        'Answer: The most recent location of Charlie is balcony.\n'
        '</example>\n\n'
        '<example>\n'
        'Alan moved to the garage. Charlie went to the beach. Alan went to the shop. Rouse '
        'travelled to balcony. Where is Alan?\n'
        'Answer: The most recent location of Alan is shop.\n'
        '</example>',
        'post_prompt':
        'Always return your answer in the following format: '
        'The most recent location of ’person’ is ’location’. Do not write anything else after that.',
    },
    'qa2': {
        'instruction':
        'I give you context with the facts about locations and actions of different persons '
        'hidden in some random text and a question.'
        'You need to answer the question based only on the information from the facts.\n'
        'If a person got an item in the first location and travelled to the second location '
        'the item is also in the second location. '
        'If a person dropped an item in the first location and moved to the second location '
        'the item remains in the first location.',
        'examples':
        '<example>\n'
        'Charlie went to the kitchen. Charlie got a bottle. Charlie moved to the balcony. '
        'Where is the bottle?\n'
        'Answer: The bottle is in the balcony.\n'
        '</example>\n'
        '<example>\n'
        'Alan moved to the garage. Alan got a screw driver. Alan moved to the kitchen. Where '
        'is the screw driver?\n'
        'Answer: The screw driver is in the kitchen.\n'
        '</example>',
        'post_prompt':
        'Always return your answer in the following format: The ’item’ is in ’location’. '
        'Do not write anything else after that.',
    },
    'qa3': {
        'instruction':
        'I give you context with the facts about locations and actions of different persons '
        'hidden in some random text and a question. '
        'You need to answer the question based only on the information from the facts.\n'
        'If a person got an item in the first location and travelled to the second location '
        'the item is also in the second location. '
        'If a person dropped an item in the first location and moved to the second location '
        'the item remains in the first location.',
        'examples':
        '<example>\n'
        'John journeyed to the bedroom. Mary grabbed the apple. Mary went back to the bathroom. '
        'Daniel journeyed to the bedroom. Daniel moved to the garden. Mary travelled to the kitchen. '
        'Where was the apple before the kitchen?\n'
        'Answer: Before the kitchen the apple was in the bathroom.\n'
        '</example>\n'
        '<example>\n'
        'John went back to the bedroom. John went back to the garden. John went back to the kitchen. '
        'Sandra took the football. Sandra travelled to the garden. Sandra journeyed to the bedroom. '
        'Where was the football before the bedroom?\n'
        'Answer: Before the bedroom the football was in the garden.\n'
        '</example>',
        'post_prompt':
        'Always return your answer in the following format: '
        'Before the $location_1$ the $item$ was in the $location_2$. Do not write anything else after that.',
    },
    'qa4': {
        'instruction':
        'I will give you context with the facts about different people, their location and actions, hidden in '
        'some random text and a question. '
        'You need to answer the question based only on the information from the facts.',
        'examples':
        '<example>\n'
        'The hallway is south of the kitchen. The bedroom is north of the kitchen. '
        'What is the kitchen south of?\n'
        'Answer: bedroom\n'
        '</example>\n'
        '<example>\n'
        'The garden is west of the bedroom. The bedroom is west of the kitchen. What is west of the bedroom?\n'
        'Answer: garden\n'
        '</example>',
        'post_prompt':
        'Your answer should contain only one word - location. Do not write anything else after that.',
    },
    'qa5': {
        'instruction':
        'I will give you context with the facts about locations and their relations hidden in some random text '
        'and a question. You need to answer the question based only on the information from the facts.',
        'examples':
        '<example>\n'
        'Mary picked up the apple there. Mary gave the apple to Fred. Mary moved to the bedroom. '
        'Bill took the milk there. Who did Mary give the apple to?\n'
        'Answer: Fred\n'
        '</example>\n'
        '<example>\n'
        'Jeff took the football there. Jeff passed the football to Fred. Jeff got the milk there. '
        'Bill travelled to the bedroom. Who gave the football?\n'
        'Answer: Jeff\n'
        '</example>\n'
        '<example>\n'
        'Fred picked up the apple there. Fred handed the apple to Bill. Bill journeyed to the bedroom. '
        'Jeff went back to the garden. What did Fred give to Bill?\n'
        'Answer: apple\n'
        '</example>',
        'post_prompt':
        'Your answer should contain only one word. Do not write anything else after that. '
        'Do not explain your answer.',
    },
    'qa6': {
        'instruction':
        'I will give you context with the facts about people and their locations hidden in some random text and a '
        'question. You need to answer the question based only on the information from the facts. '
        'If a person was in different locations, use the latest location the person was in to answer the question.',
        'examples':
        '<example>\n'
        'John travelled to the hallway. John travelled to the garden. Is John in the garden?\n'
        'Answer: yes\n'
        '</example>\n'
        '<example>\n'
        'Mary went to the office. Daniel journeyed to the hallway. Mary went to the bedroom. '
        'Sandra went to the garden. Is Mary in the office?\n'
        'Answer: no\n'
        '</example>\n',
        'post_prompt':
        'Your answer should contain only one word - $yes$ or $no$. Do not write anything else after that. '
        'Do not explain your answer.',
    },
    'qa7': {
        'instruction':
        'I will give you context with the facts about people and objects they carry, hidden in some random text '
        'and a question. You need to answer the question based only on the information from the facts.',
        'examples':
        '<example>\n'
        'Daniel went to the bedroom. Daniel got the apple there. How many objects is Daniel carrying?\n'
        'Answer: one\n'
        '</example>\n'
        '<example>\n'
        'Mary grabbed the apple there. Mary gave the apple to John. How many objects is Mary carrying?\n'
        'Answer: none\n'
        '</example>\n'
        '<example>\n'
        'Sandra travelled to the hallway. Sandra picked up the milk there. Sandra took the apple there. '
        'Mary travelled to the garden. How many objects is Sandra carrying?\n'
        'Answer: two\n'
        '</example>\n',
        'post_prompt':
        'Your answer should contain only one word - $none$ or $number_of_objects$. '
        'Do not write anything else after that. Do not explain your answer.',
    },
    'qa8': {
        'instruction':
        'I will give you context with the facts about people and objects they carry, hidden in some random text '
        'and a question. You need to answer the question based only on the information from the facts.',
        'examples':
        '<example>\n'
        'Sandra travelled to the garden. Mary grabbed the milk there. What is Mary carrying?\n'
        'Answer: milk\n'
        '</example>\n'
        '<example>\n'
        'Mary travelled to the kitchen. Sandra travelled to the office. John travelled to the office. '
        'Sandra discarded the milk there. What is Sandra carrying?\n'
        'Answer: nothing\n'
        '</example>\n'
        '<example>\n'
        'Daniel grabbed the apple there. Mary went to the office. Daniel moved to the garden. '
        'Daniel grabbed the milk there. Mary went to the kitchen. What is Daniel carrying?\n'
        'Answer: apple,milk\n'
        '</example>\n',
        'post_prompt':
        'Your answer should contain only one or two words: $nothing$ or $object$ or $object_1$, $object_2$. '
        'Do not write anything else. Do not explain your answer.',
    },
    'qa9': {
        'instruction':
        'I will give you context with the facts about people and their locations hidden in some random text and '
        'a question. You need to answer the question based only on the information from the facts. '
        'If a person was in different locations, use the latest location the person was in to answer the question.',
        'examples':
        '<example>\n'
        'John is not in the bathroom. Sandra is not in the bedroom. Is John in the bathroom?\n'
        'Answer: no\n'
        '</example>\n'
        '<example>\n'
        'Mary journeyed to the kitchen. John is in the bedroom. Sandra is not in the garden. '
        'Is Mary in the kitchen?\n'
        'Answer: yes\n'
        '</example>\n',
        'post_prompt':
        'Your answer should contain only one word - $yes$ or $no$. Do not write anything else. '
        'Do not explain your answer.',
    },
    'qa10': {
        'instruction':
        'I will give you context with the facts about people and their locations hidden in some random text and a '
        'question. You need to answer the question based only on the information from the facts. '
        'If a person was in different locations, use the latest location the person was in to answer the question.',
        'examples':
        '<example>\n'
        'Bill is in the kitchen. Julie is either in the school or the cinema. Is Bill in the bedroom?\n'
        'Answer: no\n'
        '</example>\n'
        '<example>\n'
        'Fred is in the bedroom. Mary is either in the school or the cinema. Is Mary in the school?\n'
        'Answer: maybe\n'
        '</example>\n'
        '<example>\n'
        'Fred is either in the kitchen or the park. Bill moved to the cinema. Is Bill in the cinema?\n'
        'Answer: yes\n'
        '</example>\n'
        '<context>\n',
        'post_prompt':
        'Your answer should contain only one word - $yes$ or $no$ or $maybe$. Do not write anything else. '
        'Do not explain your answer.',
    },
    'qa11': {
        'instruction':
        'I will give you context with the facts about people and their locations hidden in some random text and a '
        'question. You need to answer the question based only on the information from the facts. '
        'If a person was in different locations, use the latest location the person was in to answer the question.',
        'examples':
        '<example>\n'
        'Daniel journeyed to the hallway. After that he journeyed to the garden. Where is Daniel?\n'
        'Answer: garden\n'
        '</example>\n'
        '<example>\n'
        'Mary moved to the office. Afterwards she journeyed to the kitchen. Daniel went to the hallway. '
        'Then he journeyed to the garden. Where is Mary?\n'
        'Answer: kitchen\n'
        '</example>\n'
        '<example>\n'
        'Sandra moved to the kitchen. After that she went back to the hallway. Sandra moved to the bedroom. '
        'Then she went to the hallway. Mary moved to the bedroom. Afterwards she travelled to the bathroom. '
        'Where is Sandra?\n'
        'Answer: hallway\n'
        '</example>\n'
        '<context>\n',
        'post_prompt':
        'Your answer should contain only one word - location. Do not write anything else after that. '
        'Do not explain your answer.',
    },
    'qa12': {
        'instruction':
        'I will give you context with the facts about people and their locations hidden in some random text and a '
        'question. You need to answer the question based only on the information from the facts. '
        'If a person was in different locations, use the latest location the person was in to answer the question.',
        'examples':
        '<example>\n'
        'Mary and Daniel travelled to the bathroom. John and Daniel travelled to the office. Where is Daniel?\n'
        'Answer: office\n'
        '</example>\n'
        '<example>\n'
        'Sandra and Mary went back to the office. Daniel and Sandra went to the bedroom. Sandra and Mary travelled to the hallway. '
        'John and Mary went to the kitchen. Where is Mary?\n'
        'Answer: kitchen\n'
        '</example>\n'
        '<example>\n'
        'Daniel and Sandra went back to the hallway. Daniel and John moved to the office. Daniel and John moved to the garden. '
        'Daniel and Mary went back to the bathroom. Daniel and John went back to the kitchen. Daniel and Sandra went to the bathroom. '
        'Where is John?\n'
        'Answer: kitchen\n'
        '</example>\n'
        '<context>\n',
        'post_prompt':
        'Your answer should contain only one word - location. Do not write anything else after that. '
        'Do not explain your answer.',
    },
    'qa13': {
        'instruction':
        'I will give you context with the facts about people and their locations hidden in some random text and a '
        'question. You need to answer the question based only on the information from the facts. '
        'If a person was in different locations, use the latest location the person was in to answer the question.',
        'examples':
        '<example>\n'
        'Mary and Daniel travelled to the bathroom. Then they journeyed to the hallway. Where is Daniel?\n'
        'Answer: hallway\n'
        '</example>\n'
        '<example>\n'
        'Daniel and Sandra travelled to the kitchen. After that they journeyed to the hallway. Mary and Daniel travelled to the bedroom. '
        'After that they travelled to the hallway. Where is Sandra?\n'
        'Answer: hallway\n'
        '</example>\n'
        '<example>\n'
        'John and Mary moved to the bathroom. Then they travelled to the office. John and Mary went to the kitchen. '
        'Afterwards they went to the bedroom. John and Sandra moved to the bathroom. Following that they went back to the kitchen. '
        'Where is Mary?\n'
        'Answer: bedroom\n'
        '</example>\n'
        '<context>\n',
        'post_prompt':
        'Your answer should contain only one word - location. Do not write anything else after that. '
        'Do not explain your answer.',
    },
    'qa14': {
        'instruction':
        'I will give you context with the facts about people and their locations hidden in some random text and a '
        'question. You need to answer the question based only on the information from the facts. '
        'If a person was in different locations, use the latest location the person was in to answer the question.',
        'examples':
        '<example>\n'
        'Bill went back to the cinema yesterday. Julie went to the school this morning. Fred went to the park yesterday. '
        'Yesterday Julie went to the office. Where was Julie before the school?\n'
        'Answer: office\n'
        '</example>\n'
        '<example>\n'
        'This morning Fred went to the kitchen. Fred journeyed to the bedroom yesterday. Mary travelled to the bedroom this morning. '
        'Yesterday Mary went to the cinema. Where was Mary before the bedroom?\n'
        'Answer: cinema\n'
        '</example>\n'
        '<example>\n'
        'Yesterday Julie went back to the park. Julie went to the bedroom this morning. Bill journeyed to the cinema yesterday. '
        'This morning Bill went back to the park. This evening Julie went to the school. This afternoon Julie went back to the park. '
        'Where was Julie before the bedroom?\n'
        'Answer: park\n'
        '</example>\n'
        '<context>\n',
        'post_prompt':
        'Your answer should contain only one word - location. Do not write anything else after that. '
        'Do not explain your answer.',
    },
    'qa15': {
        'instruction':
        'I will give you context with the facts about animals, their names and relations. The facts and a question '
        'are hidden in some random text. You need to answer the question based only on the information from the facts.',
        'examples':
        '<example>\n'
        'Mice are afraid of wolves. Gertrude is a mouse. Cats are afraid of sheep. '
        'Winona is a mouse. Sheep are afraid of wolves. Emily is a mouse. Jessica is a wolf. '
        'What is gertrude afraid of?\n'
        'Answer: wolf\n'
        '</example>\n'
        '<example>\n'
        'Mice are afraid of wolves. Gertrude is a mouse. Cats are afraid of sheep. '
        'Winona is a mouse. Sheep are afraid of wolves. Emily is a mouse. Jessica is a wolf. '
        'What is jessica afraid of?\n'
        'Answer: cat\n'
        '</example>\n'
        '<example>\n'
        'Mice are afraid of cats. Wolves are afraid of sheep. Emily is a wolf. '
        'Cats are afraid of sheep. Gertrude is a wolf. Sheep are afraid of cats. Winona is a wolf. '
        'What is emily afraid of?\n'
        'Answer: sheep\n'
        '</example>\n'
        '<context>\n',
        'post_prompt':
        'Your answer should contain only one word - an animal species. Do not write anything else after that. '
        'Do not explain your answer.',
    },
    'qa16': {
        'instruction':
        'I will give you context with the facts about animals, their names and colors. The facts and a question '
        'are hidden in some random text. You need to answer the question based only on the information from the facts.',
        'examples':
        '<example>\n'
        'Lily is a frog. Bernhard is a frog. Bernhard is green. Brian is a lion. Brian is white. '
        'Julius is a swan. Julius is green. Lily is green. Greg is a swan. What color is Greg?\n'
        'Answer: green\n'
        '</example>\n'
        '<example>\n'
        'Julius is a lion. Lily is a rhino. Bernhard is a swan. Lily is white. Bernhard is green. '
        'Greg is a rhino. Greg is gray. Julius is white. Brian is a lion. What color is Brian?\n'
        'Answer: white\n'
        '</example>\n'
        '<example>\n'
        'Brian is a rhino. Julius is a lion. Bernhard is a lion. Greg is a swan. Brian is gray. '
        'Greg is white. Lily is a rhino. Bernhard is yellow. Lily is gray. What color is Julius?\n'
        'Answer: yellow\n'
        '</example>\n'
        '<context>\n',
        'post_prompt':
        'Your answer should contain only one word - a color. Do not write anything else after that. '
        'Do not explain your answer.',
    },
    'qa17': {
        'instruction':
        'I will give you context with the facts about different figures, their location and colors, hidden in '
        'some random text and a question. '
        'You need to answer the question based only on the information from the facts.',
        'examples':
        '<example>\n'
        'The triangle is above the pink rectangle. The blue square is to the left of the triangle. '
        'Is the pink rectangle to the right of the blue square?\n'
        'Answer: yes\n'
        '</example>\n'
        '<example>\n'
        'The red sphere is to the left of the yellow square. The red sphere is below the pink rectangle. '
        'Is the pink rectangle to the left of the yellow square?\n'
        'Answer: yes\n'
        '</example>'
        '<example>\n'
        'The red sphere is above the pink rectangle. The red sphere is to the right of the red square. '
        'Is the pink rectangle above the red square?\n'
        'Answer: no\n'
        '</example>',
        'post_prompt':
        'Your answer should contain only one word - $yes$ or $no$. Do not write anything else. '
        'Do not explain your answer.',
    },
    'qa18': {
        'instruction':
        'I will give you context with the facts about different objects and their sizes, hidden in '
        'some random text and a question. '
        'You need to answer the question based only on the information from the facts.',
        'examples':
        '<example>\n'
        'The box of chocolates fits inside the chest. The box is bigger than the chest. The box is bigger than the suitcase. '
        'The suitcase fits inside the box. The container is bigger than the box of chocolates. Does the box fit in the box of chocolates?\n'
        'Answer: no\n'
        '</example>\n'
        '<example>\n'
        'The suitcase is bigger than the container. The container fits inside the box. The chest is bigger than the chocolate.'
        'The suitcase fits inside the box. The chest fits inside the box. Does the chocolate fit in the box?\n'
        'Answer: yes\n'
        '</example>'
        '<example>\n'
        'The chocolate fits inside the box of chocolates. The suitcase fits inside the box. The chocolate fits inside the box. '
        'The box is bigger than the box of chocolates. The suitcase is bigger than the box of chocolates. Is the chocolate bigger than the box?\n'
        'Answer: no\n'
        '</example>',
        'post_prompt':
        'Your answer should contain only one word - $yes$ or $no$. Do not write anything else. '
        'Do not explain your answer.',
    },
    'qa19': {
        'instruction':
        'I will give you context with the facts about different places and their locations, hidden in '
        'some random text and a question. '
        'You need to answer the question based only on the information from the facts.',
        'examples':
        '<example>\n'
        'The office is east of the hallway. The kitchen is north of the office. The garden is west of the bedroom. '
        'The office is west of the garden. The bathroom is north of the garden. How do you go from the kitchen to the garden?\n'
        'Answer: s,e\n'
        '</example>\n'
        '<example>\n'
        'The bedroom is west of the hallway. The office is east of the garden. The garden is north of the kitchen. '
        'The kitchen is north of the bathroom. The hallway is west of the garden. How do you go from the kitchen to the hallway?\n'
        'Answer: n,w\n'
        '</example>\n'
        '<example>\n'
        'The bedroom is south of the hallway. The bathroom is east of the office. The kitchen is west of the garden. '
        'The garden is south of the office. The office is south of the bedroom. How do you go from the garden to the bedroom?\n'
        'Answer: n,n\n'
        '</example>\n',
        'post_prompt':
        'Your answer should contain only two letters, separated by a comma - ordinal directions. You can choose the letters from '
        '$n$, $s$, $e$ and $w$. Do not write anything else after that.',
    },
    'qa20': {
        'instruction':
        'I will give you context with the facts about people, their locations and condition hidden in some random text and a '
        'question. You need to answer the question based only on the information from the facts. '
        'If a person was in different locations, use the latest location the person was in to answer the question.',
        'examples':
        '<example>\n'
        'Sumit is tired. Where will sumit go?\n'
        'Answer: bedroom\n'
        '</example>\n'
        '<example>\n'
        'Yann is hungry. Yann journeyed to the kitchen. Why did yann go to the kitchen?\n'
        'Answer: hungry\n'
        '</example>\n'
        '<example>\n'
        'Antoine is thirsty. Yann is tired. Yann went back to the bedroom. Yann picked up the pajamas there.'
        'Jason is thirsty. Antoine went back to the kitchen. Why did antoine go to the kitchen?\n'
        'Answer: thirsty\n'
        '</example>\n'
        '<context>\n',
        'post_prompt':
        'Your answer should contain only one word - a person condition or a place. Do not write anything else after that. '
        'Do not explain your answer.',
    },
}
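As a quick illustration of how these pieces fit together (the context and question strings below are hypothetical; the qa1 entries are the defaults defined above):

```python
from opencompass.datasets.babilong.prompts import (DEFAULT_PROMPTS,
                                                   DEFAULT_TEMPLATE,
                                                   get_formatted_input)

cfg = DEFAULT_PROMPTS['qa1']
prompt = get_formatted_input(
    'Charlie went to the hallway. <long distractor text> Charlie travelled to balcony.',
    'Where is Charlie?',
    cfg['examples'],
    cfg['instruction'],
    cfg['post_prompt'],
    template=DEFAULT_TEMPLATE,
)
# The result places instruction, examples and post_prompt first, then the
# context wrapped in <context>...</context>, then 'Question: Where is Charlie?'.
print(prompt)
```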
@@ -327,6 +327,11 @@ DATASETS_MAPPING = {
        "hf_id": "",
        "local": "./data/mmmlu_lite",
    },
    "opencompass/babilong": {
        "ms_id": "",
        "hf_id": "",
        "local": "./data/babilong/data/",
    },
}

DATASETS_URL = {
@@ -526,4 +531,8 @@ DATASETS_URL = {
        "url": "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/WikiBench.zip",
        "md5": "6dac1d1a3133fe1effff185cbf71d928",
    },
    "/babilong": {
        "url": "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/babilong.zip",
        "md5": "e400864c31bc58d29eaa3e199751f99b",
    },
}
@@ -9,6 +9,7 @@ fuzzywuzzy
h5py
huggingface_hub<=0.24.7
immutabledict
importlib-metadata
jieba
json5
mmengine-lite