Mirror of https://github.com/open-compass/opencompass.git
commit 89a8a8917b (parent c2d4717be2)
configs/datasets/QuALITY/QuALITY.md (new file, 56 lines)
@@ -0,0 +1,56 @@
# QuALITY

## Introduction

The following introduction comes from the description on the [QuALITY Leaderboard](https://nyu-mll.github.io/quality/):

```
QuALITY is a multiple-choice question answering dataset with context passages in English that have an average length of about 5,000 tokens.
```

The questions are divided into two difficulty levels: easy and hard.
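For reference, each line of the dev file read by the loader added in this commit (`opencompass/datasets/QuALITY.py`) is a JSON object shaped roughly as sketched below; the field values are invented purely for illustration.

```python
# Illustrative only (not real data): one JSONL line as QuALITYDataset.load() expects it.
# Each entry in `questions` is flattened into a separate four-way multiple-choice sample.
example_line = {
    'article': 'Full passage text, on average about 5,000 tokens long ...',
    'questions': [
        {
            'question': 'Why does the crew abandon the station?',
            'options': ['Option A text', 'Option B text',
                        'Option C text', 'Option D text'],
            'gold_label': 2,   # 1-indexed; the loader maps it to the letter 'B'
            'difficult': 1,    # 1 = hard split, 0 = easy split
        },
    ],
}
```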
## Official links

### Paper

[QuALITY: Question Answering with Long Input Texts, Yes!](https://arxiv.org/pdf/2112.08608.pdf)

### Repository

[nyu-mll/quality](https://github.com/nyu-mll/quality)

## Evaluation results

```
dataset    version    metric    mode    qwen1.5-7b-chat-hf    qwen1.5-14b-chat-hf    qwen1.5-72b-chat-hf
---------  ---------  --------  ------  --------------------  ---------------------  ---------------------
QuALITY    ed2404     easy_acc  gen     62.39                 68.17                  76.69
QuALITY    ed2404     hard_acc  gen     49.27                 56.22                  63.96
QuALITY    ed2404     all_acc   gen     54.65                 60.88                  68.84
```
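To run an evaluation like this, the dataset config is normally pulled into a top-level evaluation config via `read_base`. Below is a minimal sketch assuming the standard OpenCompass config layout; the model import is only a placeholder and can be replaced by any config under `configs/models`.

```python
from mmengine.config import read_base

with read_base():
    # Dataset config added in this commit.
    from .datasets.QuALITY.QuALITY_gen import QuALITY_datasets
    # Placeholder model config (assumption): swap in whichever model you want to test.
    from .models.hf_internlm.hf_internlm_7b import models

datasets = [*QuALITY_datasets]
```

Such a config file is then passed to OpenCompass's `run.py` in the usual way.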
## Reference

```
@inproceedings{pang-etal-2022-quality,
    title = "{Q}u{ALITY}: Question Answering with Long Input Texts, Yes!",
    author = "Pang, Richard Yuanzhe and
      Parrish, Alicia and
      Joshi, Nitish and
      Nangia, Nikita and
      Phang, Jason and
      Chen, Angelica and
      Padmakumar, Vishakh and
      Ma, Johnny and
      Thompson, Jana and
      He, He and
      Bowman, Samuel",
    booktitle = "Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
    month = jul,
    year = "2022",
    address = "Seattle, United States",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.naacl-main.391",
    pages = "5336--5358",
    abstract = "To enable building and testing models on long-document comprehension, we introduce QuALITY, a multiple-choice QA dataset with context passages in English that have an average length of about 5,000 tokens, much longer than typical current models can process. Unlike in prior work with passages, our questions are written and validated by contributors who have read the entire passage, rather than relying on summaries or excerpts. In addition, only half of the questions are answerable by annotators working under tight time constraints, indicating that skimming and simple search are not enough to consistently perform well. Our baseline models perform poorly on this task (55.4{\%}) and significantly lag behind human performance (93.5{\%}).",
}
```
configs/datasets/QuALITY/QuALITY_gen.py (new file, 4 lines)
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .QuALITY_gen_c407cb import QuALITY_datasets  # noqa: F401, F403
configs/datasets/QuALITY/QuALITY_gen_c407cb.py (new file, 38 lines)
@@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import QuALITYDataset, QuALITYEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess

QuALITY_reader_cfg = dict(
    input_columns=['article', 'question', 'A', 'B', 'C', 'D'],
    output_column='gold_label',
)

QuALITY_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role="HUMAN",
                prompt=
                "Read the article, and answer the question.\n\nArticle:\n{article}\n\nQ: {question}\n\nA. {A}\nB. {B}\nC. {C}\nD. {D}"
            ),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer))

QuALITY_eval_cfg = dict(
    evaluator=dict(type=QuALITYEvaluator),
    pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'),
    pred_role='BOT')

QuALITY_datasets = [
    dict(
        abbr='QuALITY',
        type=QuALITYDataset,
        path='./data/QuALITY/QuALITY.v1.0.1.htmlstripped.dev',
        reader_cfg=QuALITY_reader_cfg,
        infer_cfg=QuALITY_infer_cfg,
        eval_cfg=QuALITY_eval_cfg),
]
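As a quick illustration (not part of the commit), the single-turn HUMAN prompt defined above renders as follows for one sample; the question and options here are made up. The model's free-form answer is later reduced to a single letter by `first_option_postprocess(options='ABCD')`.

```python
# Fill the template from QuALITY_infer_cfg with a made-up sample to show what the
# model is actually asked (values are illustrative only).
prompt = ('Read the article, and answer the question.\n\n'
          'Article:\n{article}\n\nQ: {question}\n\n'
          'A. {A}\nB. {B}\nC. {C}\nD. {D}').format(
              article='... full passage text ...',
              question='Why does the crew abandon the station?',
              A='A storm is coming.',
              B='They are ordered home.',
              C='Supplies run out.',
              D='The captain falls ill.')
print(prompt)
```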
opencompass/datasets/QuALITY.py (new file, 59 lines)
@@ -0,0 +1,59 @@
import json

from datasets import Dataset

from opencompass.openicl.icl_evaluator import BaseEvaluator
from opencompass.registry import LOAD_DATASET

from .base import BaseDataset


@LOAD_DATASET.register_module()
class QuALITYDataset(BaseDataset):

    @staticmethod
    def load(path: str):
        dataset_list = []
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                line = json.loads(line)
                # Each article carries several questions; flatten every
                # question into its own four-option sample.
                for question in line['questions']:
                    dataset_list.append({
                        'article': line['article'],
                        'question': question['question'],
                        'A': question['options'][0],
                        'B': question['options'][1],
                        'C': question['options'][2],
                        'D': question['options'][3],
                        # gold_label is 1-indexed in the raw file.
                        'gold_label': 'ABCD'[question['gold_label'] - 1],
                        'difficult': question['difficult']
                    })
        return Dataset.from_list(dataset_list)


class QuALITYEvaluator(BaseEvaluator):

    def score(self, predictions, references, test_set):
        assert len(predictions) == len(references)
        easy, hard, all = [], [], []
        for pred, refer, test in zip(predictions, references, test_set):
            answer = pred == refer
            all.append(answer)
            # difficult == 0 marks the easy split; everything else is hard.
            if test['difficult'] == 0:
                easy.append(answer)
            else:
                hard.append(answer)
        return dict(easy_acc=sum(easy) / len(easy) * 100,
                    hard_acc=sum(hard) / len(hard) * 100,
                    all_acc=sum(all) / len(all) * 100)
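A minimal sketch (not from the repo) of how the evaluator behaves on a toy input, using the fields produced by `QuALITYDataset.load()` and assuming `BaseEvaluator` needs no constructor arguments:

```python
# Toy example: one easy and two hard questions, two of three predictions correct.
evaluator = QuALITYEvaluator()
print(evaluator.score(
    predictions=['A', 'C', 'B'],
    references=['A', 'B', 'B'],
    test_set=[{'difficult': 0}, {'difficult': 1}, {'difficult': 1}],
))
# -> {'easy_acc': 100.0, 'hard_acc': 50.0, 'all_acc': 66.66...}
```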
opencompass/datasets/__init__.py (modified)
@@ -78,6 +78,7 @@ from .piqa import *  # noqa: F401, F403
 from .py150 import *  # noqa: F401, F403
 from .qasper import *  # noqa: F401, F403
 from .qaspercut import *  # noqa: F401, F403
+from .QuALITY import *  # noqa: F401, F403
 from .race import *  # noqa: F401, F403
 from .realtoxicprompts import *  # noqa: F401, F403
 from .reasonbench import ReasonBenchDataset  # noqa: F401, F403