Merge d679be0cf6 into d572761cef

commit dbffdff3f0
@@ -823,6 +823,12 @@
    paper: https://arxiv.org/pdf/1704.04683
    configpath: opencompass/configs/datasets/race/race_gen.py
    configpath_llmjudge: ''
- rbench:
    name: R-Bench
    category: Reasoning
    paper: https://arxiv.org/pdf/2505.02018
    configpath: opencompass/configs/datasets/R-Bench/rbench_gen_37cbaf8.py
    configpath_llmjudge: ''
- realtoxicprompts:
    name: RealToxicPrompts
    category: Safety
72  opencompass/configs/datasets/R-Bench/R-Bench.md  Normal file
@@ -0,0 +1,72 @@
# R-Bench

## Introduction

The following introduction comes from the description on the [R-Bench official website](https://evalmodels.github.io/rbench/):

```
R-Bench is a graduate-level multi-disciplinary benchmark for evaluating the complex reasoning capabilities of Large Language Models (LLMs) and Multimodal Large Language Models (MLLMs). R stands for Reasoning.
```

According to the statistics reported for R-Bench, the benchmark spans 19 departments, including mathematics, physics, biology, computer science, and chemistry, and covers over 100 subjects such as Inorganic Chemistry, Chemical Reaction Kinetics, and Electromagnetism. It features 1,094 questions for testing language models and 665 questions tailored for evaluating multimodal reasoning capabilities, available in both English and Chinese.

These questions are meticulously curated to ensure rigorous difficulty calibration, subject balance, and cross-linguistic alignment, making R-Bench an Olympiad-level multi-disciplinary benchmark.

## Official Links
### Paper

[R-Bench: Graduate-level Multi-disciplinary Benchmarks for LLM & MLLM Complex Reasoning Evaluation](https://arxiv.org/abs/2505.02018)

## Evaluation Results

### Language Model Results
```
Model                        Source                                                        Date        Average  RBench-T  RBench-T (zh)
---------------------------  ------------------------------------------------------------  ----------  -------  --------  -------------
OpenAI o1 🥇                  https://openai.com/o1/                                        2024-12-17  69.6     69.0      70.1
Gemini2.0-Flash-Thinking 🥈   https://deepmind.google/technologies/gemini/flash-thinking/  2025-01-21  68.0     68.4      67.5
Doubao1.5Pro 🥉               https://www.volcengine.com/product/doubao                     2025-01-21  62.7     62.0      63.4
GPT-4o                       https://openai.com/index/hello-gpt-4o/                        2024-11-20  52.6     53.6      51.6
Claude3.5-sonnet             https://www.anthropic.com/news/claude-3-5-sonnet              2024-06-20  57.4     57.5      57.3
Qwen2.5-72B                  https://github.com/QwenLM/Qwen2.5                             2024-09-19  52.9     53.7      52.0
Qwen2.5-32B                  https://github.com/QwenLM/Qwen2.5                             2024-09-19  50.4     50.8      49.9
Qwen2.5-7B                   https://github.com/QwenLM/Qwen2.5                             2024-09-19  44.1     43.6      44.5
```
### Multimodal Model Results
```
Model                 Source                                             Date        Average  RBench-M  RBench-M (zh)
--------------------  -------------------------------------------------  ----------  -------  --------  -------------
OpenAI o1 🥇           https://openai.com/o1/                             2024-12-17  53.1     53.2      53.0
Doubao1.5Pro 🥈        https://www.volcengine.com/product/doubao          2025-01-21  40.2     37.9      42.4
Claude-3-5-sonnet 🥉   https://www.anthropic.com/news/claude-3-5-sonnet   2025-04-10  39.0     39.7      38.3
GPT-4o                https://openai.com/index/hello-gpt-4o/             2024-11-20  33.3     33.4      33.2
Qwen2.5-72B           https://github.com/QwenLM/Qwen2.5                  2024-09-19  25.4     25.1      25.7
Qwen2.5-7B            https://github.com/QwenLM/Qwen2.5                  2024-09-19  21.0     19.6      22.3
```
Note:

- RBench-T: text-only questions, used to evaluate language models
- RBench-M: multimodal questions, used to evaluate multimodal models
- Values in the tables are Top-1 accuracy, in %
- (zh) denotes the Chinese version
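To inspect the text track locally, the dataset can be pulled straight from the Hugging Face hub (illustrative snippet; the hub id and the subset name `rbench-t_en` match the loader added in this PR):

```
from datasets import load_dataset

ds = load_dataset('R-Bench/R-Bench', 'rbench-t_en')  # English text track
print(ds['test'][0])
```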
## Reference

```
@inproceedings{guo2025rbench,
  title={R-Bench: Graduate-level Multi-disciplinary Benchmarks for LLM \& MLLM Complex Reasoning Evaluation},
  author={Meng-Hao Guo and Jiajun Xu and Yi Zhang and Jiaxi Song and Haoyang Peng and Yi-Xuan Deng and Xinzhi Dong and Kiyohiro Nakayama and Zhengyang Geng and Chen Wang and Bolin Ni and Guo-Wei Yang and Yongming Rao and Houwen Peng and Han Hu and Gordon Wetzstein and Shi-min Hu},
  year={2025},
  eprint={2505.02018},
  archivePrefix={arXiv},
  primaryClass={cs.CV},
  url={https://arxiv.org/abs/2505.02018},
}
```
76  opencompass/configs/datasets/R-Bench/rbench_gen_37cbaf8.py  Normal file
@@ -0,0 +1,76 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccwithDetailsEvaluator
from opencompass.datasets import RBenchDataset
from opencompass.utils.text_postprocessors import first_option_postprocess

# Each sample carries the question plus six answer options; `target` holds
# the gold letter.
RBench_reader_cfg = dict(input_columns=[
    'RBench_Question_Input', 'RBench_Option_A', 'RBench_Option_B',
    'RBench_Option_C', 'RBench_Option_D', 'RBench_Option_E', 'RBench_Option_F'
],
                         output_column='target')

RBench_datasets = []

system_prompt_en = "Answer the following single choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of the options (e.g. one of ABCDEF). Think step by step before answering."

# Chinese counterpart of the prompt above; it mandates a final line of the
# form “答案是$LETTER” (i.e. 'The answer is $LETTER').
system_prompt_zh = '回答以下单选题。答案的最后一行应采用以下格式:“答案是$LETTER”(不带引号),其中 LETTER 是选项之一(例如 ABCDEF 之一)。回答前请逐步思考。'

# The doubled braces survive f-string evaluation, leaving
# {RBench_Option_A}-style placeholders for OpenCompass to fill per sample.
RBench_infer_en_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                f'{system_prompt_en}\nQuestion: {{RBench_Question_Input}}\nA. {{RBench_Option_A}}\nB. {{RBench_Option_B}}\nC. {{RBench_Option_C}}\nD. {{RBench_Option_D}}\nE. {{RBench_Option_E}}\nF. {{RBench_Option_F}}\nAnswer: '
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

RBench_infer_zh_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt=
                f'{system_prompt_zh}\n问题: {{RBench_Question_Input}}\nA. {{RBench_Option_A}}\nB. {{RBench_Option_B}}\nC. {{RBench_Option_C}}\nD. {{RBench_Option_D}}\nE. {{RBench_Option_E}}\nF. {{RBench_Option_F}}\n答案: '
            ),
        ]),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

# Free-form completions are first reduced to a single option letter, then
# scored by exact-match accuracy with per-sample details.
RBench_eval_cfg = dict(evaluator=dict(type=AccwithDetailsEvaluator),
                       pred_postprocessor=dict(type=first_option_postprocess,
                                               options='ABCDEF'))

RBench_datasets.append(
    dict(
        abbr='R-Bench_en',
        type=RBenchDataset,
        path='opencompass/R-Bench',
        name='R-Bench',
        subset='en',
        reader_cfg=RBench_reader_cfg,
        infer_cfg=RBench_infer_en_cfg,
        eval_cfg=RBench_eval_cfg,
    ))

RBench_datasets.append(
    dict(
        abbr='R-Bench_zh',
        type=RBenchDataset,
        path='opencompass/R-Bench',
        name='R-Bench',
        subset='zh',
        reader_cfg=RBench_reader_cfg,
        infer_cfg=RBench_infer_zh_cfg,
        eval_cfg=RBench_eval_cfg,
    ))
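A quick sanity check of the config above (an illustrative sketch, not part of the diff; the sample row is made up). Because the prompts are f-strings with doubled braces, the column placeholders survive as ordinary `str.format` slots, so the filled prompt can be previewed directly, and `first_option_postprocess` shows how a completion is reduced to a letter before `AccwithDetailsEvaluator` scores it:

```python
# Paste at the bottom of rbench_gen_37cbaf8.py (or run in a REPL with its
# names in scope) to preview one prompt and one post-processed prediction.
row = {
    'RBench_Question_Input': 'Which quantity is conserved in an elastic collision?',
    'RBench_Option_A': 'Momentum only',
    'RBench_Option_B': 'Kinetic energy only',
    'RBench_Option_C': 'Both momentum and kinetic energy',
    'RBench_Option_D': 'Neither momentum nor kinetic energy',
    'RBench_Option_E': 'Charge only',
    'RBench_Option_F': 'Mass only',
}
template = RBench_infer_en_cfg['prompt_template']['template']['round'][0]['prompt']
print(template.format(**row))  # the exact text the model receives

# A completion is reduced to its first matching option letter ('C' here).
pred = 'Momentum and kinetic energy are both conserved. The answer is C'
print(first_option_postprocess(pred, options='ABCDEF'))
```

Note that OpenCompass performs its own placeholder substitution at run time; `str.format` here is only a convenient preview of the same string.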
@@ -131,6 +131,7 @@ from .qasper import *  # noqa: F401, F403
from .qaspercut import *  # noqa: F401, F403
from .QuALITY import *  # noqa: F401, F403
from .race import *  # noqa: F401, F403
from .rbench import *  # noqa: F401, F403
from .realtoxicprompts import *  # noqa: F401, F403
from .reasonbench import ReasonBenchDataset  # noqa: F401, F403
from .record import *  # noqa: F401, F403
37  opencompass/datasets/rbench.py  Normal file
@@ -0,0 +1,37 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from .base import BaseDataset


@LOAD_DATASET.register_module()
class RBenchDataset(BaseDataset):

    @staticmethod
    def load_single(subset='en'):
        # Fetch the text track ('rbench-t') for the requested language from
        # the Hugging Face hub and flatten it into the column names that
        # RBench_reader_cfg expects.
        raw_data = []
        ds = load_dataset('R-Bench/R-Bench', f'rbench-t_{subset}')

        for data in ds['test']:
            raw_data.append({
                'RBench_Question_Input': data['question'],
                'RBench_Option_A': data['A'],
                'RBench_Option_B': data['B'],
                'RBench_Option_C': data['C'],
                'RBench_Option_D': data['D'],
                'RBench_Option_E': data['E'],
                'RBench_Option_F': data['F'],
                'target': data['answer'],
            })
        return Dataset.from_list(raw_data)

    @staticmethod
    def load(subset='en', **kwargs):
        # Extra config keys (e.g. `path`, `name`) arrive via **kwargs and are
        # ignored; loading always goes through the hub id above.
        test_dataset = RBenchDataset.load_single(subset=subset)
        return test_dataset


if __name__ == '__main__':
    dataset = RBenchDataset.load()
    print(dataset)
@@ -235,6 +235,12 @@ DATASETS_MAPPING = {
        "hf_id": "opencompass/race",
        "local": "./data/race/",
    },
    # R-Bench
    "opencompass/R-Bench": {
        "ms_id": "R-Bench/R-Bench",
        "hf_id": "R-Bench/R-Bench",
        "local": "./data/R-Bench",
    },
    # SIQA
    "opencompass/siqa": {
        "ms_id": "opencompass/siqa",
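For context (a hedged sketch, not part of the diff): OpenCompass dataset code usually resolves a `DATASETS_MAPPING` key through the `get_data_path` helper, which picks the local path, the ModelScope id, or the Hugging Face id depending on the `DATASET_SOURCE` environment variable. Note that `RBenchDataset.load` above ignores the `path` passed from the config and always calls `load_dataset('R-Bench/R-Bench', ...)`, so this entry mainly serves tooling that pre-resolves dataset locations.

```python
# Hedged sketch: how the mapping above is typically consumed elsewhere in
# OpenCompass (the exact selection logic lives in opencompass.utils).
from opencompass.utils import get_data_path

path = get_data_path('opencompass/R-Bench')
print(path)  # e.g. './data/R-Bench' when no DATASET_SOURCE override is set
```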