[Dataset] Add SciKnowEval Dataset (#2070)

* first

* first

* first

* first

* SciKnowEval

* fix hash

* fix dataset-index & use official llm_judge_postprocess

* fix dataset-index.yml

* use official llmjudge_postprocess

* fix lint

* fix lint

* fix lint

* fix lint

* fix lint

* merge with main

---------

Co-authored-by: Linchen Xiao <xxllcc1993@gmail.com>
huihui1999 2025-05-12 17:23:44 +08:00 committed by GitHub
parent 8aa18df368
commit 345674f700
5 changed files with 438 additions and 0 deletions

dataset-index.yml

@@ -1065,6 +1065,12 @@
    paper: https://arxiv.org/pdf/2402.09391
    configpath: opencompass/configs/datasets/SmolInstruct/smolinstruct_gen.py
    configpath_llmjudge: ''
- SciKnowEval:
    name: SciKnowEval
    category: Science
    paper: https://arxiv.org/abs/2406.09098
    configpath: opencompass/configs/datasets/SciKnowEval/SciKnowEval_gen_ebe47d.py
    configpath_llmjudge: opencompass/configs/datasets/SciKnowEval/SciKnowEval_llmjudge_gen_ebe47d.py
- internsandbox:
    name: InternSandbox
    category: Reasoning/Code/Agent
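Note (not part of the diff): the two configpath entries above point at the two config variants added below. Assuming the standard opencompass.configs import layout, either one can be pulled into a run config with read_base; a minimal sketch:

from mmengine.config import read_base

with read_base():
    # generation-style scoring (rule-based SciKnowEvalEvaluator)
    from opencompass.configs.datasets.SciKnowEval.SciKnowEval_gen_ebe47d import \
        sciknoweval_datasets as sciknoweval_gen_datasets
    # LLM-as-judge scoring (GenericLLMEvaluator)
    from opencompass.configs.datasets.SciKnowEval.SciKnowEval_llmjudge_gen_ebe47d import \
        sciknoweval_datasets as sciknoweval_llmjudge_datasets

datasets = sciknoweval_gen_datasets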

opencompass/configs/datasets/SciKnowEval/SciKnowEval_gen_ebe47d.py

@@ -0,0 +1,92 @@
from opencompass.datasets import SciKnowEvalDataset, SciKnowEvalEvaluator
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
ZERO_SHOT_PROMPT = '{q4}'
# Reader configuration
reader_cfg = dict(
input_columns=[
'prompt',
'question',
'choices',
'label',
'answerKey',
'type',
'domain',
'details',
'answer',
'q4'
],
output_column='answerKey',
)
# Inference configuration
infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=ZERO_SHOT_PROMPT, # prompt mode: zero-shot
),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
# Evaluation configuration
eval_cfg = dict(
evaluator=dict(type=SciKnowEvalEvaluator),
pred_role='BOT',
)
sciknoweval_dataset_biology = dict(
type=SciKnowEvalDataset,
abbr='sciknoweval_biology',
path='hicai-zju/SciKnowEval',
prompt_mode='zero-shot',
subset='biology',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg,
)
sciknoweval_dataset_chemistry = dict(
type=SciKnowEvalDataset,
abbr='sciknoweval_chemistry',
path='hicai-zju/SciKnowEval',
subset='chemistry',
prompt_mode='zero-shot',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg,
)
sciknoweval_dataset_material = dict(
type=SciKnowEvalDataset,
abbr='sciknoweval_material',
path='hicai-zju/SciKnowEval',
subset='material',
prompt_mode='zero-shot',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg,
)
sciknoweval_dataset_physics = dict(
type=SciKnowEvalDataset,
abbr='sciknoweval_physics',
path='hicai-zju/SciKnowEval',
prompt_mode='zero-shot',
subset='physics',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg,
)
sciknoweval_datasets = [
    sciknoweval_dataset_biology,
    sciknoweval_dataset_chemistry,
    sciknoweval_dataset_physics,
    sciknoweval_dataset_material,
]
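Usage note (not part of the diff): these four dataset dicts follow the usual OpenCompass generation-config pattern, so they can be paired with any model config and launched through the standard entry point. A rough sketch below; the model choice is purely illustrative.

from mmengine.config import read_base
from opencompass.models import HuggingFacewithChatTemplate

with read_base():
    from opencompass.configs.datasets.SciKnowEval.SciKnowEval_gen_ebe47d import \
        sciknoweval_datasets

datasets = sciknoweval_datasets
models = [
    dict(
        type=HuggingFacewithChatTemplate,
        abbr='qwen2.5-7b-instruct-hf',    # illustrative model choice
        path='Qwen/Qwen2.5-7B-Instruct',  # illustrative model choice
        max_out_len=1024,
        batch_size=8,
        run_cfg=dict(num_gpus=1),
    )
]
# launch with: opencompass <this_config>.py  (or: python run.py <this_config>.py)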

opencompass/configs/datasets/SciKnowEval/SciKnowEval_llmjudge_gen_ebe47d.py

@@ -0,0 +1,232 @@
from opencompass.datasets import SciKnowEvalDataset
from opencompass.datasets import generic_llmjudge_postprocess
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.evaluator import GenericLLMEvaluator
ZERO_SHOT_PROMPT = '{q4}'
GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.
Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
<Original Question Begin>: Q: {q4}\n<Original Question End>\n\n
<Gold Target Begin>: \n{answerKey}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()
# Reader configuration
reader_cfg = dict(
input_columns=[
'prompt',
'question',
'choices',
'label',
'answerKey',
'type',
'domain',
'details',
'answer',
'q4'
],
output_column='answerKey',
)
# Inference configuration
infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(
role='HUMAN',
prompt=ZERO_SHOT_PROMPT, # prompt mode: zero-shot
),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer),
)
# Evaluation configuration
eval_cfg_biology = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE),
],
),
),
dataset_cfg=dict(
type=SciKnowEvalDataset,
path='hicai-zju/SciKnowEval',
prompt_mode='zero-shot',
subset='biology',
reader_cfg=reader_cfg,
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
)
eval_cfg_chemistry = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE),
],
),
),
dataset_cfg=dict(
type=SciKnowEvalDataset,
path='hicai-zju/SciKnowEval',
prompt_mode='zero-shot',
reader_cfg=reader_cfg,
subset='chemistry',
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
)
eval_cfg_material = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE),
],
),
),
dataset_cfg=dict(
type=SciKnowEvalDataset,
path='hicai-zju/SciKnowEval',
prompt_mode='zero-shot',
reader_cfg=reader_cfg,
subset='material',
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
)
eval_cfg_physics = dict(
evaluator=dict(
type=GenericLLMEvaluator,
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin=[
dict(
role='SYSTEM',
fallback_role='HUMAN',
prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
)
],
round=[
dict(role='HUMAN', prompt=GRADER_TEMPLATE),
],
),
),
dataset_cfg=dict(
type=SciKnowEvalDataset,
path='hicai-zju/SciKnowEval',
prompt_mode='zero-shot',
reader_cfg=reader_cfg,
subset='physics',
),
judge_cfg=dict(),
dict_postprocessor=dict(type=generic_llmjudge_postprocess),
),
)
sciknoweval_dataset_biology = dict(
type=SciKnowEvalDataset,
abbr='sciknoweval_biology_llmjudge',
path='hicai-zju/SciKnowEval',
prompt_mode='zero-shot',
subset='biology',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg_biology,
)
sciknoweval_dataset_chemistry = dict(
type=SciKnowEvalDataset,
abbr='sciknoweval_chemistry_llmjudge',
path='hicai-zju/SciKnowEval',
subset='chemistry',
prompt_mode='zero-shot',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg_chemistry,
)
sciknoweval_dataset_material = dict(
type=SciKnowEvalDataset,
abbr='sciknoweval_material_llmjudge',
path='hicai-zju/SciKnowEval',
subset='material',
prompt_mode='zero-shot',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg_material,
)
sciknoweval_dataset_physics = dict(
type=SciKnowEvalDataset,
abbr='sciknoweval_physics_llmjudge',
path='hicai-zju/SciKnowEval',
prompt_mode='zero-shot',
subset='physics',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg_physics,
)
sciknoweval_datasets = [
    sciknoweval_dataset_biology,
    sciknoweval_dataset_chemistry,
    sciknoweval_dataset_physics,
    sciknoweval_dataset_material,
]
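Note (not part of the diff): judge_cfg is left empty in all four eval configs above, so a judge model has to be supplied at run time. A minimal sketch of what that could look like with an OpenAI-compatible judge; the model name, endpoint, and key handling below are placeholders:

from opencompass.models import OpenAISDK

judge_cfg = dict(
    type=OpenAISDK,
    abbr='llm-judge',                # placeholder
    path='gpt-4o-mini',              # placeholder judge model
    key='ENV',                       # read the key from OPENAI_API_KEY
    openai_api_base='https://api.openai.com/v1',
    query_per_second=2,
    max_out_len=1024,
    max_seq_len=8192,
    batch_size=8,
    temperature=0.0,
)

# e.g. eval_cfg_biology['evaluator']['judge_cfg'] = judge_cfg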

opencompass/datasets/SciKnowEval.py

@@ -0,0 +1,107 @@
import re
from datasets import load_dataset
from opencompass.openicl import BaseEvaluator
from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS
from .base import BaseDataset
def _parse(item, prompt_mode, discipline):
choices = item['choices']
item['q4'] = f'You are an expert in {discipline}.\n'
item['q4'] += item['prompt']['default'] + '\n' + item['question'] + '\n'
label_texts = []
for label_meta, text_meta in zip(choices['label'], choices['text']):
label_texts.append(f'{label_meta}. {text_meta}')
    item['q4'] += '\n'.join(label_texts)  # noqa: E501
item['prompt_mode'] = prompt_mode
return item
@LOAD_DATASET.register_module()
class SciKnowEvalDataset(BaseDataset):
@staticmethod
def load(path: str, prompt_mode: str, **kwargs):
def capitalize_first_letter(s):
            if not s:  # return the string unchanged if it is empty
return s
return s[0].upper() + s[1:]
subset = kwargs['subset']
data_files = {}
test_file = f'data/{capitalize_first_letter(subset)}/'
test_file += f'sciknoweval_{subset}_test.jsonl'
data_files['test'] = test_file
dataset = load_dataset(path, data_files=data_files, split='test')
# dataset = dataset.select(range(20))
if prompt_mode == 'zero-shot':
dataset = dataset.map(
lambda item: _parse(item, prompt_mode, subset),
load_from_cache_file=False)
elif prompt_mode == 'few-shot':
pass # TODO: Implement few-shot prompt
return dataset
class SciKnowEvalEvaluator(BaseEvaluator):
def score(self, predictions, references, test_set):
method = test_set['prompt_mode'][0]
if len(predictions) != len(references):
            return {'error': 'predictions and references differ in length'}
correct = 0
count = 0
details = []
for idx, (i, j) in enumerate(zip(predictions, references)):
i = answer_cleansing(method, i, test_set['choices'][idx]['label'],
test_set['answerKey'][idx])
detail = {'pred': i, 'answer': j, 'correct': False}
count += 1
if i == j:
correct += 1
detail['correct'] = True
details.append(detail)
result = {'accuracy': 100 * correct / count, 'details': details}
return result
@TEXT_POSTPROCESSORS.register_module()
def answer_cleansing(
method: str,
prediction: str,
options: list,
label: str,
) -> str:
    options_str = r'\b(' + '|'.join(options) + r')\b'
    prediction = re.findall(options_str, prediction)
    if len(prediction) == 0:
        # No option letter was found in the model output; return an empty
        # string instead of indexing into an empty list.
        return ''
    # If the gold label is a single option, keep exactly one match
    if len(label) == 1:
        if method == 'few-shot':
            answer_flag = len(prediction) > 1
            # choose the first or last element based on the answer_flag
            if answer_flag:
                prediction = [prediction[0]]
            else:
                prediction = [prediction[-1]]
        elif method == 'zero-shot':
            # choose the first element in the list
            prediction = [prediction[0]]
        else:
            raise ValueError('Method is not properly defined ...')
    # Remove the trailing period if it exists
    if prediction[0] and prediction[0].endswith('.'):
        prediction[0] = prediction[0][:-1]
    return prediction[0]
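As a quick sanity check of the extraction logic above (sample strings are made up): the regex keeps option letters on word boundaries, the zero-shot branch keeps the first match, and an output containing no option letter falls back to an empty string.

sample_options = ['A', 'B', 'C', 'D']
print(answer_cleansing('zero-shot', 'The answer is (B). B is correct.',
                       sample_options, 'B'))   # -> 'B'
print(answer_cleansing('zero-shot', 'No option letter appears here.',
                       sample_options, 'A'))   # -> ''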

opencompass/datasets/__init__.py

@@ -134,6 +134,7 @@ from .ruler import *  # noqa: F401, F403
from .safety import *  # noqa: F401, F403
from .scibench import ScibenchDataset, scibench_postprocess  # noqa: F401, F403
from .scicode import *  # noqa: F401, F403
from .SciKnowEval import *  # noqa: F401, F403
from .simpleqa import *  # noqa: F401, F403
from .siqa import *  # noqa: F401, F403
from .smolinstruct import *  # noqa: F401, F403
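With the wildcard export in place, the new classes resolve from the package root, which is what both config files rely on; a quick smoke check:

from opencompass.datasets import SciKnowEvalDataset, SciKnowEvalEvaluator
print(SciKnowEvalDataset, SciKnowEvalEvaluator)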