mirror of https://github.com/open-compass/opencompass.git
synced 2025-05-30 16:03:24 +08:00
[Dataset] Support MedMCQA and MedBullets benchmark (#2054)
* support medmcqa and medbullets benchmark
* Add Medbullets data folder for benchmark support
* revise gen name
* revise config file & remove csv file & add dataset info to dataset-index.yml
* remove csv file
* remove print in medbullets.py
* revise class name
* update_oss_info

Co-authored-by: MaiziXiao <xxllcc1993@gmail.com>
This commit is contained in:
parent d60f59dcab
commit b84518c656
dataset-index.yml
@@ -1095,3 +1095,15 @@
     paper: https://arxiv.org/pdf/2308.04709
     configpath: opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_gen.py
     configpath_llmjudge: opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_llmjudge_gen.py
+- medbullets:
+    name: Medbullets
+    category: Science / Medicine
+    paper: https://arxiv.org/pdf/2402.18060
+    configpath: opencompass/configs/datasets/Medbullets/medbullets_gen.py
+    configpath_llmjudge: opencompass/configs/datasets/Medbullets/medbullets_llmjudge_gen.py
+- medmcqa:
+    name: MedMCQA
+    category: Science / Medicine
+    paper: https://arxiv.org/pdf/2203.14371
+    configpath: opencompass/configs/datasets/medmcqa/medmcqa_gen.py
+    configpath_llmjudge: opencompass/configs/datasets/medmcqa/medmcqa_llmjudge_gen.py
opencompass/configs/datasets/Medbullets/medbullets_gen.py (new file)
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .medbullets_gen_60c8f5 import medbullets_datasets  # noqa: F401, F403
opencompass/configs/datasets/Medbullets/medbullets_gen_60c8f5.py (new file)
@@ -0,0 +1,59 @@
from opencompass.datasets import MedbulletsDataset, MedbulletsEvaluator
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever

SYSTEM_PROMPT = 'You are a helpful medical assistant.\n\n'  # prepended as the system turn of every query
ZERO_SHOT_PROMPT = 'Q: {question}\nPlease select the correct answer from the options above and output only the corresponding letter (A, B, C, D, or E) without any explanation or additional text.\n'

# Reader configuration
reader_cfg = dict(
    input_columns=[
        'question',
        'options',
        'question_type',
        'prompt_mode',
    ],
    output_column='label',
)

# Inference configuration
infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            begin=[
                dict(role='SYSTEM', fallback_role='HUMAN', prompt=SYSTEM_PROMPT),
            ],
            round=[
                dict(
                    role='HUMAN',
                    prompt=ZERO_SHOT_PROMPT,  # prompt mode: zero-shot
                ),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

# Evaluation configuration
eval_cfg = dict(
    evaluator=dict(type=MedbulletsEvaluator),
    pred_role='BOT',
)

medbullets_dataset = dict(
    type=MedbulletsDataset,
    abbr='medbullets',
    path='opencompass/medbullets',
    prompt_mode='zero-shot',
    reader_cfg=reader_cfg,
    infer_cfg=infer_cfg,
    eval_cfg=eval_cfg,
)

medbullets_datasets = [medbullets_dataset]
opencompass/configs/datasets/Medbullets/medbullets_llmjudge_gen.py (new file)
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .medbullets_llmjudge_gen_60c8f5 import medbullets_datasets  # noqa: F401, F403
opencompass/configs/datasets/Medbullets/medbullets_llmjudge_gen_60c8f5.py (new file)
@@ -0,0 +1,106 @@
from opencompass.datasets import MedbulletsDataset, medbullets_llmjudge_postprocess
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.evaluator import GenericLLMEvaluator

SYSTEM_PROMPT = 'You are a helpful medical assistant.\n\n'  # prepended as the system turn of every query
ZERO_SHOT_PROMPT = 'Q: {question}\nPlease select the correct answer from the options above and output only the corresponding letter (A, B, C, D, or E) without any explanation or additional text.\n'
GRADER_TEMPLATE = """
As a grading expert, please judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may differ from the standard answer in form of expression, please first understand the question and the standard answer, and then judge whether the candidate's answer is correct; be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice, multiple-select, or fill-in-the-blank questions. As long as the answer matches the standard answer, it is correct. For multiple-select and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways; for example, some may be mathematical expressions and some textual descriptions, as long as the meaning expressed is the same. Formulas expressed in different but equivalent ways are also correct.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letter "A" or "B", with no text around it.

Here is your task. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.

<Original Question Begin>: Q: {question}\nPlease select the correct answer from the options above and output only the corresponding letter (A, B, C, D, or E) without any explanation or additional text.\n\n<Original Question End>\n\n
<Gold Target Begin>: \n{label}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of the candidate's answer:
""".strip()

# Reader configuration
reader_cfg = dict(
    input_columns=[
        'question',
        'options',
        'question_type',
        'prompt_mode',
    ],
    output_column='label',
)

# Inference configuration
infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            begin=[
                dict(role='SYSTEM', fallback_role='HUMAN', prompt=SYSTEM_PROMPT),
            ],
            round=[
                dict(
                    role='HUMAN',
                    prompt=ZERO_SHOT_PROMPT,  # prompt mode: zero-shot
                ),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

# Evaluation configuration: an LLM judge grades each prediction with GRADER_TEMPLATE
eval_cfg = dict(
    evaluator=dict(
        type=GenericLLMEvaluator,
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                begin=[
                    dict(
                        role='SYSTEM',
                        fallback_role='HUMAN',
                        prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                    )
                ],
                round=[
                    dict(role='HUMAN', prompt=GRADER_TEMPLATE),
                ],
            ),
        ),
        dataset_cfg=dict(
            type=MedbulletsDataset,
            path='opencompass/medbullets',
            prompt_mode='zero-shot',
            reader_cfg=reader_cfg,
        ),
        judge_cfg=dict(),
        dict_postprocessor=dict(type=medbullets_llmjudge_postprocess),
    ),
)

medbullets_dataset = dict(
    type=MedbulletsDataset,
    abbr='medbullets',
    path='opencompass/medbullets',
    prompt_mode='zero-shot',
    reader_cfg=reader_cfg,
    infer_cfg=infer_cfg,
    eval_cfg=eval_cfg,
)

medbullets_datasets = [medbullets_dataset]
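Note that judge_cfg is left empty above and is filled in at run time with a judge-model entry. As a hedged sketch only, such an entry might look like the following (the model name and field values are illustrative assumptions, not part of this commit):

# Illustrative judge-model entry that could populate judge_cfg
from opencompass.models import OpenAISDK

judge_cfg = dict(
    type=OpenAISDK,        # judge served over an OpenAI-compatible API
    path='gpt-4o-mini',    # illustrative judge model name
    key='ENV',             # read the API key from the environment
    max_out_len=1024,
    batch_size=8,
)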
opencompass/configs/datasets/medmcqa/medmcqa_gen.py (new file)
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .medmcqa_gen_60c8f5 import medmcqa_datasets  # noqa: F401, F403
opencompass/configs/datasets/medmcqa/medmcqa_gen_60c8f5.py (new file)
@@ -0,0 +1,58 @@
from opencompass.datasets import MedmcqaDataset, MedmcqaEvaluator
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever

SYSTEM_PROMPT = 'You are a helpful medical assistant.\n\n'  # prepended as the system turn of every query
ZERO_SHOT_PROMPT = 'Q: {question}\nPlease select the correct answer from the options above and output only the corresponding letter (A, B, C, D, or E) without any explanation or additional text.\n'

# Reader configuration
reader_cfg = dict(
    input_columns=[
        'question',
        'options',
        'subject_name',
        'choice_type',
        'prompt_mode',
        'topic_name',
    ],
    output_column='label',
)

# Inference configuration
infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            begin=[
                dict(role='SYSTEM', fallback_role='HUMAN', prompt=SYSTEM_PROMPT),
            ],
            round=[
                dict(
                    role='HUMAN',
                    prompt=ZERO_SHOT_PROMPT,  # prompt mode: zero-shot
                ),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

# Evaluation configuration
eval_cfg = dict(
    evaluator=dict(type=MedmcqaEvaluator),
    pred_role='BOT',
)

medmcqa_dataset = dict(
    type=MedmcqaDataset,
    abbr='medmcqa',
    path='openlifescienceai/medmcqa',
    prompt_mode='zero-shot',
    reader_cfg=reader_cfg,
    infer_cfg=infer_cfg,
    eval_cfg=eval_cfg,
)

medmcqa_datasets = [medmcqa_dataset]
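To make the template concrete, here is roughly what one rendered zero-shot query looks like after the dataset loader has appended the lettered options (the sample question is invented for illustration):

question = ('Which drug is a vitamin K antagonist?\n'
            'A. Aspirin\nB. Heparin\nC. Warfarin\nD. Alteplase')
print(ZERO_SHOT_PROMPT.format(question=question))
# Q: Which drug is a vitamin K antagonist?
# A. Aspirin
# B. Heparin
# C. Warfarin
# D. Alteplase
# Please select the correct answer from the options above and output only the
# corresponding letter (A, B, C, D, or E) without any explanation or additional text.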
opencompass/configs/datasets/medmcqa/medmcqa_llmjudge_gen.py (new file)
@@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .medmcqa_llmjudge_gen_60c8f5 import medmcqa_datasets  # noqa: F401, F403
opencompass/configs/datasets/medmcqa/medmcqa_llmjudge_gen_60c8f5.py (new file)
@@ -0,0 +1,105 @@
from opencompass.datasets import MedmcqaDataset, medmcqa_llmjudge_postprocess
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.evaluator import GenericLLMEvaluator

SYSTEM_PROMPT = 'You are a helpful medical assistant.\n\n'  # prepended as the system turn of every query
ZERO_SHOT_PROMPT = 'Q: {question}\nPlease select the correct answer from the options above and output only the corresponding letter (A, B, C, D, or E) without any explanation or additional text.\n'
GRADER_TEMPLATE = """
As a grading expert, please judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may differ from the standard answer in form of expression, please first understand the question and the standard answer, and then judge whether the candidate's answer is correct; be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice, multiple-select, or fill-in-the-blank questions. As long as the answer matches the standard answer, it is correct. For multiple-select and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways; for example, some may be mathematical expressions and some textual descriptions, as long as the meaning expressed is the same. Formulas expressed in different but equivalent ways are also correct.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letter "A" or "B", with no text around it.

Here is your task. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.

<Original Question Begin>: Q: {question}\nPlease select the correct answer from the options above and output only the corresponding letter (A, B, C, D, or E) without any explanation or additional text.\n\n<Original Question End>\n\n
<Gold Target Begin>: \n{label}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of the candidate's answer:
""".strip()

# Reader configuration
reader_cfg = dict(
    input_columns=[
        'question',
        'options',
        'subject_name',
        'choice_type',
        'prompt_mode',
        'topic_name',
    ],
    output_column='label',
)

# Inference configuration
infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            begin=[
                dict(role='SYSTEM', fallback_role='HUMAN', prompt=SYSTEM_PROMPT),
            ],
            round=[
                dict(
                    role='HUMAN',
                    prompt=ZERO_SHOT_PROMPT,  # prompt mode: zero-shot
                ),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

# Evaluation configuration: an LLM judge grades each prediction with GRADER_TEMPLATE
eval_cfg = dict(
    evaluator=dict(
        type=GenericLLMEvaluator,
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                begin=[
                    dict(
                        role='SYSTEM',
                        fallback_role='HUMAN',
                        prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                    )
                ],
                round=[
                    dict(role='HUMAN', prompt=GRADER_TEMPLATE),
                ],
            ),
        ),
        dataset_cfg=dict(
            type=MedmcqaDataset,
            path='openlifescienceai/medmcqa',
            prompt_mode='zero-shot',
            reader_cfg=reader_cfg,
        ),
        judge_cfg=dict(),
        dict_postprocessor=dict(type=medmcqa_llmjudge_postprocess),
    ),
)

medmcqa_dataset = dict(
    type=MedmcqaDataset,
    abbr='medmcqa',
    path='openlifescienceai/medmcqa',
    prompt_mode='zero-shot',
    reader_cfg=reader_cfg,
    infer_cfg=infer_cfg,
    eval_cfg=eval_cfg,
)

medmcqa_datasets = [medmcqa_dataset]
opencompass/datasets/Medbullets.py (new file)
@@ -0,0 +1,243 @@
import re

import pandas as pd
from datasets import Dataset

from opencompass.openicl import BaseEvaluator
from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS
from opencompass.utils import get_data_path, get_logger

from .base import BaseDataset


def _parse(item: dict, prompt_mode: str) -> dict:
    # Build the option list, skipping 'ope' when it is an empty string
    options_keys = ['opa', 'opb', 'opc', 'opd']
    if item.get('ope', '') != '':
        options_keys.append('ope')
    options_list = [item.get(k, '') for k in options_keys]
    item['options'] = options_list

    # Build the lettered option string
    options_str = '\n'.join(
        [f'{chr(65 + i)}. {opt}' for i, opt in enumerate(options_list)])

    # Append the options to the end of the question
    item['question'] = f"{item.get('question', '')}\n{options_str}"

    # Label and other fields
    item['label'] = item.get('answer_idx')
    item['prompt_mode'] = prompt_mode
    item['start'] = chr(65)
    item['end'] = chr(65 + len(options_list) - 1)
    return item


@LOAD_DATASET.register_module()
class MedbulletsDataset(BaseDataset):

    @staticmethod
    def load(path: str, prompt_mode: str = 'zero-shot', **kwargs):
        # Read the CSV file into a DataFrame and replace NaN with empty strings
        path = get_data_path(path)
        df = pd.read_csv(path, encoding='utf-8')
        df = df.fillna('')

        # Convert to a list of dicts
        data_list = df.to_dict(orient='records')

        # Wrap the list in a Dataset
        dataset = Dataset.from_list(data_list)

        # Parse according to the prompt mode
        if prompt_mode == 'zero-shot':
            dataset = dataset.map(lambda item: _parse(item, prompt_mode))
        elif prompt_mode == 'few-shot':
            pass  # TODO: Implement few-shot prompt handling
        return dataset


class MedbulletsEvaluator(BaseEvaluator):

    def score(self, predictions, references, test_set):
        method = test_set['prompt_mode'][0]

        if len(predictions) != len(references):
            return {'error': 'preds and refs have different lengths'}
        correct = 0
        count = 0
        details = []
        for idx, (i, j) in enumerate(zip(predictions, references)):
            i = answer_cleansing(method, i, test_set['options'][idx],
                                 test_set['label'][idx])
            detail = {
                'pred': i,
                'answer': j,
                'correct': False,
                'question_type': test_set['question_type'][idx]
            }
            count += 1
            if i == j:
                correct += 1
                detail['correct'] = True
            details.append(detail)
        result = {'accuracy': 100 * correct / count, 'details': details}
        return result


@TEXT_POSTPROCESSORS.register_module()
def answer_cleansing(
    method: str,
    prediction: str,
    options: list,
    label: str,
) -> str:

    # Clean up unwanted phrases in the prediction
    for unwanted_phrase in [
            'I understand',
            'A through J',
            'A through E',
            'A through D',
    ]:
        prediction = prediction.replace(unwanted_phrase, '')

    options_num = len(options)
    options = [chr(65 + i) for i in range(options_num)]
    options_str = r'\b(' + '|'.join(options) + r')\b'
    prediction = re.findall(options_str, prediction)

    if len(prediction) == 0:
        # No option letter found; return the empty match list (scored as incorrect)
        return prediction
    else:
        # If there is a "label" and its length is 1,
        # process the prediction accordingly
        if len(label) == 1:
            if method == 'few-shot':
                answer_flag = len(prediction) > 1
                # choose the first or last element based on answer_flag
                if answer_flag:
                    prediction = [prediction[0]]
                else:
                    prediction = [prediction[-1]]
            elif method == 'zero-shot':
                # choose the first element in the list
                prediction = [prediction[0]]
            else:
                raise ValueError('Method is not properly defined ...')

        # Remove a trailing period if present
        if prediction[0] and prediction[0].endswith('.'):
            prediction[0] = prediction[0][:-1]

        return prediction[0]


def _generic_llmjudge_postprocess(judgement: str):
    match = re.search(r'(A|B)', judgement)
    grade_letter = (match.group(0) if match else 'B'
                    )  # Default to "INCORRECT" if no match
    return grade_letter


def medbullets_llmjudge_postprocess(
    output: dict,
    output_path: str,
    dataset: Dataset,
) -> dict:
    original_dataset = dataset.reader.dataset['test']

    judged_answers = []
    original_responses = []
    references = []
    details = []

    # Initialize statistics dictionaries
    stats = {'question_type': {}}

    total_correct = 0
    total_count = 0

    # Process each sample
    for k, v in output.items():
        idx = int(k)  # Convert key to integer for indexing
        original_responses.append(v['prediction'])
        processed_judge = _generic_llmjudge_postprocess(v['prediction'])

        # Get category information from the dataset
        sample = original_dataset[idx]
        question_type = sample.get('question_type', 'unknown')

        # Initialize category stats if they do not exist
        for level, key in [
            ('question_type', question_type),
        ]:
            if key not in stats[level]:
                stats[level][key] = {'correct': 0, 'total': 0}

        # Record the judgment
        if processed_judge is not None:
            judged_answers.append(processed_judge)
            try:
                gold = v['gold']
                references.append(gold)
            except KeyError:
                get_logger().warning(
                    f'No gold answer for {k}, use empty string as reference!')
                gold = ''
                references.append('')

            # Check if the answer is correct (A means correct)
            is_correct = processed_judge == 'A'
            total_count += 1

            if is_correct:
                total_correct += 1
                # Update category stats
                for level, key in [
                    ('question_type', question_type),
                ]:
                    stats[level][key]['correct'] += 1

            # Update category totals
            for level, key in [
                ('question_type', question_type),
            ]:
                stats[level][key]['total'] += 1
            # Add to details
            details.append({
                'id': k,
                'origin_prompt': v['origin_prompt'],
                'llm_judge': processed_judge,
                'gold': gold,
                'is_correct': is_correct,
                'question_type': question_type,
            })

    # Calculate overall accuracy with two decimal places
    overall_accuracy = (round(
        (total_correct / total_count * 100), 2) if total_count > 0 else 0.00)

    # Initialize the results dictionary
    results = {
        'accuracy': overall_accuracy,
        'total_correct': total_correct,
        'total_count': total_count,
        'details': details,
    }

    # Calculate accuracy for each category and flatten into results
    for level in stats:
        for key, value in stats[level].items():
            if value['total'] > 0:
                # Calculate accuracy with two decimal places
                accuracy = round((value['correct'] / value['total'] * 100), 2)

                # Create a flattened key for the category
                flat_key = f'Medbullets-{key}'

                # Add to results
                results[flat_key] = accuracy

    return results
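A quick illustration of what answer_cleansing extracts in zero-shot mode (inputs are invented for illustration; the import path assumes this module's location):

from opencompass.datasets.Medbullets import answer_cleansing

options = ['aspirin', 'heparin', 'warfarin', 'alteplase']
# The first standalone letter A-D in the model output is kept.
answer_cleansing('zero-shot', 'The answer is C. Warfarin.', options, 'C')  # -> 'C'
# No recognizable letter: the empty match list is returned and scores as incorrect.
answer_cleansing('zero-shot', 'I am not sure.', options, 'C')  # -> []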
opencompass/datasets/__init__.py
@@ -99,8 +99,10 @@ from .mathbench import *  # noqa: F401, F403
 from .mbpp import *  # noqa: F401, F403
 from .mbpp_pro import *  # noqa: F401, F403
 from .medbench import *  # noqa: F401, F403
+from .Medbullets import *  # noqa: F401, F403
 from .MedCalc_Bench import MedCalc_BenchDataset  # noqa: F401
 from .MedCalc_Bench import MedCalcOfficial_Evaluator  # noqa: F401
+from .medmcqa import *  # noqa: F401, F403
 from .MedQA import *  # noqa: F401, F403
 from .MedXpertQA import *  # noqa: F401, F403
 from .mgsm import *  # noqa: F401, F403
opencompass/datasets/medmcqa.py (new file)
@@ -0,0 +1,247 @@
import re

from datasets import Dataset, load_dataset

from opencompass.openicl import BaseEvaluator
from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS
from opencompass.utils import get_logger

from .base import BaseDataset


def _parse(item, prompt_mode):
    options_list = [item['opa'], item['opb'], item['opc'], item['opd']]
    item['options'] = options_list

    # Build the lettered option string
    options_str = '\n'.join(
        [f'{chr(65 + i)}. {opt}' for i, opt in enumerate(options_list)])

    # Append the options to the end of the question
    item['question'] = f"{item['question']}\n{options_str}"

    item['label'] = chr(65 + item['cop'])
    item['subject_name'] = item['subject_name'].replace('_', ' ')
    item['prompt_mode'] = prompt_mode
    item['start'] = chr(65)
    item['end'] = chr(65 + len(options_list) - 1)  # use the actual number of options
    return item


@LOAD_DATASET.register_module()
class MedmcqaDataset(BaseDataset):

    @staticmethod
    def load(path: str, prompt_mode: str = 'zero-shot', **kwargs):
        dataset = load_dataset(path=path,
                               split='validation',
                               trust_remote_code=True)

        if prompt_mode == 'zero-shot':
            dataset = dataset.map(lambda item: _parse(item, prompt_mode))
        elif prompt_mode == 'few-shot':
            pass  # TODO: Implement few-shot prompt

        return dataset


class MedmcqaEvaluator(BaseEvaluator):

    def score(self, predictions, references, test_set):
        method = test_set['prompt_mode'][0]

        if len(predictions) != len(references):
            return {'error': 'preds and refs have different lengths'}
        correct = 0
        count = 0
        details = []
        for idx, (i, j) in enumerate(zip(predictions, references)):
            i = answer_cleansing(method, i, test_set['options'][idx],
                                 test_set['label'][idx])
            detail = {
                'pred': i,
                'answer': j,
                'correct': False,
                'subject_name': test_set['subject_name'][idx],
                'topic_name': test_set['topic_name'][idx],
                'choice_type': test_set['choice_type'][idx]
            }
            count += 1
            if i == j:
                correct += 1
                detail['correct'] = True
            details.append(detail)
        result = {'accuracy': 100 * correct / count, 'details': details}
        return result


@TEXT_POSTPROCESSORS.register_module()
def answer_cleansing(
    method: str,
    prediction: str,
    options: list,
    label: str,
) -> str:

    # Clean up unwanted phrases in the prediction
    for unwanted_phrase in [
            'I understand',
            'A through J',
            'A through E',
            'A through D',
    ]:
        prediction = prediction.replace(unwanted_phrase, '')

    options_num = len(options)
    options = [chr(65 + i) for i in range(options_num)]
    options_str = r'\b(' + '|'.join(options) + r')\b'
    prediction = re.findall(options_str, prediction)

    if len(prediction) == 0:
        # No option letter found; return the empty match list (scored as incorrect)
        return prediction
    else:
        # If there is a "label" and its length is 1,
        # process the prediction accordingly
        if len(label) == 1:
            if method == 'few-shot':
                answer_flag = len(prediction) > 1
                # choose the first or last element based on answer_flag
                if answer_flag:
                    prediction = [prediction[0]]
                else:
                    prediction = [prediction[-1]]
            elif method == 'zero-shot':
                # choose the first element in the list
                prediction = [prediction[0]]
            else:
                raise ValueError('Method is not properly defined ...')

        # Remove a trailing period if present
        if prediction[0] and prediction[0].endswith('.'):
            prediction[0] = prediction[0][:-1]

        return prediction[0]


def _generic_llmjudge_postprocess(judgement: str):
    match = re.search(r'(A|B)', judgement)
    grade_letter = (match.group(0) if match else 'B'
                    )  # Default to "INCORRECT" if no match
    return grade_letter


def medmcqa_llmjudge_postprocess(
    output: dict,
    output_path: str,
    dataset: Dataset,
) -> dict:
    # Get the original dataset
    original_dataset = dataset.reader.dataset['test']

    judged_answers = []
    original_responses = []
    references = []
    details = []

    # Initialize statistics dictionaries
    stats = {'subject': {}, 'topic': {}, 'question_type': {}}

    total_correct = 0
    total_count = 0

    # Process each sample
    for k, v in output.items():
        idx = int(k)  # Convert key to integer for indexing
        original_responses.append(v['prediction'])

        processed_judge = _generic_llmjudge_postprocess(v['prediction'])

        # Get category information from the dataset
        sample = original_dataset[idx]
        subject = sample.get('subject_name', 'unknown')
        question_type = sample.get('choice_type', 'unknown')
        topic = sample.get('topic_name', 'unknown')

        # Initialize category stats if they do not exist
        for level, key in [
            ('subject', subject),
            ('question_type', question_type),
            ('topic', topic),
        ]:
            if key not in stats[level]:
                stats[level][key] = {'correct': 0, 'total': 0}

        # Record the judgment
        if processed_judge is not None:
            judged_answers.append(processed_judge)
            try:
                gold = v['gold']
                references.append(gold)
            except KeyError:
                get_logger().warning(
                    f'No gold answer for {k}, use empty string as reference!')
                gold = ''
                references.append('')

            # Check if the answer is correct (A means correct)
            is_correct = processed_judge == 'A'
            total_count += 1

            if is_correct:
                total_correct += 1
                # Update category stats
                for level, key in [
                    ('subject', subject),
                    ('question_type', question_type),
                    ('topic', topic),
                ]:
                    stats[level][key]['correct'] += 1

            # Update category totals
            for level, key in [
                ('subject', subject),
                ('question_type', question_type),
                ('topic', topic),
            ]:
                stats[level][key]['total'] += 1
            # Add to details
            details.append({
                'id': k,
                'question': sample['question'],
                'options': sample['options'],
                'origin_prompt': v['origin_prompt'],
                'llm_judge': processed_judge,
                'gold': gold,
                'is_correct': is_correct,
                'subject': subject,
                'question_type': question_type,
                'topic': topic,
            })

    # Calculate overall accuracy with two decimal places
    overall_accuracy = (round(
        (total_correct / total_count * 100), 2) if total_count > 0 else 0.00)

    # Initialize the results dictionary
    results = {
        'accuracy': overall_accuracy,
        'total_correct': total_correct,
        'total_count': total_count,
        'details': details,
    }

    # Calculate accuracy for each category and flatten into results
    for level in stats:
        for key, value in stats[level].items():
            if value['total'] > 0:
                # Calculate accuracy with two decimal places
                accuracy = round((value['correct'] / value['total'] * 100), 2)

                # Create a flattened key for the category
                flat_key = f'medmcqa-{key}'

                # Add to results
                results[flat_key] = accuracy

    return results
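For reference, a hedged sketch of how _parse reshapes one raw MedMCQA record (field values are invented for illustration):

item = {
    'question': 'Which vitamin deficiency causes scurvy?',
    'opa': 'Vitamin A', 'opb': 'Vitamin B12',
    'opc': 'Vitamin C', 'opd': 'Vitamin D',
    'cop': 2,                      # index of the correct option
    'subject_name': 'Biochemistry',
}
item = _parse(item, 'zero-shot')
item['label']     # 'C'  (chr(65 + 2))
item['question']  # original question with 'A. ...' through 'D. ...' appended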
opencompass/utils/datasets_info.py
@@ -461,6 +461,12 @@ DATASETS_MAPPING = {
         "hf_id": "",
         "local": "./data/mbpp_pro/mbpp_pro.json",
     },
+    "opencompass/medbullets": {
+        "ms_id": "",
+        "hf_id": "",
+        "local": "./data/medbullets/medbullets.csv",
+    },
+
 }

 DATASETS_URL = {
@@ -826,4 +832,8 @@ DATASETS_URL = {
         "url": "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/mbpp_pro.zip",
         "md5": "eac330b8a0a8687f006265c9383503ce",
     },
+    "medbullets": {
+        "url": "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/medbullets.zip",
+        "md5": "b63130999c1f28d57acba1c7852639f8",
+    },
 }
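Taken together, the two entries above mean that the dataset path 'opencompass/medbullets' resolves to a local CSV, with the OSS zip downloaded and md5-checked on first use. A sketch, assuming the standard get_data_path flow:

from opencompass.utils import get_data_path

local_csv = get_data_path('opencompass/medbullets')
# -> './data/medbullets/medbullets.csv', fetched from the 'medbullets'
#    DATASETS_URL entry if not already present under ./data/.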