diff --git a/dataset-index.yml b/dataset-index.yml index ae7f089a..24adc501 100644 --- a/dataset-index.yml +++ b/dataset-index.yml @@ -128,6 +128,24 @@ paper: https://arxiv.org/abs/2501.18362 configpath: opencompass/configs/datasets/MedXpertQA/MedXpertQA_gen.py configpath_llmjudge: opencompass/configs/datasets/MedXpertQA/MedXpertQA_llmjudge_gen.py +- ClinicBench: + name: ClinicBench + category: Knowledge / Medicine + paper: https://arxiv.org/abs/2405.00716 + configpath: '' + configpath_llmjudge: opencompass/configs/datasets/ClinicBench/ClinicBench_llmjudge_gen.py +- ScienceQA: + name: ScienceQA + category: Knowledge / Medicine + paper: https://arxiv.org/abs/2209.09513 + configpath: '' + configpath_llmjudge: opencompass/configs/datasets/ScienceQA/ScienceQA_llmjudge_gen.py +- PubMedQA: + name: PubMedQA + category: Knowledge / Medicine + paper: https://arxiv.org/abs/1909.06146 + configpath: '' + configpath_llmjudge: opencompass/configs/datasets/PubMedQA/PubMedQA_llmjudge_gen.py - musr: name: MuSR category: Reasoning @@ -343,6 +361,12 @@ paper: https://arxiv.org/pdf/2004.05986 configpath: opencompass/configs/datasets/CLUE_C3/CLUE_C3_gen.py configpath_llmjudge: '' +- CARDBiomedBench: + name: CARDBiomedBench + category: Knowledge / Medicine + paper: https://www.biorxiv.org/content/10.1101/2025.01.15.633272v1 + configpath: '' + configpath_llmjudge: opencompass/configs/datasets/CARDBiomedBench/CARDBiomedBench_llmjudge_gen_99a231.py - cb: name: SuperGLUE / CB category: Reasoning @@ -1029,6 +1053,12 @@ paper: '' configpath: opencompass/configs/datasets/internsandbox/internsandbox_gen_44b982.py configpath_llmjudge: '' +- nejmaibench: + name: nejmaibench + category: Science /Medicine + paper: https://arxiv.org/pdf/2308.04709 + configpath: opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_gen.py + configpath_llmjudge: opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_llmjudge_gen.py - medbullets: name: Medbullets category: Science /Medicine diff --git a/opencompass/configs/datasets/CARDBiomedBench/CARDBiomedBench_llmjudge_gen_99a231.py b/opencompass/configs/datasets/CARDBiomedBench/CARDBiomedBench_llmjudge_gen_99a231.py new file mode 100644 index 00000000..c6acb71e --- /dev/null +++ b/opencompass/configs/datasets/CARDBiomedBench/CARDBiomedBench_llmjudge_gen_99a231.py @@ -0,0 +1,101 @@ +from opencompass.datasets import CARDBiomedBenchDataset +from opencompass.datasets import generic_llmjudge_postprocess +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.evaluator import GenericLLMEvaluator +ZERO_SHOT_PROMPT = 'You are an expert in {expert}.\n{question}\n' + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2.
Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + + <Original Question Begin>: Q: You are an expert in {expert}.\n{question}\n<Original Question End>\n\n + <Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n + <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n + Judging the correctness of candidates' answers: +""".strip() + + +# Reader configuration +reader_cfg = dict( + input_columns=[ + 'question', + 'answer', + 'Bio_Category', + 'SQL_Category', + 'uuid', + 'template uuid', + 'expert', + ], + output_column='answer', +) +# Inference configuration +infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict( + role='HUMAN', + prompt=ZERO_SHOT_PROMPT, # prompt mode: zero-shot + ), + ], + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), +) + +# Evaluation configuration +eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.", + ) + ], + round=[ + dict(role='HUMAN', prompt=GRADER_TEMPLATE), + ], + ), + ), + dataset_cfg=dict( + type=CARDBiomedBenchDataset, + path='NIH-CARD/CARDBiomedBench', + prompt_mode='zero-shot', + reader_cfg=reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + ), +) +cardbiomedbench_dataset = dict( + type=CARDBiomedBenchDataset, + abbr='cardbiomedbench', + path='NIH-CARD/CARDBiomedBench', + prompt_mode='zero-shot', + reader_cfg=reader_cfg, + infer_cfg=infer_cfg, + eval_cfg=eval_cfg, +) +cardbiomedbench_datasets = [cardbiomedbench_dataset] diff --git a/opencompass/configs/datasets/ClinicBench/ClinicBench_llmjudge_gen.py b/opencompass/configs/datasets/ClinicBench/ClinicBench_llmjudge_gen.py new file mode 100644 index 00000000..febfce11 --- /dev/null +++ b/opencompass/configs/datasets/ClinicBench/ClinicBench_llmjudge_gen.py @@ -0,0 +1,4 @@ +from mmengine.config import read_base + +with read_base(): + from .ClinicBench_llmjudge_gen_d09668 import ClinicBench_datasets \ No newline at end of file diff --git a/opencompass/configs/datasets/ClinicBench/ClinicBench_llmjudge_gen_d09668.py
b/opencompass/configs/datasets/ClinicBench/ClinicBench_llmjudge_gen_d09668.py new file mode 100644 index 00000000..358a91f5 --- /dev/null +++ b/opencompass/configs/datasets/ClinicBench/ClinicBench_llmjudge_gen_d09668.py @@ -0,0 +1,100 @@ +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import generic_llmjudge_postprocess +from opencompass.datasets.ClinicBench import ClinicBenchDataset + + +QUERY_TEMPLATE = """ +Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of the options (e.g. one of ABCDEFGHIJKLMNOP). Think step by step before answering. + +Question:\n +{question} + +Options:\n +{choices} + +""".strip() + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+ + <Original Question Begin>: {question}\n {choices} \n<Original Question End>\n\n + <Gold Target Begin>: \n{label}\n<Gold Target End>\n\n + <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n + Judging the correctness of candidates' answers: +""".strip() + +ClinicBench_datasets = [] + +ClinicBench_reader_cfg = dict( + input_columns=['question', 'choices'], + output_column='label', +) + +ClinicBench_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt=QUERY_TEMPLATE), + ], + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), +) + +ClinicBench_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.", + ) + ], + round=[ + dict(role='HUMAN', prompt=GRADER_TEMPLATE), + ], + ), + ), + dataset_cfg=dict( + type=ClinicBenchDataset, + path='xuxuxuxuxu/Pharmacology-QA', + reader_cfg=ClinicBench_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + ), +) + +ClinicBench_datasets.append( + dict( + abbr='ClinicBench', + type=ClinicBenchDataset, + path='xuxuxuxuxu/Pharmacology-QA', + reader_cfg=ClinicBench_reader_cfg, + infer_cfg=ClinicBench_infer_cfg, + eval_cfg=ClinicBench_eval_cfg, + ) +) diff --git a/opencompass/configs/datasets/PubMedQA/PubMedQA_llmjudge_gen.py b/opencompass/configs/datasets/PubMedQA/PubMedQA_llmjudge_gen.py new file mode 100644 index 00000000..4055d0f5 --- /dev/null +++ b/opencompass/configs/datasets/PubMedQA/PubMedQA_llmjudge_gen.py @@ -0,0 +1,4 @@ +from mmengine.config import read_base + +with read_base(): + from .PubMedQA_llmjudge_gen_f00302 import PubMedQA_datasets \ No newline at end of file diff --git a/opencompass/configs/datasets/PubMedQA/PubMedQA_llmjudge_gen_f00302.py b/opencompass/configs/datasets/PubMedQA/PubMedQA_llmjudge_gen_f00302.py new file mode 100644 index 00000000..b38a8fe5 --- /dev/null +++ b/opencompass/configs/datasets/PubMedQA/PubMedQA_llmjudge_gen_f00302.py @@ -0,0 +1,94 @@ +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import generic_llmjudge_postprocess +from opencompass.datasets.PubMedQA import PubMedQADataset + + +QUERY_TEMPLATE = """ +Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of the options (e.g. one of ABCDEFGHIJKLMNOP). Think step by step before answering. +Question:\n +{question} +Options:\n +{choices} +""".strip() + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2.
Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + <Original Question Begin>: {question}\n {choices} \n<Original Question End>\n\n + <Gold Target Begin>: \n{label}\n<Gold Target End>\n\n + <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n + Judging the correctness of candidates' answers: +""".strip() + +PubMedQA_datasets = [] + +PubMedQA_reader_cfg = dict( + input_columns=['question', 'choices'], + output_column='label', +) + +PubMedQA_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt=QUERY_TEMPLATE), + ], + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), +) + +PubMedQA_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.", + ) + ], + round=[ + dict(role='HUMAN', prompt=GRADER_TEMPLATE), + ], + ), + ), + dataset_cfg=dict( + type=PubMedQADataset, + path='qiaojin/PubMedQA', + reader_cfg=PubMedQA_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + ), +) + +PubMedQA_datasets.append( + dict( + abbr='PubMedQA', + type=PubMedQADataset, + path='qiaojin/PubMedQA', + reader_cfg=PubMedQA_reader_cfg, + infer_cfg=PubMedQA_infer_cfg, + eval_cfg=PubMedQA_eval_cfg, + ) +) \ No newline at end of file diff --git a/opencompass/configs/datasets/ScienceQA/ScienceQA_llmjudge_gen.py b/opencompass/configs/datasets/ScienceQA/ScienceQA_llmjudge_gen.py new file mode 100644 index 00000000..32305456 --- /dev/null +++ b/opencompass/configs/datasets/ScienceQA/ScienceQA_llmjudge_gen.py @@ -0,0 +1,4 @@ +from mmengine.config import read_base + +with read_base(): + from .ScienceQA_llmjudge_gen_f00302 import ScienceQA_datasets \ No newline at end of file diff --git a/opencompass/configs/datasets/ScienceQA/ScienceQA_llmjudge_gen_f00302.py b/opencompass/configs/datasets/ScienceQA/ScienceQA_llmjudge_gen_f00302.py new file mode 100644 index 00000000..e128c2a0 --- /dev/null +++ b/opencompass/configs/datasets/ScienceQA/ScienceQA_llmjudge_gen_f00302.py @@ -0,0 +1,94 @@ +from
opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.evaluator import GenericLLMEvaluator +from opencompass.datasets import generic_llmjudge_postprocess +from opencompass.datasets.ScienceQA import ScienceQADataset + + +QUERY_TEMPLATE = """ +Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of the options (e.g. one of ABCDEFGHIJKLMNOP). Think step by step before answering. +Question:\n +{question} +Options:\n +{choices} +""".strip() + +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+ <Original Question Begin>: {question}\n {choices} \n<Original Question End>\n\n + <Gold Target Begin>: \n{label}\n<Gold Target End>\n\n + <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n + Judging the correctness of candidates' answers: +""".strip() + +ScienceQA_datasets = [] + +ScienceQA_reader_cfg = dict( + input_columns=['question', 'choices'], + output_column='label', +) + +ScienceQA_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt=QUERY_TEMPLATE), + ], + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), +) + +ScienceQA_eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.", + ) + ], + round=[ + dict(role='HUMAN', prompt=GRADER_TEMPLATE), + ], + ), + ), + dataset_cfg=dict( + type=ScienceQADataset, + path='derek-thomas/ScienceQA', + reader_cfg=ScienceQA_reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + ), +) + +ScienceQA_datasets.append( + dict( + abbr='ScienceQA', + type=ScienceQADataset, + path='derek-thomas/ScienceQA', + reader_cfg=ScienceQA_reader_cfg, + infer_cfg=ScienceQA_infer_cfg, + eval_cfg=ScienceQA_eval_cfg, + ) +) \ No newline at end of file diff --git a/opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_gen.py b/opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_gen.py new file mode 100644 index 00000000..2116726c --- /dev/null +++ b/opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_gen.py @@ -0,0 +1,4 @@ +from mmengine.config import read_base + +with read_base(): + from .nejmaibench_gen_60c8f5 import nejmaibench_datasets # noqa: F401, F403 \ No newline at end of file diff --git a/opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_gen_60c8f5.py b/opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_gen_60c8f5.py new file mode 100644 index 00000000..ec817c57 --- /dev/null +++ b/opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_gen_60c8f5.py @@ -0,0 +1,59 @@ +from opencompass.datasets import NejmaibenchDataset, NejmaibenchEvaluator +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever + +SYSTEM_PROMPT = 'You are a helpful medical assistant.\n\n'
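+# NOTE: the system prompt is applied via the 'begin' section of the prompt +# template below (SYSTEM role, with fallback_role='HUMAN' for backends that +# lack a separate system turn).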
+ZERO_SHOT_PROMPT = 'Q: {question}\n Please select the correct answer from the options above and output only the corresponding letter (A, B, C, D, or E) without any explanation or additional text.\n' + +# Reader configuration +reader_cfg = dict( + input_columns=[ + 'question', + 'options', + 'Subject', + 'prompt_mode', + ], + output_column='label', +) + +# Inference configuration +infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict(role='SYSTEM', fallback_role='HUMAN', prompt=SYSTEM_PROMPT), + ], + round=[ + dict( + role='HUMAN', + prompt=ZERO_SHOT_PROMPT, # prompt mode: zero-shot + ), + ], + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), +) + +# Evaluation configuration +eval_cfg = dict( + evaluator=dict(type=NejmaibenchEvaluator), + pred_role='BOT', +) +nejmaibench_dataset = dict( + type=NejmaibenchDataset, + abbr='nejmaibench', + path='opencompass/nejmaibench', + prompt_mode='zero-shot', + reader_cfg=reader_cfg, + infer_cfg=infer_cfg, + eval_cfg=eval_cfg, +) + +nejmaibench_datasets = [nejmaibench_dataset] diff --git a/opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_llmjudge_gen.py b/opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_llmjudge_gen.py new file mode 100644 index 00000000..de683ccc --- /dev/null +++ b/opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_llmjudge_gen.py @@ -0,0 +1,4 @@ +from mmengine.config import read_base + +with read_base(): + from .nejmaibench_llmjudge_gen_60c8f5 import nejmaibench_datasets # noqa: F401, F403 \ No newline at end of file diff --git a/opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_llmjudge_gen_60c8f5.py b/opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_llmjudge_gen_60c8f5.py new file mode 100644 index 00000000..31be8049 --- /dev/null +++ b/opencompass/configs/datasets/nejm_ai_benchmark/nejmaibench_llmjudge_gen_60c8f5.py @@ -0,0 +1,108 @@ +from opencompass.datasets import NejmaibenchDataset +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever +from opencompass.datasets import generic_llmjudge_postprocess +from opencompass.evaluator import GenericLLMEvaluator + +SYSTEM_PROMPT = 'You are a helpful medical assistant.\n\n' +ZERO_SHOT_PROMPT = 'Q: {question}\n Please select the correct answer from the options above and output only the corresponding letter (A, B, C, D, or E) without any explanation or additional text.\n' +GRADER_TEMPLATE = """ + Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly. + + Here are some evaluation criteria: + 1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct. + 2.
Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question. + 3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct. + 4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct. + + Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of: + A: CORRECT + B: INCORRECT + Just return the letters "A" or "B", with no text around it. + + Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. + + <Original Question Begin>: Q: {question}\nPlease select the correct answer from the options above and output only the corresponding letter (A, B, C, D, or E) without any explanation or additional text.\n\n<Original Question End>\n\n + <Gold Target Begin>: \n{label}\n<Gold Target End>\n\n + <Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n + Judging the correctness of candidates' answers: +""".strip() + +# Reader configuration +reader_cfg = dict( + input_columns=[ + 'question', + 'options', + 'Subject', + 'prompt_mode', + ], + output_column='label', +) + + +# Inference configuration +infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict(role='SYSTEM', fallback_role='HUMAN', prompt=SYSTEM_PROMPT), + ], + round=[ + dict( + role='HUMAN', + prompt=ZERO_SHOT_PROMPT, # prompt mode: zero-shot + ), + ], + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), +) + +# Evaluation configuration +eval_cfg = dict( + evaluator=dict( + type=GenericLLMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict( + role='SYSTEM', + fallback_role='HUMAN', + prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.", + ) + ], + round=[ + dict(role='HUMAN', prompt=GRADER_TEMPLATE), + ], + ), + ), + dataset_cfg=dict( + type=NejmaibenchDataset, + path='opencompass/nejmaibench', + prompt_mode='zero-shot', + reader_cfg=reader_cfg, + ), + judge_cfg=dict(), + dict_postprocessor=dict(type=generic_llmjudge_postprocess), + ), +) + + +nejmaibench_dataset = dict( + type=NejmaibenchDataset, + abbr='nejmaibench', + path='opencompass/nejmaibench', + prompt_mode='zero-shot', + reader_cfg=reader_cfg, + infer_cfg=infer_cfg, + eval_cfg=eval_cfg, +) + +nejmaibench_datasets = [nejmaibench_dataset] diff --git a/opencompass/configs/models/gemma/vllm_gemma_3_12b_it.py b/opencompass/configs/models/gemma/vllm_gemma_3_12b_it.py new file mode 100644 index 00000000..2914640f --- /dev/null +++ b/opencompass/configs/models/gemma/vllm_gemma_3_12b_it.py @@ -0,0 +1,16 @@ +from opencompass.models import VLLMwithChatTemplate + +models = [ + dict( + type=VLLMwithChatTemplate, + abbr='gemma-3-12b-it-vllm', +
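# Hugging Face Hub model ID; assumes the weights can be downloaded or are cached locally. +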
path='google/gemma-3-12b-it', + model_kwargs=dict(tensor_parallel_size=4, + # for long context + rope_scaling={'factor': 8.0, 'rope_type': 'linear'}), + max_out_len=4096, + batch_size=1, + generation_kwargs=dict(temperature=0), + run_cfg=dict(num_gpus=4), + ) +] diff --git a/opencompass/configs/models/gemma/vllm_gemma_3_27b_it.py b/opencompass/configs/models/gemma/vllm_gemma_3_27b_it.py new file mode 100644 index 00000000..b6f4b93b --- /dev/null +++ b/opencompass/configs/models/gemma/vllm_gemma_3_27b_it.py @@ -0,0 +1,16 @@ +from opencompass.models import VLLMwithChatTemplate + +models = [ + dict( + type=VLLMwithChatTemplate, + abbr='gemma-3-27b-it-vllm', + path='google/gemma-3-27b-it', + model_kwargs=dict(tensor_parallel_size=4, + # for long context + rope_scaling={'factor': 8.0, 'rope_type': 'linear'}), + max_out_len=4096, + batch_size=1, + generation_kwargs=dict(temperature=0), + run_cfg=dict(num_gpus=4), + ) +] diff --git a/opencompass/configs/models/gemma/vllm_gemma_3_4b_it.py b/opencompass/configs/models/gemma/vllm_gemma_3_4b_it.py new file mode 100644 index 00000000..22516ff7 --- /dev/null +++ b/opencompass/configs/models/gemma/vllm_gemma_3_4b_it.py @@ -0,0 +1,17 @@ +from opencompass.models import VLLMwithChatTemplate + +models = [ + dict( + type=VLLMwithChatTemplate, + abbr='gemma-3-4b-it-vllm', + path='google/gemma-3-4b-it', + model_kwargs=dict(tensor_parallel_size=2, + # for long context + rope_scaling={'factor': 8.0, 'rope_type': 'linear'}), + max_seq_len=140000, + max_out_len=4096, + batch_size=1, + generation_kwargs=dict(temperature=0), + run_cfg=dict(num_gpus=2), + ) +] diff --git a/opencompass/configs/models/hf_internlm/lmdeploy_internlm3_8b_instruct_128k.py b/opencompass/configs/models/hf_internlm/lmdeploy_internlm3_8b_instruct_128k.py new file mode 100644 index 00000000..1cc4e251 --- /dev/null +++ b/opencompass/configs/models/hf_internlm/lmdeploy_internlm3_8b_instruct_128k.py @@ -0,0 +1,19 @@ +from opencompass.models import TurboMindModelwithChatTemplate + +models = [ + dict( + type=TurboMindModelwithChatTemplate, + abbr='internlm3-8b-instruct-turbomind', + path='internlm/internlm3-8b-instruct', + engine_config=dict(session_len=142000, max_batch_size=1, tp=2, + # for long context + rope_scaling_factor=6.0), + gen_config=dict( + top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=8192 + ), + max_seq_len=142000, + max_out_len=8192, + batch_size=1, + run_cfg=dict(num_gpus=2), + ) +] diff --git a/opencompass/configs/models/hf_internlm/lmdeploy_oreal_32b.py b/opencompass/configs/models/hf_internlm/lmdeploy_oreal_32b.py new file mode 100644 index 00000000..1d10bd94 --- /dev/null +++ b/opencompass/configs/models/hf_internlm/lmdeploy_oreal_32b.py @@ -0,0 +1,20 @@ +from opencompass.models import TurboMindModelwithChatTemplate +from opencompass.utils.text_postprocessors import extract_non_reasoning_content + +models = [ + dict( + type=TurboMindModelwithChatTemplate, + abbr='OREAL-32B', + path='internlm/OREAL-32B', + engine_config=dict(session_len=32768, max_batch_size=16, tp=4), + gen_config=dict(top_k=1, + temperature=1e-6, + top_p=0.9, + max_new_tokens=32768), + max_seq_len=32768, + max_out_len=32768, + batch_size=16, + run_cfg=dict(num_gpus=4), + pred_postprocessor=dict(type=extract_non_reasoning_content) + ) +] diff --git a/opencompass/configs/models/qwen2_5/vllm_qwen2_5_14b_instruct_128k.py b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_14b_instruct_128k.py new file mode 100644 index 00000000..6dec3743 --- /dev/null +++ 
b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_14b_instruct_128k.py @@ -0,0 +1,21 @@ +from opencompass.models import VLLMwithChatTemplate + +models = [ + dict( + type=VLLMwithChatTemplate, + abbr='qwen2.5-14b-instruct-vllm', + path='Qwen/Qwen2.5-14B-Instruct', + model_kwargs=dict( + tensor_parallel_size=4, + rope_scaling={ + 'factor': 4.0, + 'original_max_position_embeddings': 32768, + 'rope_type': 'yarn' + }, + ), + max_out_len=4096, + batch_size=1, + generation_kwargs=dict(temperature=0), + run_cfg=dict(num_gpus=4), + ) +] diff --git a/opencompass/configs/models/qwen2_5/vllm_qwen2_5_32b_instruct_128k.py b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_32b_instruct_128k.py new file mode 100644 index 00000000..5c326734 --- /dev/null +++ b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_32b_instruct_128k.py @@ -0,0 +1,21 @@ +from opencompass.models import VLLMwithChatTemplate + +models = [ + dict( + type=VLLMwithChatTemplate, + abbr='qwen2.5-32b-instruct-vllm', + path='Qwen/Qwen2.5-32B-Instruct', + model_kwargs=dict( + tensor_parallel_size=8, + rope_scaling={ + 'factor': 4.0, + 'original_max_position_embeddings': 32768, + 'rope_type': 'yarn' + }, + ), + max_out_len=4096, + batch_size=1, + generation_kwargs=dict(temperature=0), + run_cfg=dict(num_gpus=8), + ) +] diff --git a/opencompass/configs/models/qwen2_5/vllm_qwen2_5_72b_instruct_128k.py b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_72b_instruct_128k.py new file mode 100644 index 00000000..2a4a52fa --- /dev/null +++ b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_72b_instruct_128k.py @@ -0,0 +1,21 @@ +from opencompass.models import VLLMwithChatTemplate + +models = [ + dict( + type=VLLMwithChatTemplate, + abbr='qwen2.5-72b-instruct-vllm', + path='Qwen/Qwen2.5-72B-Instruct', + model_kwargs=dict( + tensor_parallel_size=8, + rope_scaling={ + 'factor': 4.0, + 'original_max_position_embeddings': 32768, + 'rope_type': 'yarn' + }, + ), + max_out_len=4096, + batch_size=1, + generation_kwargs=dict(temperature=0), + run_cfg=dict(num_gpus=8), + ) +] diff --git a/opencompass/configs/models/qwen2_5/vllm_qwen2_5_7b_instruct_128k.py b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_7b_instruct_128k.py new file mode 100644 index 00000000..db21f730 --- /dev/null +++ b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_7b_instruct_128k.py @@ -0,0 +1,21 @@ +from opencompass.models import VLLMwithChatTemplate + +models = [ + dict( + type=VLLMwithChatTemplate, + abbr='qwen2.5-7b-instruct-vllm', + path='Qwen/Qwen2.5-7B-Instruct', + model_kwargs=dict( + tensor_parallel_size=4, + rope_scaling={ + 'factor': 4.0, + 'original_max_position_embeddings': 32768, + 'rope_type': 'yarn' + }, + ), + max_out_len=4096, + batch_size=1, + generation_kwargs=dict(temperature=0), + run_cfg=dict(num_gpus=4), + ) +] diff --git a/opencompass/datasets/CARDBiomedBench.py b/opencompass/datasets/CARDBiomedBench.py new file mode 100644 index 00000000..77ff9ee6 --- /dev/null +++ b/opencompass/datasets/CARDBiomedBench.py @@ -0,0 +1,30 @@ +from datasets import load_dataset + +from opencompass.registry import LOAD_DATASET + +from .base import BaseDataset + + +def _parse(item, prompt_mode): + item['expert'] = item['Bio_Category'] + item['start'] = chr(65) + item['end'] = chr(65 + len(item.get('choices', {'label': []})['label']) - + 1) + item['prompt_mode'] = prompt_mode + return item + + +@LOAD_DATASET.register_module() +class CARDBiomedBenchDataset(BaseDataset): + + @staticmethod + def load(path: str, prompt_mode: str, **kwargs): + data_files = {'test': 'data/CARDBiomedBench.csv'} +
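# The HF dataset repo ships the benchmark as a single CSV; load it directly as the test split. +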
dataset = load_dataset(path, data_files=data_files, split='test') + if prompt_mode == 'zero-shot': + dataset = dataset.map(lambda item: _parse(item, prompt_mode), + load_from_cache_file=False) + elif prompt_mode == 'few-shot': + pass # TODO: Implement few-shot prompt + return dataset diff --git a/opencompass/datasets/ClinicBench.py b/opencompass/datasets/ClinicBench.py new file mode 100644 index 00000000..86ef5082 --- /dev/null +++ b/opencompass/datasets/ClinicBench.py @@ -0,0 +1,19 @@ +from datasets import load_dataset + +from opencompass.registry import LOAD_DATASET + +from .base import BaseDataset + + +@LOAD_DATASET.register_module() +class ClinicBenchDataset(BaseDataset): + + @staticmethod + def load_single(path): + dataset = load_dataset(path)['train'] + return dataset + + @staticmethod + def load(path): + dataset = ClinicBenchDataset.load_single(path) + return dataset diff --git a/opencompass/datasets/PubMedQA.py b/opencompass/datasets/PubMedQA.py new file mode 100644 index 00000000..b0db32e3 --- /dev/null +++ b/opencompass/datasets/PubMedQA.py @@ -0,0 +1,34 @@ +from datasets import Dataset, load_dataset + +from opencompass.registry import LOAD_DATASET + +from .base import BaseDataset + + +@LOAD_DATASET.register_module() +class PubMedQADataset(BaseDataset): + + @staticmethod + def load_single(path): + dataset = [] + ds = load_dataset(path, 'pqa_labeled') + for data in ds['train']: + data['question'] = (f"CONTEXTS: {data['context']}\n" + f"QUESTION: {data['question']}") + choices = 'A. yes\nB. no\nC. maybe' + data['choices'] = choices + if data['final_decision'] == 'yes': + data['label'] = 'A. yes' + elif data['final_decision'] == 'no': + data['label'] = 'B. no' + else: + data['label'] = 'C. maybe' + + dataset.append(data) + + return Dataset.from_list(dataset) + + @staticmethod + def load(path): + dataset = PubMedQADataset.load_single(path) + return dataset diff --git a/opencompass/datasets/ScienceQA.py b/opencompass/datasets/ScienceQA.py new file mode 100644 index 00000000..1bc9c952 --- /dev/null +++ b/opencompass/datasets/ScienceQA.py @@ -0,0 +1,32 @@ +from datasets import Dataset, load_dataset + +from opencompass.registry import LOAD_DATASET + +from .base import BaseDataset + + +@LOAD_DATASET.register_module() +class ScienceQADataset(BaseDataset): + + @staticmethod + def load_single(path): + dataset = [] + ds = load_dataset(path) + for data in ds['test']: + if data['image'] is None: + data['label'] = chr(65 + data['answer'] + ) + '. ' + data['choices'][data['answer']] + choices = '' + for i in range(len(data['choices'])): + choices += chr(65 + i) + '. 
' + data['choices'][i] + '\n' + data['choices'] = choices + + dataset.append(data) + + return Dataset.from_list(dataset) + + @staticmethod + def load(path): + dataset = ScienceQADataset.load_single(path) + return dataset diff --git a/opencompass/datasets/__init__.py b/opencompass/datasets/__init__.py index 74bb416b..0426f377 100644 --- a/opencompass/datasets/__init__.py +++ b/opencompass/datasets/__init__.py @@ -16,6 +16,7 @@ from .boolq import * # noqa: F401, F403 from .bustum import * # noqa: F401, F403 from .c3 import * # noqa: F401, F403 from .calm import * # noqa: F401, F403 +from .CARDBiomedBench import CARDBiomedBenchDataset # noqa: F401 from .cb import * # noqa: F401, F403 from .ceval import * # noqa: F401, F403 from .charm import * # noqa: F401, F403 @@ -111,6 +112,7 @@ from .musr import * # noqa: F401, F403 from .narrativeqa import * # noqa: F401, F403 from .natural_question import * # noqa: F401, F403 from .natural_question_cn import * # noqa: F401, F403 +from .nejmaibench import * # noqa: F401, F403 from .NPHardEval import * # noqa: F401, F403 from .obqa import * # noqa: F401, F403 from .olymmath import * # noqa: F401, F403 diff --git a/opencompass/datasets/nejmaibench.py b/opencompass/datasets/nejmaibench.py new file mode 100644 index 00000000..768f4688 --- /dev/null +++ b/opencompass/datasets/nejmaibench.py @@ -0,0 +1,139 @@ +import re + +import pandas as pd +from datasets import Dataset + +from opencompass.openicl import BaseEvaluator +from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS +from opencompass.utils import get_data_path + +from .base import BaseDataset + + +def _parse(item, prompt_mode): + # 1. Split the raw Choices string into one line per option + raw_choices = item.get('Choices', '') + # Strip surrounding whitespace, split into lines, and drop empty lines + lines = [ + line.strip() for line in raw_choices.strip().splitlines() + if line.strip() + ] + + # 2. Strip the leading "A. "/"B. " prefixes with a regex, keeping only the option text + options_list = [re.sub(r'^[A-Z]\.\s*', '', line) for line in lines] + + # 3. Write the parsed options back onto the item + item['options'] = options_list + + # 4. Rebuild the lettered options string + options_str = '\n'.join(f'{chr(65 + i)}. {opt}' + for i, opt in enumerate(options_list)) + + # 5. Build question, label, prompt_mode, start, end + item['question'] = f"{item['Question']}\n{options_str}" + item['label'] = item['Answer'] + item['prompt_mode'] = prompt_mode + item['start'] = chr(65) + item['end'] = chr(65 + len(options_list) - 1) + return item + + +@LOAD_DATASET.register_module() +class NejmaibenchDataset(BaseDataset): + + @staticmethod + def load(path: str, prompt_mode: str = 'zero-shot', **kwargs): + # Read the CSV into a DataFrame and replace NaN with empty strings + path = get_data_path(path) + df = pd.read_csv(path, encoding='utf-8') + df = df.fillna('') + + # Convert to a list of record dicts + data_list = df.to_dict(orient='records') + + # Wrap the records in a Dataset + dataset = Dataset.from_list(data_list) + + # Parse according to the prompt mode + if prompt_mode == 'zero-shot': + dataset = dataset.map(lambda item: _parse(item, prompt_mode)) + elif prompt_mode == 'few-shot': + pass # TODO: Implement few-shot prompt handling + return dataset + + +class NejmaibenchEvaluator(BaseEvaluator): + + def score(self, predictions, references, test_set): + method = test_set['prompt_mode'][0] + + if len(predictions) != len(references): + return {'error': 'predictions and references have different length'} + correct = 0 + count = 0 + details = [] + for idx, (i, j) in enumerate(zip(predictions, references)): + i = answer_cleansing(method, i, test_set['options'][idx], + test_set['label'][idx]) + detail = { + 'pred': i, + 'answer': j, + 'correct': False, + 'Subject': test_set['Subject'][idx], + } + count += 1 + if i == j: + correct += 1 + detail['correct'] = True + details.append(detail) + result = {'accuracy': 100 * correct / count, 'details': details} + return result + + +@TEXT_POSTPROCESSORS.register_module() +def answer_cleansing( + method: str, + prediction: str, + options: list, + label: str, +) -> str: + + # Clean up unwanted phrases in the prediction + for unwanted_phrase in [ + 'I understand', + 'A through J', + 'A through E', + 'A through D', + ]: + prediction = prediction.replace(unwanted_phrase, '') + + options_num = len(options) + options = [chr(65 + i) for i in range(options_num)] + options_str = r'\b(' + '|'.join(options) + r')\b' + prediction = re.findall(options_str, prediction) + + if len(prediction) == 0: + return '' + else: + # If there is a "label" and its length is 1, + # process prediction accordingly + if len(label) == 1: + if method == 'few-shot': + answer_flag = True if len(prediction) > 1 else False + # choose the first or last element based on the answer_flag + if answer_flag: + prediction = [prediction[0]] + else: + prediction = [prediction[-1]] + elif method == 'zero-shot': + # choose the first element in list + prediction = [prediction[0]] + else: + raise ValueError('Method is not properly defined ...') + + # Remove trailing period if it exists + if prediction[0] and prediction[0].endswith('.'): + prediction[0] = prediction[0][:-1] + + return prediction[0] diff --git a/opencompass/utils/datasets_info.py b/opencompass/utils/datasets_info.py index b80c756a..2144483b 100644 --- a/opencompass/utils/datasets_info.py +++ b/opencompass/utils/datasets_info.py @@ -446,6 +446,11 @@ DATASETS_MAPPING = { "hf_id": "", "local": "./data/ChemBench4K", }, + "opencompass/nejmaibench": { + "ms_id": "", + "hf_id": "", + "local": "./data/nejmaibench/NEJM_All_Questions_And_Answers.csv", + }, "opencompass/medbullets": { "ms_id": "", "hf_id": "", @@ -803,6 +808,11 @@ DATASETS_URL = { "url": "http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/ChemBench4K.zip", "md5": "fc23fd21b2566a5dbbebfa4601d7779c" + }, + "nejmaibench": { + "url": + 
"http://opencompass.oss-cn-shanghai.aliyuncs.com/datasets/data/nejmaibench.zip", + "md5": "e6082cae3596b3ebea73e23ba445b99e" } }