Mirror of https://github.com/open-compass/opencompass.git (synced 2025-05-30 16:03:24 +08:00)

Commit f93668337a (parent c3779ebfc1): update
@@ -90,7 +90,7 @@ repos:
           - linkify-it-py
     exclude: configs/
   - repo: https://gitee.com/openmmlab/mirrors-docformatter
-    rev: v1.3.1
+    rev: v1.7.7
     hooks:
       - id: docformatter
         args: ["--in-place", "--wrap-descriptions", "79"]
@@ -91,7 +91,7 @@ repos:
           - linkify-it-py
     exclude: configs/
   - repo: https://github.com/myint/docformatter
-    rev: v1.3.1
+    rev: v1.7.7
     hooks:
       - id: docformatter
         args: ["--in-place", "--wrap-descriptions", "79"]
@@ -116,15 +116,9 @@ repos:
         args:
           - --root_folder
           - opencompass/configs/datasets
-  - repo: https://github.com/gitleaks/gitleaks
-    rev: v8.23.1
-    hooks:
-      - id: gitleaks
-        entry: "gitleaks dir"
-        args: ["--verbose", "--redact=50"]
   # - repo: https://github.com/open-mmlab/pre-commit-hooks
   #   rev: v0.2.0  # Use the ref you want to point at
   #   hooks:
   #     - id: check-algo-readme
   #     - id: check-copyright
   #       args: ["mmocr", "tests", "tools"]  # these directories will be checked

(new file)
@@ -0,0 +1,181 @@
from opencompass.datasets.supergpqa.supergpqa import (
    SuperGPQADataset,
    supergpqa_llmjudge_postprocess,
)
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.evaluator import GenericLLMEvaluator

field_list = [
    'Electronic Science and Technology',
    'Philosophy',
    'Traditional Chinese Medicine',
    'Applied Economics',
    'Mathematics',
    'Physics',
    'Clinical Medicine',
    'Computer Science and Technology',
    'Information and Communication Engineering',
    'Control Science and Engineering',
    'Theoretical Economics',
    'Law',
    'History',
    'Basic Medicine',
    'Education',
    'Materials Science and Engineering',
    'Electrical Engineering',
    'Systems Science',
    'Power Engineering and Engineering Thermophysics',
    'Military Science',
    'Biology',
    'Business Administration',
    'Language and Literature',
    'Public Health and Preventive Medicine',
    'Political Science',
    'Chemistry',
    'Hydraulic Engineering',
    'Chemical Engineering and Technology',
    'Pharmacy',
    'Geography',
    'Art Studies',
    'Architecture',
    'Forestry Engineering',
    'Public Administration',
    'Oceanography',
    'Journalism and Communication',
    'Nuclear Science and Technology',
    'Weapon Science and Technology',
    'Naval Architecture and Ocean Engineering',
    'Environmental Science and Engineering',
    'Transportation Engineering',
    'Geology',
    'Physical Oceanography',
    'Musicology',
    'Stomatology',
    'Aquaculture',
    'Mechanical Engineering',
    'Aeronautical and Astronautical Science and Technology',
    'Civil Engineering',
    'Mechanics',
    'Petroleum and Natural Gas Engineering',
    'Sociology',
    'Food Science and Engineering',
    'Agricultural Engineering',
    'Surveying and Mapping Science and Technology',
    'Metallurgical Engineering',
    'Library, Information and Archival Management',
    'Mining Engineering',
    'Astronomy',
    'Geological Resources and Geological Engineering',
    'Atmospheric Science',
    'Optical Engineering',
    'Animal Husbandry',
    'Geophysics',
    'Crop Science',
    'Management Science and Engineering',
    'Psychology',
    'Forestry',
    'Textile Science and Engineering',
    'Veterinary Medicine',
    'Instrument Science and Technology',
    'Physical Education',
]
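# Each field above becomes one dataset entry below, with abbr
# 'supergpqa_<Field>' (spaces -> underscores), matching the summarizer
# weights file added later in this commit.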

GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.

Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.

<Original Question Begin>: {infer_prompt}\n<Original Question End>\n\n
<Gold Target Begin>: \n{answer_letter}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()

# Reader configuration
reader_cfg = dict(
    input_columns=[
        'question',
        'options',
        'discipline',
        'field',
        'subfield',
        'difficulty',
        'infer_prompt',
        'prompt_mode',
    ],
    output_column='answer_letter',
)

# Inference configuration
infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt='{infer_prompt}',
                ),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)
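# ZeroRetriever supplies no in-context examples (zero-shot) and GenInferencer
# generates free-form text; correctness is then graded by the LLM judge
# configured in eval_cfg below.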

supergpqa_datasets = []
for field in field_list:
    supergpqa_datasets.append(
        dict(
            type=SuperGPQADataset,
            abbr=f'supergpqa_{field.replace(" ", "_")}',
            field=field,
            path='m-a-p/SuperGPQA',
            prompt_mode='zero-shot',
            reader_cfg=reader_cfg,
            infer_cfg=infer_cfg,
            eval_cfg=dict(
                evaluator=dict(
                    type=GenericLLMEvaluator,
                    prompt_template=dict(
                        type=PromptTemplate,
                        template=dict(
                            begin=[
                                dict(
                                    role='SYSTEM',
                                    fallback_role='HUMAN',
                                    prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                                )
                            ],
                            round=[
                                dict(role='HUMAN', prompt=GRADER_TEMPLATE),
                            ],
                        ),
                    ),
                    dataset_cfg=dict(
                        type=SuperGPQADataset,
                        field=field,
                        path='m-a-p/SuperGPQA',
                        prompt_mode='zero-shot',
                        reader_cfg=reader_cfg,
                    ),
                    judge_cfg=dict(),
                    dict_postprocessor=dict(
                        type=supergpqa_llmjudge_postprocess
                    ),
                ),
            ),
        )
    )
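For reference, a run config would typically pull these datasets in through read_base. A minimal sketch, assuming a hypothetical module path (this view does not show where the new config file lives) and leaving the model and judge entries to the user:

    from mmengine.config import read_base

    with read_base():
        # hypothetical import path -- adjust to wherever this config is placed
        from opencompass.configs.datasets.supergpqa.supergpqa_gen import \
            supergpqa_datasets

    datasets = supergpqa_datasets
    # models = [...]  # your model configs; judge_cfg above also needs a judge model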

opencompass/configs/summarizers/groups/supergpqa.py (new file)
@@ -0,0 +1,89 @@
supergpqa_summary_groups = []

# Number of questions per SuperGPQA field, used as weights for the group score
supergpqa_weights = {
    'Electronic_Science_and_Technology': 246,
    'Philosophy': 347,
    'Traditional_Chinese_Medicine': 268,
    'Applied_Economics': 723,
    'Mathematics': 2622,
    'Physics': 2845,
    'Clinical_Medicine': 1218,
    'Computer_Science_and_Technology': 763,
    'Information_and_Communication_Engineering': 504,
    'Control_Science_and_Engineering': 190,
    'Theoretical_Economics': 150,
    'Law': 591,
    'History': 674,
    'Basic_Medicine': 567,
    'Education': 247,
    'Materials_Science_and_Engineering': 289,
    'Electrical_Engineering': 556,
    'Systems_Science': 50,
    'Power_Engineering_and_Engineering_Thermophysics': 684,
    'Military_Science': 205,
    'Biology': 1120,
    'Business_Administration': 142,
    'Language_and_Literature': 440,
    'Public_Health_and_Preventive_Medicine': 292,
    'Political_Science': 65,
    'Chemistry': 1769,
    'Hydraulic_Engineering': 218,
    'Chemical_Engineering_and_Technology': 410,
    'Pharmacy': 278,
    'Geography': 133,
    'Art_Studies': 603,
    'Architecture': 162,
    'Forestry_Engineering': 100,
    'Public_Administration': 151,
    'Oceanography': 200,
    'Journalism_and_Communication': 207,
    'Nuclear_Science_and_Technology': 107,
    'Weapon_Science_and_Technology': 100,
    'Naval_Architecture_and_Ocean_Engineering': 138,
    'Environmental_Science_and_Engineering': 189,
    'Transportation_Engineering': 251,
    'Geology': 341,
    'Physical_Oceanography': 50,
    'Musicology': 426,
    'Stomatology': 132,
    'Aquaculture': 56,
    'Mechanical_Engineering': 176,
    'Aeronautical_and_Astronautical_Science_and_Technology': 119,
    'Civil_Engineering': 358,
    'Mechanics': 908,
    'Petroleum_and_Natural_Gas_Engineering': 112,
    'Sociology': 143,
    'Food_Science_and_Engineering': 109,
    'Agricultural_Engineering': 104,
    'Surveying_and_Mapping_Science_and_Technology': 168,
    'Metallurgical_Engineering': 255,
    'Library,_Information_and_Archival_Management': 150,
    'Mining_Engineering': 100,
    'Astronomy': 405,
    'Geological_Resources_and_Geological_Engineering': 50,
    'Atmospheric_Science': 203,
    'Optical_Engineering': 376,
    'Animal_Husbandry': 103,
    'Geophysics': 100,
    'Crop_Science': 145,
    'Management_Science_and_Engineering': 58,
    'Psychology': 87,
    'Forestry': 131,
    'Textile_Science_and_Engineering': 100,
    'Veterinary_Medicine': 50,
    'Instrument_Science_and_Technology': 50,
    'Physical_Education': 150,
}
supergpqa_weights = {
    'supergpqa_' + k: v for k, v in supergpqa_weights.items()
}
supergpqa_summary_groups.append(
    {
        'name': 'SuperGPQA',
        'subsets': [[k, 'accuracy'] for k in supergpqa_weights],
        'weights': supergpqa_weights,
    }
)
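The group score is an example-count-weighted mean of per-field accuracies. A minimal sketch of the arithmetic with made-up scores (the real aggregation is performed by the OpenCompass summarizer, not this file):

    scores = {'supergpqa_Physics': 50.0, 'supergpqa_Law': 60.0}  # illustrative values
    total = sum(supergpqa_weights[k] for k in scores)            # 2845 + 591
    group = sum(scores[k] * supergpqa_weights[k] for k in scores) / total
    # ~51.7: Physics (2845 examples) dominates Law (591)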

@@ -29,9 +29,21 @@ def _parse(item, template, prompt_mode):
 class SuperGPQADataset(BaseDataset):

     @staticmethod
-    def load(path: str, prompt_mode: str, **kwargs):
+    def load(path: str,
+             prompt_mode: str,
+             discipline: str = None,
+             field: str = None,
+             subfield: str = None,
+             **kwargs):
         dataset = load_dataset(path, split='train')

+        if discipline is not None:
+            dataset = dataset.filter(lambda x: x['discipline'] == discipline)
+        if field is not None:
+            dataset = dataset.filter(lambda x: x['field'] == field)
+        if subfield is not None:
+            dataset = dataset.filter(lambda x: x['subfield'] == subfield)
+
         # get prompt template
         template_path = None
         if prompt_mode == 'zero-shot':
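A quick usage sketch of the new filter arguments, assuming the 'm-a-p/SuperGPQA' hub id used in the configs above (requires the HF `datasets` package):

    # keep only the 'Mathematics' field; discipline/field/subfield filters compose
    ds = SuperGPQADataset.load(path='m-a-p/SuperGPQA',
                               prompt_mode='zero-shot',
                               field='Mathematics')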