Mirror of https://github.com/open-compass/opencompass.git, synced 2025-05-30 16:03:24 +08:00
[Update] Update O1-style Benchmark and Prompts (#1742)
* Update JudgerBench
* Support O1-style Prompts
* Update Code
* Update OpenAI
* Update BigCodeBench
* Update BigCodeBench
* Update BigCodeBench
* Update BigCodeBench
* Update BigCodeBench
* Update
* Update
* Update
* Update
This commit is contained in:
parent
f333be177c
commit
0d8df541bc
@@ -0,0 +1,123 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import CMMLUDataset
from opencompass.utils.text_postprocessors import match_answer_pattern
from opencompass.openicl.icl_evaluator import LMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess

cmmlu_subject_mapping = {
    'anatomy': '解剖学',
    'astronomy': '天文学',
    'college_actuarial_science': '大学精算学',
    'college_engineering_hydrology': '大学工程水文学',
    'college_mathematics': '大学数学',
    'college_medical_statistics': '大学医学统计',
    'computer_science': '计算机科学',
    'conceptual_physics': '概念物理学',
    'electrical_engineering': '电气工程',
    'elementary_mathematics': '初等数学',
    'genetics': '遗传学',
    'high_school_biology': '高中生物',
    'high_school_chemistry': '高中化学',
    'high_school_mathematics': '高中数学',
    'high_school_physics': '高中物理学',
    'machine_learning': '机器学习',
    'virology': '病毒学',
}

QUERY_TEMPLATE = """
你回答的最后一行**必须**是以下格式 '答案: $选项' (不带引号), 其中选项是ABCD之一.

{question}

A) {A}
B) {B}
C) {C}
D) {D}
""".strip()


GRADER_TEMPLATE = """
Please act as a grading expert and judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may differ from the standard answer in its form of expression, first understand the question and the standard answer, and then judge whether the candidate's answer is correct; again, do not try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer matches the standard answer, it is correct. For multiple-select questions and multi-blank fill-in-the-blank questions, the candidate must answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways; for example, one answer may be a mathematical expression while another is a textual description, as long as the meaning is the same. Likewise, formulas written in different but equivalent forms both count as correct.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letter "A" or "B", with no text around it.

Here is your task. Simply reply with either CORRECT or INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.

<Original Question Begin>: \n {question}\n A) {A}\n B) {B}\n C) {C}\n D) {D}\n<Original Question End>\n\n
<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted Answer End>\n\n
Judging the correctness of candidates' answers:
""".strip()

cmmlu_all_sets = list(cmmlu_subject_mapping.keys())

cmmlu_datasets = []
for _name in cmmlu_all_sets:
    _ch_name = cmmlu_subject_mapping[_name]
    prompt_prefix = f'请回答以下关于{_ch_name}的单项选择题, '
    cmmlu_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(role='HUMAN', prompt=prompt_prefix + QUERY_TEMPLATE),
                ],
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    cmmlu_eval_cfg = dict(
        evaluator=dict(
            type=LMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(
                            role='SYSTEM',
                            fallback_role='HUMAN',
                            prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
                    ],
                    round=[
                        dict(
                            role='HUMAN',
                            prompt=GRADER_TEMPLATE
                        ),
                    ]),
            ),
            dict_postprocessor=dict(type=generic_llmjudge_postprocess),
        ),
        pred_role='BOT',
    )
    cmmlu_datasets.append(
        dict(
            type=CMMLUDataset,
            path='opencompass/cmmlu',
            name=_name,
            abbr=f'cmmlu-{_name}',
            reader_cfg=dict(
                input_columns=['question', 'A', 'B', 'C', 'D'],
                output_column='answer',
                train_split='dev',
                test_split='test'),
            infer_cfg=cmmlu_infer_cfg,
            eval_cfg=cmmlu_eval_cfg,
            mode='singlescore',
        ))

del _name, _ch_name
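
The grader prompt above pins the judge to a bare "A" or "B" verdict, which is what generic_llmjudge_postprocess keys on downstream. A minimal sketch of that kind of verdict parsing (illustrative only, not the actual OpenCompass implementation):

import re

def parse_judge_verdict(judge_output: str) -> bool:
    # Sketch only: map the judge's single-letter reply to correct/incorrect.
    # OpenCompass's generic_llmjudge_postprocess performs the real parsing.
    match = re.search(r'\b([AB])\b', judge_output.strip())
    return bool(match) and match.group(1) == 'A'
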
@ -0,0 +1,36 @@
|
||||
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
||||
from opencompass.openicl.icl_retriever import ZeroRetriever
|
||||
from opencompass.openicl.icl_inferencer import GenInferencer
|
||||
from opencompass.datasets import HumanevalDataset, HumanEvalEvaluator, humaneval_postprocess_v3
|
||||
|
||||
humaneval_reader_cfg = dict(
|
||||
input_columns=['prompt'], output_column='task_id', train_split='test')
|
||||
|
||||
# TODO: allow empty output-column
|
||||
humaneval_infer_cfg = dict(
|
||||
prompt_template=dict(
|
||||
type=PromptTemplate,
|
||||
template=dict(round=[
|
||||
dict(
|
||||
role='HUMAN',
|
||||
prompt='Read the following function signature and docstring, and fully implement the function described. Your response should only contain the code for this function.\n{prompt}'),
|
||||
])),
|
||||
retriever=dict(type=ZeroRetriever),
|
||||
inferencer=dict(type=GenInferencer, max_out_len=8192))
|
||||
|
||||
humaneval_eval_cfg = dict(
|
||||
evaluator=dict(type=HumanEvalEvaluator),
|
||||
pred_role='BOT',
|
||||
k=[1, 10, 100], # the parameter only for humaneval
|
||||
pred_postprocessor=dict(type=humaneval_postprocess_v3),
|
||||
)
|
||||
|
||||
humaneval_datasets = [
|
||||
dict(
|
||||
abbr='openai_humaneval_o1_style',
|
||||
type=HumanevalDataset,
|
||||
path='opencompass/humaneval',
|
||||
reader_cfg=humaneval_reader_cfg,
|
||||
infer_cfg=humaneval_infer_cfg,
|
||||
eval_cfg=humaneval_eval_cfg)
|
||||
]
|
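
Since the model is told to return only the function implementation, the post-processor still has to strip any markdown fences the model emits. A rough sketch of that style of extraction (illustrative; humaneval_postprocess_v3 is the real implementation and may differ):

import re

def extract_function_code(text: str) -> str:
    # Sketch: prefer the first fenced code block; otherwise return the raw text.
    match = re.search(r'```(?:python)?\n(.*?)```', text, re.DOTALL)
    return match.group(1).strip() if match else text.strip()
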
@ -0,0 +1,165 @@
|
||||
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
||||
from opencompass.openicl.icl_retriever import ZeroRetriever
|
||||
from opencompass.openicl.icl_inferencer import GenInferencer
|
||||
from opencompass.datasets import (
|
||||
LCBCodeGenerationDataset,
|
||||
LCBCodeExecutionDataset,
|
||||
LCBTestOutputPredictionDataset,
|
||||
LCBCodeGenerationEvaluator,
|
||||
LCBCodeExecutionEvaluator,
|
||||
LCBTestOutputEvaluator
|
||||
)
|
||||
from opencompass.datasets.livecodebench import TestOutputPromptConstants
|
||||
|
||||
|
||||
lcb_code_generation_reader_cfg = dict(
|
||||
input_columns=[
|
||||
'question_content',
|
||||
'format_prompt',
|
||||
],
|
||||
# output_column='evaluation_sample',
|
||||
output_column='question_id',
|
||||
)
|
||||
|
||||
SYSTEM_MESSAGE_GENERIC = f'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'
|
||||
|
||||
prompt_template = '### Question:\n{question_content}\n\n{format_prompt}' + \
|
||||
'### Answer: (use the provided format with backticks)\n\n'
|
||||
|
||||
|
||||
# Code Generation Tasks
|
||||
lcb_code_generation_infer_cfg = dict(
|
||||
prompt_template=dict(
|
||||
type=PromptTemplate,
|
||||
template=dict(
|
||||
round=[
|
||||
dict(
|
||||
role='HUMAN',
|
||||
prompt=prompt_template
|
||||
)
|
||||
]
|
||||
)
|
||||
),
|
||||
retriever=dict(type=ZeroRetriever),
|
||||
inferencer=dict(type=GenInferencer, max_out_len=1024)
|
||||
)
|
||||
|
||||
lcb_code_generation_eval_cfg = dict(
|
||||
evaluator=dict(
|
||||
type=LCBCodeGenerationEvaluator,
|
||||
num_process_evaluate=4,
|
||||
timeout=6,
|
||||
release_version='release_split_v4',
|
||||
extractor_version='v2',
|
||||
),
|
||||
pred_role='BOT',
|
||||
)
|
||||
|
||||
LCBCodeGeneration_dataset = dict(
|
||||
type=LCBCodeGenerationDataset,
|
||||
abbr='lcb_code_generation_split_v4',
|
||||
path='opencompass/code_generation_lite',
|
||||
reader_cfg=lcb_code_generation_reader_cfg,
|
||||
infer_cfg=lcb_code_generation_infer_cfg,
|
||||
eval_cfg=lcb_code_generation_eval_cfg,
|
||||
release_version='release_split_v4',
|
||||
)
|
||||
|
||||
# Code Execution Dataset
|
||||
lcb_code_execution_reader_cfg = dict(
|
||||
input_columns=[
|
||||
'prompt',
|
||||
],
|
||||
output_column='evaluation_sample',
|
||||
)
|
||||
|
||||
lcb_code_execution_infer_cfg = dict(
|
||||
prompt_template=dict(
|
||||
type=PromptTemplate,
|
||||
template=dict(
|
||||
begin=[
|
||||
dict(
|
||||
role='SYSTEM',
|
||||
prompt='You are an expert at Python programming, code execution, test case generation, and fuzzing.',
|
||||
fallback_role='HUMAN',
|
||||
),
|
||||
],
|
||||
round=[
|
||||
dict(
|
||||
role='HUMAN',
|
||||
prompt='{prompt}'
|
||||
)
|
||||
]
|
||||
)
|
||||
),
|
||||
retriever=dict(type=ZeroRetriever),
|
||||
inferencer=dict(type=GenInferencer, max_out_len=1024)
|
||||
)
|
||||
|
||||
lcb_code_execution_eval_cfg = dict(
|
||||
evaluator=dict(
|
||||
type=LCBCodeExecutionEvaluator,
|
||||
),
|
||||
pred_role='BOT',
|
||||
)
|
||||
|
||||
LCBCodeExecution_dataset = dict(
|
||||
type=LCBCodeExecutionDataset,
|
||||
abbr='lcb_code_execution',
|
||||
path='opencompass/execution-v2',
|
||||
reader_cfg=lcb_code_execution_reader_cfg,
|
||||
infer_cfg=lcb_code_execution_infer_cfg,
|
||||
eval_cfg=lcb_code_execution_eval_cfg,
|
||||
)
|
||||
|
||||
# TestOuputput Dataset
|
||||
lcb_test_output_reader_cfg = dict(
|
||||
input_columns=[
|
||||
'prompt',
|
||||
],
|
||||
output_column='evaluation_sample',
|
||||
)
|
||||
|
||||
system_prompt = 'You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.'
|
||||
|
||||
lcb_test_output_infer_cfg = dict(
|
||||
prompt_template=dict(
|
||||
type=PromptTemplate,
|
||||
template=dict(
|
||||
# begin=[
|
||||
# dict(
|
||||
# role='SYSTEM',
|
||||
# prompt=system_prompt
|
||||
# ),
|
||||
# ],
|
||||
round=[
|
||||
dict(
|
||||
role='HUMAN',
|
||||
prompt='{prompt}'
|
||||
)
|
||||
]
|
||||
)
|
||||
),
|
||||
retriever=dict(type=ZeroRetriever),
|
||||
inferencer=dict(type=GenInferencer, max_out_len=1024)
|
||||
)
|
||||
|
||||
lcb_test_output_eval_cfg = dict(
|
||||
evaluator=dict(
|
||||
type=LCBTestOutputEvaluator,
|
||||
),
|
||||
pred_role='BOT',
|
||||
)
|
||||
|
||||
LCBTestOutput_dataset = dict(
|
||||
type=LCBTestOutputPredictionDataset,
|
||||
abbr='lcb_test_output',
|
||||
path='opencompass/test_generation',
|
||||
reader_cfg=lcb_test_output_reader_cfg,
|
||||
infer_cfg=lcb_test_output_infer_cfg,
|
||||
eval_cfg=lcb_test_output_eval_cfg,
|
||||
)
|
||||
|
||||
LCB_datasets = [
|
||||
LCBCodeGeneration_dataset,
|
||||
]
|
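
Note that only the code-generation task is exported in LCB_datasets; the execution and test-output-prediction datasets are defined above but left disabled. Enabling all three tasks would just mean listing them (a sketch using the names defined in this file):

# Sketch: run all three LiveCodeBench tasks instead of generation only.
LCB_datasets = [
    LCBCodeGeneration_dataset,
    LCBCodeExecution_dataset,
    LCBTestOutput_dataset,
]
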
@ -0,0 +1,49 @@
|
||||
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
||||
from opencompass.openicl.icl_retriever import ZeroRetriever
|
||||
from opencompass.openicl.icl_inferencer import GenInferencer
|
||||
|
||||
from opencompass.datasets.livemathbench import LiveMathBenchDataset, LiveMathBenchEvaluator
|
||||
|
||||
|
||||
livemathbench_reader_cfg = dict(
|
||||
input_columns=['prompt'],
|
||||
output_column='answer'
|
||||
)
|
||||
|
||||
livemathbench_infer_cfg = dict(
|
||||
prompt_template=dict(
|
||||
type=PromptTemplate,
|
||||
template=dict(
|
||||
round=[
|
||||
dict(role='HUMAN', prompt='{prompt}'),
|
||||
]
|
||||
)
|
||||
),
|
||||
retriever=dict(type=ZeroRetriever),
|
||||
inferencer=dict(
|
||||
type=GenInferencer,
|
||||
max_out_len=8192,
|
||||
temperature=1.0
|
||||
)
|
||||
)
|
||||
|
||||
livemathbench_eval_cfg = dict(
|
||||
evaluator=dict(
|
||||
type=LiveMathBenchEvaluator,
|
||||
model_name='Qwen/Qwen2.5-72B-Instruct',
|
||||
url=['http://172.30.40.154:23333/v1/'] #'https://api.openai.com/v1/'
|
||||
)
|
||||
)
|
||||
|
||||
livemathbench_datasets = [
|
||||
dict(
|
||||
type=LiveMathBenchDataset,
|
||||
abbr='LiveMathBench-k1-n1',
|
||||
path='opencompass/LiveMathBench202412',
|
||||
k=1, # K@Pass
|
||||
n=1, # Run times
|
||||
reader_cfg=livemathbench_reader_cfg,
|
||||
infer_cfg=livemathbench_infer_cfg,
|
||||
eval_cfg=livemathbench_eval_cfg
|
||||
)
|
||||
]
|
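
The evaluator URL above is a hard-coded internal endpoint; anyone reproducing this setup would point it at their own OpenAI-compatible server. A sketch with placeholder values:

# Sketch: point the judge model at your own OpenAI-compatible endpoint.
livemathbench_eval_cfg = dict(
    evaluator=dict(
        type=LiveMathBenchEvaluator,
        model_name='Qwen/Qwen2.5-72B-Instruct',
        url=['http://localhost:23333/v1/'],  # placeholder; substitute your server
    )
)
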
@ -0,0 +1,16 @@
|
||||
from opencompass.models import TurboMindModelwithChatTemplate
|
||||
|
||||
models = [
|
||||
dict(
|
||||
type=TurboMindModelwithChatTemplate,
|
||||
abbr='llama-3_3-70b-instruct-turbomind',
|
||||
path='meta-llama/Llama-3.3-70B-Instruct',
|
||||
engine_config=dict(max_batch_size=16, tp=4),
|
||||
gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=8192),
|
||||
max_seq_len=16384,
|
||||
max_out_len=8192,
|
||||
batch_size=16,
|
||||
run_cfg=dict(num_gpus=4),
|
||||
stop_words=['<|end_of_text|>', '<|eot_id|>', '<|eom_id|>'],
|
||||
)
|
||||
]
|
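
With top_k=1 and a near-zero temperature, decoding is effectively greedy regardless of top_p. A top-level run config would tie this model list to the dataset configs above via read_base(); a sketch in which the import paths are placeholders, not the files' actual locations:

# Sketch of a combined eval config; the module paths below are placeholders.
from mmengine.config import read_base

with read_base():
    from .models.llama3_3_70b_instruct_turbomind import models       # placeholder path
    from .datasets.cmmlu_o1_gen import cmmlu_datasets                # placeholder path
    from .datasets.humaneval_o1_gen import humaneval_datasets        # placeholder path

datasets = [*cmmlu_datasets, *humaneval_datasets]
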
@@ -22,20 +22,21 @@ from .prompts import (EXTRACT_PROMPT_CN, EXTRACT_PROMPT_EN, JUDGE_PROMPT_CN,

 @LOAD_DATASET.register_module()
 class LiveMathBenchDataset(BaseDataset):
-    dataset_splits = ['AIMC', 'CEE', 'CMO', 'MATH500', 'AIME2024']
-    dataset_languages = ['cn', 'en']

     @staticmethod
     def load(
         path: str,
         k: int,
         n: int,
+        dataset_splits: List[str] = [
+            'AIMC', 'CEE', 'CMO', 'MATH500', 'AIME2024'
+        ],
+        dataset_languages: List[str] = ['cn', 'en'],
     ) -> List[Dict[str, Any]]:
         dataset = []
         dataset_info = {}
         path = get_data_path(path)
-        for split, language in product(LiveMathBenchDataset.dataset_splits,
-                                       LiveMathBenchDataset.dataset_languages):
+        for split, language in product(dataset_splits, dataset_languages):
             file_path = os.path.join(path, f'{split}_{language}.jsonl')
             if not os.path.exists(file_path):
                 continue
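
Moving the split and language lists from class attributes into load() parameters lets a config evaluate a subset of the benchmark. For instance (a sketch, assuming extra dataset-dict keys are forwarded to load() as with other OpenCompass datasets):

# Sketch: restrict evaluation to the English AIME2024 split.
livemathbench_datasets = [
    dict(
        type=LiveMathBenchDataset,
        abbr='LiveMathBench-AIME2024-en',
        path='opencompass/LiveMathBench202412',
        k=1,
        n=1,
        dataset_splits=['AIME2024'],       # assumed to reach LiveMathBenchDataset.load()
        dataset_languages=['en'],
        reader_cfg=livemathbench_reader_cfg,
        infer_cfg=livemathbench_infer_cfg,
        eval_cfg=livemathbench_eval_cfg,
    )
]
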
@@ -1,5 +1,6 @@
 import json
 import os
+import random
 import re
 import time
 from concurrent.futures import ThreadPoolExecutor
@@ -481,7 +482,7 @@ class OpenAISDK(OpenAI):
         key: str | List[str] = 'ENV',
         org: str | List[str] | None = None,
         meta_template: Dict | None = None,
-        openai_api_base: str = OPENAISDK_API_BASE,
+        openai_api_base: str | List[str] = OPENAISDK_API_BASE,
         openai_proxy_url: Optional[str] = None,
         mode: str = 'none',
         logprobs: bool | None = False,
@@ -513,6 +514,10 @@
             max_completion_tokens=max_completion_tokens)
         from openai import OpenAI

+        # support multiple api_base for acceleration
+        if isinstance(openai_api_base, List):
+            openai_api_base = random.choice(openai_api_base)
+
         if self.proxy_url is None:
             self.openai_client = OpenAI(base_url=openai_api_base, api_key=key)
         else:
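
With this change, openai_api_base accepts a list of mirror endpoints and each OpenAISDK instance picks one at random at construction time, spreading load across servers when many workers run in parallel. Usage would look roughly like this (model name and URLs are placeholders):

from opencompass.models import OpenAISDK

# Sketch: each OpenAISDK instance randomly picks one of the mirrors below.
model_cfg = dict(
    type=OpenAISDK,
    path='Qwen/Qwen2.5-72B-Instruct',     # placeholder model name
    openai_api_base=[
        'http://10.0.0.1:23333/v1/',      # placeholder mirror endpoints
        'http://10.0.0.2:23333/v1/',
    ],
)
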
@@ -78,7 +78,12 @@ def check_and_rename(filepath):
     match = re.match(r'(.*)_(gen|ppl|ll|mixed)_(.*).py', base_name)
     if match:
         dataset, mode, old_hash = match.groups()
-        new_hash = get_hash(filepath)
+        try:
+            new_hash = get_hash(filepath)
+        except Exception:
+            print(f'Failed to get hash for {filepath}')
+            raise ModuleNotFoundError
+
         if not new_hash:
             return None, None
         if old_hash != new_hash:
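
For reference, a content hash of a config file is typically just a digest of the file bytes; a generic sketch (not the repo's actual get_hash, whose failure mode this change now surfaces explicitly):

import hashlib

def file_hash(filepath: str) -> str:
    # Generic sketch only; OpenCompass's get_hash may differ.
    with open(filepath, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()
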