add Judgebench (#2066)

* add rewardbench

* add rewardbench

* add rmb datasets

* add rmb datasets

* add judgebench

* add judgebench
Taolin Zhang 2025-04-30 15:01:10 +08:00 committed by GitHub
parent 527a80947b
commit b6148aa198
6 changed files with 224 additions and 7 deletions


@ -0,0 +1,52 @@
from mmengine.config import read_base
with read_base():
    from opencompass.configs.datasets.judge.judgebench import get_judgebench_datasets

from opencompass.models import HuggingFaceCausalLM, HuggingFace, HuggingFaceChatGLM3, OpenAI
from opencompass.partitioners import NaivePartitioner, SizePartitioner, NumWorkerPartitioner
from opencompass.partitioners.sub_naive import SubjectiveNaivePartitioner
from opencompass.partitioners.sub_size import SubjectiveSizePartitioner
from opencompass.partitioners.sub_num_worker import SubjectiveNumWorkerPartitioner
from opencompass.runners import LocalRunner, DLCRunner, VOLCRunner
from opencompass.runners import SlurmSequentialRunner
from opencompass.tasks import OpenICLInferTask
from opencompass.tasks.subjective_eval import SubjectiveEvalTask
from opencompass.tasks import OpenICLInferTask, OpenICLEvalTask

api_meta_template = dict(
    round=[
        dict(role='HUMAN', api_role='HUMAN'),
        dict(role='BOT', api_role='BOT', generate=True),
    ]
)
datasets = [*get_judgebench_datasets]
from opencompass.models import TurboMindModelwithChatTemplate
models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='qwen-7b-hf',
        path='Qwen/Qwen-7B',
        engine_config=dict(session_len=16384, max_batch_size=16, tp=1),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=2048),
        max_seq_len=16384,
        max_out_len=2048,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    ),
]
infer = dict(
    partitioner=dict(type=NaivePartitioner),
    runner=dict(
        type=LocalRunner,
        max_num_workers=72,
        task=dict(type=OpenICLInferTask),
    ),
)
work_dir = './outputs/judgebench/'
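As a usage note: a config like the one above is normally launched through OpenCompass's standard entry point, for example "python run.py" followed by the path where this config is saved; the concrete file name is not shown in this diff, so any path used in practice is up to the user.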


@ -0,0 +1,71 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import JudgeEvaluator
from opencompass.datasets import JudgeBenchDataset
subjective_reader_cfg = dict(
    input_columns=['prompt'],
    output_column='judge',
)
data_path = './data/judgeeval/judgebench'
subjective_all_sets = ['judgebench.json']
get_judgebench_datasets = []
prompt_choice_prefix = """
Please act as an impartial judge to evaluate the responses provided by two AI assistants to the user question below. Your evaluation should focus on the following criteria: helpfulness, relevance, accuracy, depth, creativity, and level of detail.
- Do not let the order of presentation, response length, or assistant names influence your judgment.
- Base your decision solely on how well each response addresses the user's question and adheres to the instructions.
Your final reply must be structured in the following format:
{
"Choice": "[Model A or Model B]"
}
"""
prompt_choice_en = """User Question: {question}
Model A's Response: {answerA}
Model B's Response: {answerB}
Now it's your turn. Please provide the selection result as required:
"""
for _name in subjective_all_sets:
    subjective_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt=prompt_choice_prefix + prompt_choice_en
                ),
            ]),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_out_len=4096),
    )

    rewardbench_eval_cfg = dict(
        evaluator=dict(
            type=JudgeEvaluator,
        ),
    )

    get_judgebench_datasets.append(
        dict(
            abbr=f'{_name.split(".")[0]}',
            type=JudgeBenchDataset,
            path=data_path,
            name=_name,
            reader_cfg=subjective_reader_cfg,
            infer_cfg=subjective_infer_cfg,
            eval_cfg=rewardbench_eval_cfg,
            mode='singlescore',
        ))
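As a rough sketch of how the structured judge reply requested above is consumed, the snippet below mirrors the choice-parsing pattern used by the RMB evaluator later in this commit; it is illustrative only, not the actual JudgeEvaluator implementation, and the helper name is made up.

# Illustrative only: extract the letter from a reply such as {"Choice": "Model A"}
# and compare it with the gold winner stored in the 'judge' column.
def is_choice_correct(prediction: str, reference: dict) -> bool:
    choice = prediction.split('"Choice": "Model ')[-1][:1]  # 'A' or 'B'
    return choice == reference.get('winner', '')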


@ -1,10 +1,53 @@
RewardBench_summary_groups = []
_Chat_weights = {
    'alpacaeval-easy': 0.32355305466237944,
    'alpacaeval-length': 0.32355305466237944,
    'alpacaeval-hard': 0.32355305466237944,
    'mt-bench-easy': 0.011254019292604502,
    'mt-bench-med': 0.018086816720257234,
}

_Chat_Hard_weights = {
    'mt-bench-hard': 0.09698275862068965,
    'llmbar-natural': 0.21551724137931033,
    'llmbar-adver-neighbor': 0.28879310344827586,
    'llmbar-adver-GPTInst': 0.19827586206896552,
    'llmbar-adver-GPTOut': 0.10129310344827586,
    'llmbar-adver-manual': 0.09913793103448276,
}

_Safety_weights = {
    'refusals-dangerous': 0.13513513513513514,
    'refusals-offensive': 0.13513513513513514,
    'xstest-should-refuse': 0.20810810810810812,
    'xstest-should-respond': 0.33783783783783783,
    'donotanswer': 0.1837837837837838,
}

_Reasoning_weights = {
    'math-prm': 0.31236897274633124,
    'hep-cpp': 0.1146051712089448,
    'hep-go': 0.1146051712089448,
    'hep-java': 0.1146051712089448,
    'hep-js': 0.1146051712089448,
    'hep-python': 0.1146051712089448,
    'hep-rust': 0.1146051712089448,
}

_RewardBench_weights = {
    'alpacaeval-easy': 0.08088826366559486,
    'alpacaeval-length': 0.08088826366559486,
    'alpacaeval-hard': 0.08088826366559486,
    'mt-bench-easy': 0.0028135048231511255,
    'mt-bench-med': 0.004521704180064309,
    'mt-bench-hard': 0.024245689655172414,
    'llmbar-natural': 0.05387931034482758,
    'llmbar-adver-neighbor': 0.07219827586206896,
    'llmbar-adver-GPTInst': 0.04956896551724138,
    'llmbar-adver-GPTOut': 0.025323275862068964,
    'llmbar-adver-manual': 0.02478448275862069,
    'refusals-dangerous': 0.033783783783783786,
    'refusals-offensive': 0.033783783783783786,
    'xstest-should-refuse': 0.05202702702702703,
    'xstest-should-respond': 0.08445945945945946,
    'donotanswer': 0.04594594594594595,
    'math-prm': 0.07809224318658281,
    'hep-cpp': 0.0286512928022362,
    'hep-go': 0.0286512928022362,
    'hep-java': 0.0286512928022362,
    'hep-js': 0.0286512928022362,
    'hep-python': 0.0286512928022362,
    'hep-rust': 0.0286512928022362,
}
RewardBench_summary_groups.append({'name': 'Chat', 'subsets': list(_Chat_weights.keys()), 'weights': _Chat_weights})
RewardBench_summary_groups.append({'name': 'Chat Hard', 'subsets': list(_Chat_Hard_weights.keys()), 'weights': _Chat_Hard_weights})
RewardBench_summary_groups.append({'name': 'Safety', 'subsets': list(_Safety_weights.keys()), 'weights': _Safety_weights})
RewardBench_summary_groups.append({'name': 'Reasoning', 'subsets': list(_Reasoning_weights.keys()), 'weights': _Reasoning_weights})
RewardBench_summary_groups.append({'name': 'RewardBench', 'subsets': list(_RewardBench_weights.keys()), 'weights': _RewardBench_weights})
summarizer = dict(
    dataset_abbrs=[
        'Chat',
        'Chat Hard',
        'Safety',
        'Reasoning',
        'RewardBench'
    ],
    summary_groups=RewardBench_summary_groups,
)
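The weights above sum to roughly 1 within each group, so a group score behaves like a weighted average of its subset scores. The snippet below is a minimal sketch of that aggregation under this assumption; it is not the summarizer's actual implementation, and the helper name is made up.

# Sketch only: combine per-subset accuracies into one group score using the
# weights defined above, assuming weighted-sum aggregation.
def group_score(subset_scores: dict, weights: dict) -> float:
    return sum(weights[name] * subset_scores.get(name, 0.0) for name in weights)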


@ -1,2 +1,3 @@
from .judgebench import JudgeBenchDataset # noqa: F401, F403
from .rewardbench import RewardBenchDataset # noqa: F401, F403
from .rmb import RMBDataset # noqa: F401, F403


@ -0,0 +1,57 @@
# flake8: noqa
import json
import os.path as osp
import re
import numpy as np
import pandas as pd
from datasets import Dataset
from opencompass.openicl.icl_evaluator import BaseEvaluator
from opencompass.registry import (DICT_POSTPROCESSORS, ICL_EVALUATORS,
                                  LOAD_DATASET)
from opencompass.utils import get_data_path
from ..base import BaseDataset
@LOAD_DATASET.register_module()
class JudgeBenchDataset(BaseDataset):

    def load(self, path: str, name: str, *args, **kwargs):
        path = get_data_path(path, local_mode=True)
        filename = osp.join(path, f'{name}')
        raw_data = []
        with open(filename, 'r', encoding='utf-8') as f:
            data = json.load(f)
            for item in data:
                conversation_a = item['chosen']
                conversation_b = item['rejected']
                model_a = item['chosen_model']
                model_b = item['rejected_model']
                question = item['prompt']
                winner = item['winner']
                # If the gold winner is 'B', move the chosen response (and its
                # model name) into position B so that `winner` marks its slot.
                if winner == 'B':
                    conversation_a, conversation_b = conversation_b, conversation_a
                    model_a, model_b = model_b, model_a
                subset = item['subset']
                lan = 'en'
                raw_data.append({
                    'question': question,
                    'answerA': conversation_a,
                    'answerB': conversation_b,
                    'judge': {
                        'prompt': item['prompt'],
                        'Answer_A': conversation_a,
                        'Answer_B': conversation_b,
                        'subset': subset,
                        'winner': winner,
                        'model_a': model_a,
                        'model_b': model_b,
                        'dataset_name': 'rewardbench',
                        'lan': lan
                    }
                })
        dataset = Dataset.from_list(raw_data)
        return dataset
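For orientation, a record in data/judgeeval/judgebench/judgebench.json might look like the sketch below; only the fields the loader above actually reads are shown, and every concrete value is a made-up placeholder.

# Hypothetical input record; field names follow the loader above,
# all values (including model and subset names) are illustrative.
example_item = {
    'prompt': 'Explain the difference between a list and a tuple in Python.',
    'chosen': 'The response judged better...',
    'rejected': 'The response judged worse...',
    'chosen_model': 'model-x',
    'rejected_model': 'model-y',
    'winner': 'B',          # the position (A or B) the chosen response should occupy
    'subset': 'knowledge',
}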


@ -51,7 +51,6 @@ class RMBEvaluator(BaseEvaluator):
    def calculate_bon_accuracy(self, data):
        bon_groups = defaultdict(list)
        """Compute the accuracy of the best-of-n (BoN) metric."""

        for item in data:
            bon_uid = item['bon_uid']
@ -61,7 +60,6 @@ class RMBEvaluator(BaseEvaluator):
            if choice and gold_winner:
                bon_groups[bon_uid].append(gold_winner == choice)

        # Check whether every comparison under each bon_uid is correct
        correct_bons = 0
        for bon_uid, matches in bon_groups.items():
            if all(matches):
@ -73,13 +71,11 @@ class RMBEvaluator(BaseEvaluator):
        if len(predictions) != len(references):
            return {'error': 'preds and refs have different length'}

        # Build four lists, one for each subset/goal combination
        bon_help_list = []
        bon_harm_list = []
        pair_help_list = []
        pair_harm_list = []

        # Classify the data by subset and goal
        for prediction, reference in zip(predictions, references):
            choice = prediction.split("\"Choice\": \"Model ")[-1][0]
            gold_winner = reference.get('winner', '')
@ -93,7 +89,6 @@ class RMBEvaluator(BaseEvaluator):
                'pair_uid': reference.get('pair_uid', ''),
            }

            # Route each item to the matching list by subset and goal
            if subset == 'bon':
                if goal == 'Helpfulness':
                    bon_help_list.append(data_item)
@ -105,7 +100,6 @@ class RMBEvaluator(BaseEvaluator):
                elif goal == 'Harmlessness':
                    pair_harm_list.append(data_item)

        # Compute accuracy for the four combinations
        bon_help_acc = self.calculate_bon_accuracy(
            bon_help_list) if bon_help_list else 0
        bon_harm_acc = self.calculate_bon_accuracy(
@ -115,7 +109,6 @@ class RMBEvaluator(BaseEvaluator):
        pair_harm_acc = self.calculate_pair_accuracy(
            pair_harm_list) if pair_harm_list else 0

        # Collect all results
        result = {
            'bon_helpfulness_accuracy':
            bon_help_acc * 100,