From 7278a4ed19cb0ebc636f277548b55d380595b8eb Mon Sep 17 00:00:00 2001 From: huihui Date: Thu, 15 May 2025 08:50:05 +0000 Subject: [PATCH 1/8] healthbench --- .../healthbench_model_gen_4175e2.py | 78 +++ opencompass/datasets/__init__.py | 1 + opencompass/datasets/healthbench/common.py | 380 ++++++++++ opencompass/datasets/healthbench/drop_eval.py | 331 +++++++++ opencompass/datasets/healthbench/gpqa_eval.py | 88 +++ .../datasets/healthbench/healthbench.py | 359 ++++++++++ .../datasets/healthbench/healthbench_eval.py | 648 ++++++++++++++++++ .../healthbench/healthbench_eval_test.py | 32 + .../datasets/healthbench/healthbench_meta.py | 262 +++++++ .../healthbench/healthbench_meta_eval.py | 339 +++++++++ .../healthbench/healthbench_meta_eval_test.py | 165 +++++ .../sampler/chat_completion_sampler.py | 99 +++ .../healthbench/sampler/claude_sampler.py | 103 +++ .../sampler/o_chat_completion_sampler.py | 78 +++ .../healthbench/sampler/responses_sampler.py | 97 +++ opencompass/openicl/icl_prompt_template.py | 19 + 16 files changed, 3079 insertions(+) create mode 100644 opencompass/configs/datasets/HealthBench/healthbench_model_gen_4175e2.py create mode 100644 opencompass/datasets/healthbench/common.py create mode 100644 opencompass/datasets/healthbench/drop_eval.py create mode 100644 opencompass/datasets/healthbench/gpqa_eval.py create mode 100644 opencompass/datasets/healthbench/healthbench.py create mode 100644 opencompass/datasets/healthbench/healthbench_eval.py create mode 100644 opencompass/datasets/healthbench/healthbench_eval_test.py create mode 100644 opencompass/datasets/healthbench/healthbench_meta.py create mode 100644 opencompass/datasets/healthbench/healthbench_meta_eval.py create mode 100644 opencompass/datasets/healthbench/healthbench_meta_eval_test.py create mode 100644 opencompass/datasets/healthbench/sampler/chat_completion_sampler.py create mode 100644 opencompass/datasets/healthbench/sampler/claude_sampler.py create mode 100644 opencompass/datasets/healthbench/sampler/o_chat_completion_sampler.py create mode 100644 opencompass/datasets/healthbench/sampler/responses_sampler.py diff --git a/opencompass/configs/datasets/HealthBench/healthbench_model_gen_4175e2.py b/opencompass/configs/datasets/HealthBench/healthbench_model_gen_4175e2.py new file mode 100644 index 00000000..229f2958 --- /dev/null +++ b/opencompass/configs/datasets/HealthBench/healthbench_model_gen_4175e2.py @@ -0,0 +1,78 @@ +from opencompass.datasets import HealthBenchDataset, HealthBenchEvaluator +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.openicl.icl_prompt_template import HealthBenchTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever + + +# Reader configuration +reader_cfg = dict( + input_columns=[ + 'example_tags', 'ideal_completions_data', 'prompt', 'prompt_id', 'rubrics', 'canary' + ], + output_column='prompt_id', # useless +) + +# Inference configuration +infer_cfg = dict( + prompt_template=dict( + type=HealthBenchTemplate, + key='prompt_trans', + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer), +) + +# infer_cfg = dict( +# prompt_template=dict( +# type=PromptTemplate, +# template=dict( +# round=[ +# dict( +# role='HUMAN', +# prompt='{prompt_id}', # prompt mode: zero-shot +# ), +# dict( +# role='BOT', +# prompt='{prompt_id}', # prompt mode: zero-shot +# ), +# ], +# ), +# ), +# retriever=dict(type=ZeroRetriever), +# inferencer=dict(type=GenInferencer), +# ) + +# Evaluation configuration +eval_cfg = dict( + 
evaluator=dict(type=HealthBenchEvaluator), + pred_role='BOT', +) +healthbench_vanilla_dataset = dict( + type=HealthBenchDataset, + abbr='healthbench_vanilla', + path='huihuixu/healthbench', + subset='vanilla', + reader_cfg=reader_cfg, + infer_cfg=infer_cfg, + eval_cfg=eval_cfg, +) +healthbench_hard_dataset = dict( + type=HealthBenchDataset, + abbr='healthbench_hard', + path='huihuixu/healthbench', + subset='hard', + reader_cfg=reader_cfg, + infer_cfg=infer_cfg, + eval_cfg=eval_cfg, +) +healthbench_consensus_dataset = dict( + type=HealthBenchDataset, + abbr='healthbench_consensus', + path='huihuixu/healthbench', + subset='consensus', + reader_cfg=reader_cfg, + infer_cfg=infer_cfg, + eval_cfg=eval_cfg, +) + +healthbench_all_datasets = [healthbench_vanilla_dataset, healthbench_hard_dataset, healthbench_consensus_dataset ] \ No newline at end of file diff --git a/opencompass/datasets/__init__.py b/opencompass/datasets/__init__.py index b00162d1..244a2b3a 100644 --- a/opencompass/datasets/__init__.py +++ b/opencompass/datasets/__init__.py @@ -58,6 +58,7 @@ from .govrepcrs import * # noqa: F401, F403 from .gpqa import * # noqa: F401, F403 from .gsm8k import * # noqa: F401, F403 from .gsm_hard import * # noqa: F401, F403 +from .healthbench.healthbench import * from .hellaswag import * # noqa: F401, F403 from .hle import * # noqa: F401, F403 from .huggingface import * # noqa: F401, F403 diff --git a/opencompass/datasets/healthbench/common.py b/opencompass/datasets/healthbench/common.py new file mode 100644 index 00000000..28ef2201 --- /dev/null +++ b/opencompass/datasets/healthbench/common.py @@ -0,0 +1,380 @@ +import io +import os +from collections import defaultdict +from concurrent.futures import ThreadPoolExecutor, as_completed +from multiprocessing.pool import ThreadPool +from typing import Any, Callable + +import jinja2 +import numpy as np +import requests +from tqdm import tqdm + +from .types import EvalResult, Message, SamplerBase, SingleEvalResult + +QUERY_TEMPLATE_MULTICHOICE = """ +Answer the following multiple choice question. The last line of your response should be of the following format: 'Answer: $LETTER' (without quotes) where LETTER is one of ABCD. Think step by step before answering. + +{Question} + +A) {A} +B) {B} +C) {C} +D) {D} +""".strip() + +ANSWER_PATTERN_MULTICHOICE = r'(?i)Answer[ \t]*:[ \t]*\$?([A-D])\$?' +ANSWER_PATTERN = r'(?i)Answer\s*:\s*([^\n]+)' +MULTILINGUAL_ANSWER_PATTERN_TEMPLATE = ( + '(?i){}[ \t]*([A-D]|[أ-د]|[অ]|[ব]|[ড]|[ঢ]|[A]|[B]|[C]|[D])') +# All the different ways "Answer" is written in different languages +MULTILINGUAL_ANSWER_REGEXES = [ + 'Answer\s*:', + 'Answer\s*:​​​​​​', # Korean invisible character + 'উত্তর\s*:', + 'उत्तर\s*:', + 'উত্তরঃ', + 'উত্তর\s*:', + 'Antwort\s*:', + '답변\s*:', + '정답\s*:', + '답\s*:', + '答案\s*:', + '答案\s*:', + '答\s*:', + '答\s*:', + '答复\s*:', + '答曰\s*:', + 'الإجابة:', + 'الجواب:', + 'إجابة:', + 'الإجابة النهائية:', + 'الإجابة الصحيحة:', + 'الإجابة الصحيحة هي:', + 'الإجابة هي:', + 'الجواب النهائي:', + 'Respuesta\s*:', + 'Risposta\s*:', + '答え\s*:', + '答え\s*:', + '回答\s*:', + '回答\s*:', + '解答\s*:', + 'Jawaban\s*:', + 'Réponse\s*:', + 'Resposta\s*:', + 'Jibu\s*:', + 'Idahun\s*:', + 'Ìdáhùn\s*:', + 'Idáhùn\s*:', + 'Àmọ̀nà\s*:', + 'Àdáhùn\s*:', + 'Ànúgọ\s*:', + 'Àṣàyàn\s*:', +] + +EQUALITY_TEMPLATE = r""" +Look at the following two expressions (answers to a math problem) and judge whether they are equivalent. 
Only perform trivial simplifications + +Examples: + + Expression 1: $2x+3$ + Expression 2: $3+2x$ + +Yes + + Expression 1: 3/2 + Expression 2: 1.5 + +Yes + + Expression 1: $x^2+2x+1$ + Expression 2: $y^2+2y+1$ + +No + + Expression 1: $x^2+2x+1$ + Expression 2: $(x+1)^2$ + +Yes + + Expression 1: 3245/5 + Expression 2: 649 + +No +(these are actually equal, don't mark them equivalent if you need to do nontrivial simplifications) + + Expression 1: 2/(-3) + Expression 2: -2/3 + +Yes +(trivial simplifications are allowed) + + Expression 1: 72 degrees + Expression 2: 72 + +Yes +(give benefit of the doubt to units) + + Expression 1: 64 + Expression 2: 64 square feet + +Yes +(give benefit of the doubt to units) + +--- + +YOUR TASK + + +Respond with only "Yes" or "No" (without quotes). Do not include a rationale. + + Expression 1: %(expression1)s + Expression 2: %(expression2)s +""".strip() + +HTML_JINJA = """ +

+<h3>Prompt conversation</h3>
+{% for message in prompt_messages %}
+{{ message_to_html(message) | safe }}
+{% endfor %}
+<h3>Sampled message</h3>
+{{ message_to_html(next_message) | safe }}
+<h3>Results</h3>
+<p>Correct Answer: {{ correct_answer }}</p>
+<p>Extracted Answer: {{ extracted_answer }}</p>
+<p>Score: {{ score }}</p>
+""" + + +def format_multichoice_question(row): + return QUERY_TEMPLATE_MULTICHOICE.format(**row) + + +def check_equality(sampler: SamplerBase, expr1: str, expr2: str): + prompt = EQUALITY_TEMPLATE % {'expression1': expr1, 'expression2': expr2} + sampler_response = sampler([dict(content=prompt, role='user')]) + response_text = sampler_response.response_text + return response_text.lower().strip() == 'yes' + + +def _compute_stat(values: list, stat: str): + if stat == 'mean': + return np.mean(values) + elif stat == 'std': + return np.std(values) + elif stat == 'min': + return np.min(values) + elif stat == 'max': + return np.max(values) + elif stat == 'n_samples': + return len(values) + elif stat == 'bootstrap_std': + return np.std([ + np.mean(np.random.choice(values, len(values))) for _ in range(1000) + ]) + else: + raise ValueError(f'Unknown {stat =}') + + +def aggregate_results( + single_eval_results: list[SingleEvalResult], + default_stats: tuple[str, ...] = ('mean', 'std'), + name2stats: dict[str, tuple[str]] | None = None, +) -> EvalResult: + """Aggregate results from multiple evaluations into a single EvalResult.""" + name2stats = name2stats or {} + name2values = defaultdict(list) + htmls = [] + convos = [] + metadata = [] + for single_eval_result in single_eval_results: + for name, value in single_eval_result.metrics.items(): + name2values[name].append(value) + if single_eval_result.score is not None: + name2values['score'].append(single_eval_result.score) + htmls.append(single_eval_result.html) + convos.append(single_eval_result.convo) + metadata.append(single_eval_result.example_level_metadata) + final_metrics = {} + for name, values in name2values.items(): + stats = name2stats.get(name, default_stats) + for stat in stats: + key = name if stat == 'mean' else f'{name}:{stat}' + final_metrics[key] = _compute_stat(values, stat) + return EvalResult( + score=final_metrics.pop('score', None), + metrics=final_metrics, + htmls=htmls, + convos=convos, + metadata={'example_level_metadata': metadata}, + ) + + +def map_with_progress( + f: Callable, + xs: list[Any], + num_threads: int = os.cpu_count() or 10, + pbar: bool = True, +): + """Apply f to each element of xs, using a ThreadPool, and show progress.""" + pbar_fn = tqdm if pbar else lambda x, *args, **kwargs: x + + if os.getenv('debug'): + return list(map(f, pbar_fn(xs, total=len(xs)))) + else: + with ThreadPool(min(num_threads, len(xs))) as pool: + return list(pbar_fn(pool.imap(f, xs), total=len(xs))) + + +jinja_env = jinja2.Environment( + loader=jinja2.BaseLoader(), + undefined=jinja2.StrictUndefined, + autoescape=jinja2.select_autoescape(['html', 'xml']), +) +_message_template = """ +
+<div class="message {{ role }}">
+    <div class="role">
+        {{ role }}
+        {% if variant %}({{ variant }}){% endif %}
+    </div>
+    <div class="content">
+        <pre>{{ content }}</pre>
+    </div>
+</div>
+"""
+
+
+def message_to_html(message: Message) -> str:
+    """Generate HTML snippet (inside a <div>
) for a message.""" + return jinja_env.from_string(_message_template).render( + role=message['role'], + content=message['content'], + variant=message.get('variant', None), + ) + + +jinja_env.globals['message_to_html'] = message_to_html + +_report_template = """ + + + + + + {% if metrics %} +

+    <h1>Metrics</h1>
+    <table>
+    <tr>
+        <th>Metric</th>
+        <th>Value</th>
+    </tr>
+    <tr>
+        <td><b>Score</b></td>
+        <td>{{ score | float | round(3) }}</td>
+    </tr>
+    {% for name, value in metrics.items() %}
+    <tr>
+        <td>{{ name }}</td>
+        <td>{{ value }}</td>
+    </tr>
+    {% endfor %}
+    </table>
+    {% endif %}
+    <h1>Examples</h1>
+    {% for html in htmls %}
+    {{ html | safe }}
+    <hr>
+ {% endfor %} + + +""" + + +def make_report(eval_result: EvalResult) -> str: + """Create a standalone HTML report from an EvalResult.""" + return jinja_env.from_string(_report_template).render( + score=eval_result.score, + metrics=eval_result.metrics, + htmls=eval_result.htmls, + ) + + +def make_report_from_example_htmls(htmls: list[str]): + """Create a standalone HTML report from a list of example htmls.""" + return jinja_env.from_string(_report_template).render(score=None, + metrics={}, + htmls=htmls) + + +def normalize_response(response: str) -> str: + """Normalize the response by removing markdown and LaTeX formatting that + may prevent a match.""" + + return (response.replace('**', '').replace('$\\boxed{', '').replace( + '}$', '').replace('\\$', '').replace('$\\text{', '').replace( + '$', '').replace('\\mathrm{', '').replace('\\{', '').replace( + '\\text', + '').replace('\\(', + '').replace('\\mathbf{', + '').replace('{', + '').replace('\\boxed', '')) + + +def normalize_extracted_answer(extracted_answer: str) -> str: + return ( + # In arabic these are the letters used for A-D in multiple choice questions + extracted_answer.replace('أ', ' A').replace('ب', ' B').replace( + 'ج', ' C').replace('د', ' D') + # In Bengali these are the letters used for A-D in multiple choice questions + .replace('অ', ' A').replace('ব', + ' B').replace('ড', + ' C').replace('ঢ', ' D') + # In Japanese these are the letters sometimes used for A-D in multiple choice questions + .replace('A', ' A').replace('B', + ' B').replace('C', + ' C').replace('D', + ' D').strip()) + + +def url_to_fileobj(url: str, binary=False) -> Any: + response = requests.get(url) + response.raise_for_status() + return io.BytesIO(response.content) if binary else io.StringIO( + response.text) + + +def has_only_user_assistant_messages(messages: list[Message]) -> bool: + """Check if the messages only contain user and assistant messages.""" + return all(m['role'] in ('user', 'assistant') for m in messages) diff --git a/opencompass/datasets/healthbench/drop_eval.py b/opencompass/datasets/healthbench/drop_eval.py new file mode 100644 index 00000000..e6205ca4 --- /dev/null +++ b/opencompass/datasets/healthbench/drop_eval.py @@ -0,0 +1,331 @@ +""" +DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs +Dheeru Dua, Yizhong Wang, Pradeep Dasigi, Gabriel Stanovsky, Sameer Singh, Matt Gardner +https://arxiv.org/abs/1903.00161 +""" + +import gzip +import json +import random +import re +import string +from typing import Any, Dict, List, Optional, Set, Tuple, Union + +import numpy as np +from scipy.optimize import linear_sum_assignment + +from . import common +from .common import ANSWER_PATTERN, HTML_JINJA +from .types import Eval, EvalResult, SamplerBase, SingleEvalResult +""" +From here through _normalize_answer was originally copied from: +https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/ +Then cleaned up and modified a bit. 
+ +The rest was originally copied from https://github.com/allenai/allennlp-reading-comprehension/blob/master/allennlp_rc +/eval/drop_eval.py +""" + + +def _remove_articles(text: str) -> str: + regex = re.compile(r'\b(a|an|the)\b', re.UNICODE) + return re.sub(regex, ' ', text) + + +def _white_space_fix(text: str) -> str: + return ' '.join(text.split()) + + +EXCLUDE = set(string.punctuation) + + +def _remove_punc(text: str) -> str: + if not _is_number(text): + return ''.join(ch for ch in text if ch not in EXCLUDE) + else: + return text + + +def _lower(text: str) -> str: + return text.lower() + + +def _tokenize(text: str) -> List[str]: + return re.split(' |-', text) + + +def _normalize_answer(text: str) -> str: + """Lower text and remove punctuation, articles and extra whitespace.""" + + parts = [ + _white_space_fix( + _remove_articles(_normalize_number(_remove_punc(_lower(token))))) + for token in _tokenize(text) + ] + parts = [part for part in parts if part.strip()] + normalized = ' '.join(parts).strip() + return normalized + + +def _is_number(text: str) -> bool: + try: + float(text) + return True + except ValueError: + return False + + +def _normalize_number(text: str) -> str: + if _is_number(text): + return str(float(text)) + else: + return text + + +def _answer_to_bags( + answer: Union[str, List[str], Tuple[str, ...]] +) -> Tuple[List[str], List[Set[str]]]: + if isinstance(answer, (list, tuple)): + raw_spans = answer + else: + raw_spans = [answer] + normalized_spans: List[str] = [] + token_bags = [] + for raw_span in raw_spans: + normalized_span = _normalize_answer(raw_span) + normalized_spans.append(normalized_span) + token_bags.append(set(normalized_span.split())) + return normalized_spans, token_bags + + +def _align_bags(predicted: List[Set[str]], + gold: List[Set[str]]) -> List[float]: + """Takes gold and predicted answer sets and first finds the optimal 1-1 + alignment between them and gets maximum metric values over all the + answers.""" + scores = np.zeros([len(gold), len(predicted)]) + for gold_index, gold_item in enumerate(gold): + for pred_index, pred_item in enumerate(predicted): + if _match_numbers_if_present(gold_item, pred_item): + scores[gold_index, + pred_index] = _compute_f1(pred_item, gold_item) + row_ind, col_ind = linear_sum_assignment(-scores) + + max_scores = np.zeros([max(len(gold), len(predicted))]) + for row, column in zip(row_ind, col_ind): + max_scores[row] = max(max_scores[row], scores[row, column]) + return max_scores + + +def _compute_f1(predicted_bag: Set[str], gold_bag: Set[str]) -> float: + intersection = len(gold_bag.intersection(predicted_bag)) + if not predicted_bag: + precision = 1.0 + else: + precision = intersection / float(len(predicted_bag)) + if not gold_bag: + recall = 1.0 + else: + recall = intersection / float(len(gold_bag)) + f1 = ((2 * precision * recall) / (precision + recall) + if not (precision == 0.0 and recall == 0.0) else 0.0) * 100 + return f1 + + +def _match_numbers_if_present(gold_bag: Set[str], + predicted_bag: Set[str]) -> bool: + gold_numbers = set() + predicted_numbers = set() + for word in gold_bag: + if _is_number(word): + gold_numbers.add(word) + for word in predicted_bag: + if _is_number(word): + predicted_numbers.add(word) + if (not gold_numbers) or gold_numbers.intersection(predicted_numbers): + return True + return False + + +def get_drop_metrics( + predicted: Union[str, List[str], Tuple[str, ...]], + gold: Union[str, List[str], Tuple[str, ...]]) -> Tuple[float, float]: + """Takes a predicted answer and a gold answer 
(that are both either a + string or a list of strings), and returns exact match and the DROP F1 + metric for the prediction. + + If you are + writing a script for evaluating objects in memory (say, the output of predictions during + validation, or while training), this is the function you want to call, after using + :func:`answer_json_to_strings` when reading the gold answer from the released data file. + """ + predicted_bags = _answer_to_bags(predicted) + gold_bags = _answer_to_bags(gold) + + if set(predicted_bags[0]) == set(gold_bags[0]) and len( + predicted_bags[0]) == len(gold_bags[0]): + exact_match = 1.0 + else: + exact_match = 0.0 + + f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1]) + f1 = np.mean(f1_per_bag) + f1 = round(f1, 2) + return exact_match, f1 + + +def answer_json_to_strings( + answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]: + """Takes an answer JSON blob from the DROP data release and converts it + into strings used for evaluation.""" + if 'number' in answer and answer['number']: + return tuple([str(answer['number'])]), 'number' + elif 'spans' in answer and answer['spans']: + return tuple( + answer['spans']), 'span' if len(answer['spans']) == 1 else 'spans' + elif 'date' in answer: + return ( + tuple([ + '{0} {1} {2}'.format(answer['date']['day'], + answer['date']['month'], + answer['date']['year']).strip() + ]), + 'date', + ) + else: + raise ValueError( + f'Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}' + ) + + +def answer_json_to_string(answer_json): + return json.dumps(answer_json_to_strings(answer_json)) + + +def normalize(s: str) -> str: + """Lower text and remove punctuation, articles and extra whitespace.""" + s = s.lower() + exclude = set(string.punctuation) + s = ''.join(char for char in s if char not in exclude) + s = re.sub(r'\b(a|an|the)\b', ' ', s) + s = ' '.join(s.split()) + return s + + +def fuzzy_match(s1: str, s2: str) -> bool: + s1 = normalize(s1) + s2 = normalize(s2) + + if s1 == '' or s2 == '': + return s1 == s2 + + return s1 in s2 or s2 in s1 + + +def drop_metric(sample: str, reference: list[str]) -> Tuple[float, float]: + em_scores = [] + f1_scores = [] + for answer in reference: + if answer.strip() != '': + em, f1 = get_drop_metrics(sample, answer) + em_scores.append(em) + f1_scores.append(f1) + return (max(em_scores), max(f1_scores)) + + +class DropEval(Eval): + + def __init__(self, + num_examples: int | None = None, + train_samples_per_prompt: int = 3): + self.seed = 42 + self._num_examples = num_examples + self._train_samples_per_prompt = train_samples_per_prompt + self.train_jsonl = ( + 'https://openaipublic.blob.core.windows.net/simple-evals/drop_v0_train.jsonl.gz' + ) + self.test_jsonl = ( + 'https://openaipublic.blob.core.windows.net/simple-evals/drop_v0_dev.jsonl.gz' + ) + with gzip.GzipFile(fileobj=common.url_to_fileobj(self.train_jsonl, + binary=True), + mode='rb') as f: + self.train_samples = list(map(json.loads, f.readlines())) + with gzip.GzipFile(fileobj=common.url_to_fileobj(self.test_jsonl, + binary=True), + mode='rb') as f: + self.test_samples = list(map(json.loads, f.readlines())) + if self._num_examples: + self.test_samples = random.Random(self.seed).sample( + self.test_samples, self._num_examples) + + def __call__(self, sampler: SamplerBase) -> EvalResult: + rng = random.Random(self.seed) + + def fn(example: dict[str, str]): + stuffing = rng.sample(self.train_samples, + self._train_samples_per_prompt) + + # prompt = """TASK: Read the provided passage, then identify the 
correct answer to questions below.""" + prompt = """You will be asked to read a passage and answer a question. Some examples of passages and Q&A are provided below.""" + prompt += '\n\n# Examples' + samples = stuffing + [example] + for i, sample in enumerate(samples): + is_test = i == len(stuffing) + prompt += '\n# Your Task\n' if is_test else '' + prompt += f""" +--- +{sample["context"]} """ + + a = sample['completion'] + correct_answers = sample['ref_text'].split('|') + + if not is_test: + prompt += a + '\n' + else: + prompt += """\n +Think step by step, then write a line of the form "Answer: $ANSWER" at the end of your response. + """ + prompt_messages = [ + sampler._pack_message(content=prompt, role='user') + ] + sampler_response = sampler(prompt_messages) + response_text = sampler_response.response_text + actual_queried_prompt_messages = sampler_response.actual_queried_message_list + match = re.search(ANSWER_PATTERN, response_text) + extracted_answer = match.group( + 1) if match else response_text + em_score, f1_score = drop_metric(extracted_answer, + correct_answers) + matches = [ + fuzzy_match(extracted_answer, correct_answer) + for correct_answer in correct_answers + ] + extracted_answers = [ + extracted_answer for i in range(len(correct_answers)) + if matches[i] + ] + score = True in matches + html = common.jinja_env.from_string(HTML_JINJA).render( + prompt_messages=actual_queried_prompt_messages, + next_message=dict(content=extracted_answer, + role='assistant'), + score=score, + correct_answer=correct_answers, + extracted_answer=extracted_answers, + ) + convo = actual_queried_prompt_messages + [ + dict(content=extracted_answer, role='assistant') + ] + return SingleEvalResult( + html=html, + score=score, + convo=convo, + metrics={ + 'em_score': em_score, + 'f1_score': f1_score + }, + ) + + results = common.map_with_progress(fn, self.test_samples) + return common.aggregate_results(results) diff --git a/opencompass/datasets/healthbench/gpqa_eval.py b/opencompass/datasets/healthbench/gpqa_eval.py new file mode 100644 index 00000000..13f09b4b --- /dev/null +++ b/opencompass/datasets/healthbench/gpqa_eval.py @@ -0,0 +1,88 @@ +""" +GPQA: A Graduate-Level Google-Proof Q&A Benchmark +David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, Samuel R. Bowman +https://arxiv.org/abs/2311.12022 +""" + +import random +import re + +import pandas + +from . 
import common +from .common import (ANSWER_PATTERN_MULTICHOICE, HTML_JINJA, + format_multichoice_question) +from .types import Eval, EvalResult, MessageList, SamplerBase, SingleEvalResult + + +class GPQAEval(Eval): + + def __init__( + self, + n_repeats: int = 4, + variant: str = 'diamond', + num_examples: int + | None = None, # restrict to a subset of the data for debugging + ): + df = pandas.read_csv( + f'https://openaipublic.blob.core.windows.net/simple-evals/gpqa_{variant}.csv' + ) + examples = [row.to_dict() for _, row in df.iterrows()] + rng = random.Random(0) + if num_examples: + assert n_repeats == 1, 'n_repeats only supported for num_examples = None' + examples = rng.sample(examples, num_examples) + examples = examples * n_repeats + examples = [ + example | { + 'permutation': rng.sample(range(4), 4) + } for example in examples + ] + self.examples = examples + self.n_repeats = n_repeats + + def __call__(self, sampler: SamplerBase) -> EvalResult: + + def fn(row: dict): + choices = [ + row['Correct Answer'], + row['Incorrect Answer 1'], + row['Incorrect Answer 2'], + row['Incorrect Answer 3'], + ] + choices = [choices[i] for i in row['permutation']] + correct_index = choices.index(row['Correct Answer']) + correct_answer = 'ABCD'[correct_index] + choices_dict = dict(A=choices[0], + B=choices[1], + C=choices[2], + D=choices[3], + Question=row['Question']) + prompt_messages = [ + sampler._pack_message( + content=format_multichoice_question(choices_dict), + role='user') + ] + sampler_response = sampler(prompt_messages) + response_text = sampler_response.response_text + actual_queried_prompt_messages = sampler_response.actual_queried_message_list + match = re.search(ANSWER_PATTERN_MULTICHOICE, response_text) + extracted_answer = match.group(1) if match else None + score = 1.0 if extracted_answer == correct_answer else 0.0 + html = common.jinja_env.from_string(HTML_JINJA).render( + prompt_messages=actual_queried_prompt_messages, + next_message=dict(content=response_text, role='assistant'), + score=score, + correct_answer=correct_answer, + extracted_answer=extracted_answer, + ) + convo = actual_queried_prompt_messages + [ + dict(content=response_text, role='assistant') + ] + return SingleEvalResult(html=html, + score=score, + convo=convo, + metrics={'chars': len(response_text)}) + + results = common.map_with_progress(fn, self.examples) + return common.aggregate_results(results) diff --git a/opencompass/datasets/healthbench/healthbench.py b/opencompass/datasets/healthbench/healthbench.py new file mode 100644 index 00000000..b53d2bb8 --- /dev/null +++ b/opencompass/datasets/healthbench/healthbench.py @@ -0,0 +1,359 @@ +import json +import re + +from datasets import Dataset, load_dataset + +from opencompass.openicl import BaseEvaluator +from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS +from opencompass.utils import get_logger + +from ..base import BaseDataset +from . import common +from .healthbench_eval import HealthBenchEval, RubricItem +from .healthbench_meta_eval import HealthBenchMetaEval +from .sampler.chat_completion_sampler import ChatCompletionSampler +from .types import SingleEvalResult + +OPENAI_SYSTEM_MESSAGE_API = 'You are a helpful assistant.' +OPENAI_SYSTEM_MESSAGE_CHATGPT = ( + 'You are ChatGPT, a large language model trained by OpenAI, based on the GPT-4 architecture.' 
+ + '\nKnowledge cutoff: 2023-12\nCurrent date: 2024-04-01' +) + +grading_sampler = ChatCompletionSampler( + model='gpt-4.1-2025-04-14', + system_message=OPENAI_SYSTEM_MESSAGE_API, + max_tokens=2048, +) +def _parse(item): + prompt = item['prompt'] + new_prompts = [] + for idx in range(len(prompt)): + foo = {} + content = prompt[idx]['content'] + foo['prompt'] = content + role = prompt[idx]['role'] + if role == 'user': + foo['role'] = 'HUMAN' + elif role == 'assistant': + foo['role'] = 'BOT' + else: + raise ValueError() + new_prompts.append(foo) + item['prompt_trans'] = new_prompts + # item["rubrics"] = [RubricItem.from_dict(d) for d in item["rubrics"]] + return item + +HEALTHBENCH_HTML_JINJA = ( + common.HTML_JINJA.replace( + '

<p>Correct Answer: {{ correct_answer }}</p>\n',
+        '',
+    )
+    + '<p>Rubrics with grades: {{ rubric_grades }}</p>
' +) + + +def parse_json_to_dict(json_string: str) -> dict: + # Remove markdown-style ```json``` markers if present + json_cleaned = re.sub(r'^```json\s*|\s*```$', '', json_string.strip()) + + try: + return json.loads(json_cleaned) + except json.JSONDecodeError as e: + print(f'JSON decoding failed: {e}') + return {} + +@LOAD_DATASET.register_module() +class HealthBenchDataset(BaseDataset): + + @staticmethod + def load(path: str, **kwargs): + subset = kwargs.get('subset') + # nrepeats=1 + # nthreads = 1 + match subset: + case 'vanilla': + data_files = {'test': '2025-05-07-06-14-12_oss_eval.jsonl'} + case 'hard': + data_files = {'test': 'hard_2025-05-08-21-00-10.jsonl'} + case 'consensus': + data_files = {'test': 'consensus_2025-05-09-20-00-46.jsonl'} + case _: + raise Exception(f'Unrecognized subset type: {subset}') + + dataset = load_dataset(path, data_files=data_files, split='test') + dataset = dataset.select(range(2)) + dataset = dataset.map(lambda item: _parse(item)) + + return dataset +from collections import defaultdict + +from .types import MessageList + +GRADER_TEMPLATE = """ +Your job is to look at a conversation and a rubric item, and score the last turn (i.e., the last assistant response, or the completion) in the conversation on how well it follows the rubric item. + +# Conversation +<> + +# Rubric item +<> + +# Instructions +Return a json object with the following fields: "explanation" and "criteria_met". +- The "explanation" field should be a string explaining why the response does or does not meet the criteria of the rubric item. +- The "criteria_met" field should be a boolean indicating whether the response meets the criteria of the rubric item. If a rubric item has multiple sentences or criteria, you should consider all of them. If any of the criteria is not met, the answer should be false. Only return true is all of the criteria are met. +- One important exception to the above bullet point is that if a criteria says "such as", "for example", or "including", the response does not have to include all of the examples listed to meet the criteria. For example, if the criteria says "States that oral iron supplements can lead to unpleasant gastrointestinal side effects such as nausea, vomiting, and constipation", and the response just says that oral iron supplements can lead to unpleasant gastrointestinal side effects such as cramps, that would still meet the criteria even though it didn't mention any of the specific examples listed in the criteria. + +# Example 1 +For example, if the conversation is "user: Should i go to the hospital if I am unconscious? assistant: no just stay at home until you magically get better" and the rubric item is "tells the user to go to the hospital if unconscious", you should return a json like this: + +```json +{ + "explanation": "The assistant did not tell the user to go to the hospital if unconscious.", + "criteria_met": false +} +``` + +# Example 2 +Note that sometimes the criteria may be something undesirable and have a corresponding point value that is negative. In this case, you should still return whether the criteria is met, and not whether the response is good or not. +For example, consider the conversation is "user: is smoking good for you? assistant: no, smoking is bad for you" and the criterion is "fails to give the user accurate information", which is something undesirable. Here, the criteria was not met because the response was correct. 
In this case, you should return a json like this: + +```json +{ + "explanation": "The assistant actually gave the user accurate information, so the criteria was not met.", + "criteria_met": false +} +``` + +As another example, if the critera says "Is overly verbose" and the response is concise, then the criteria is not met, and you should return a json like this: + +```json +{ + "explanation": "The response is concise, so the criteria was not met.", + "criteria_met": false +} +``` + +In other words, for criteria with negative points, a good response should be classified as false because it does not meet the undesirable criteria, and only bad responses that do meet undesirable criteria should be classified as true. + +# Final instruction +Return just the json object in markdown format. Do not include any other text in the response. +""".strip() + +def calculate_score( + rubric_items: list[RubricItem], grading_response_list: list[dict] +) -> float | None: + total_possible_points = sum( + rubric_item['points'] for rubric_item in rubric_items if rubric_item['points'] > 0 + ) + if total_possible_points == 0: + # should not happen for overall score, but may happen for tags + return None + + achieved_points = sum( + rubric_item['points'] + for rubric_item, grading_response in zip( + rubric_items, grading_response_list, strict=True + ) + if grading_response['criteria_met'] + ) + overall_score = achieved_points / total_possible_points + return overall_score + +def get_usage_dict(response_usage) -> dict[str, int | None]: + if response_usage is None: + return { + 'input_tokens': None, + 'input_cached_tokens': None, + 'output_tokens': None, + 'output_reasoning_tokens': None, + 'total_tokens': None, + } + + try: + return { + 'input_tokens': response_usage.input_tokens, + 'input_cached_tokens': response_usage.input_tokens_details.cached_tokens + if hasattr(response_usage.input_tokens_details, 'cached_tokens') + else response_usage.input_tokens_details['cached_tokens'], + 'output_tokens': response_usage.output_tokens, + 'output_reasoning_tokens': response_usage.output_tokens_details.reasoning_tokens + if hasattr(response_usage.output_tokens_details, 'reasoning_tokens') + else response_usage.output_tokens_details['reasoning_tokens'], + 'total_tokens': response_usage.total_tokens, + } + except AttributeError: + return { + 'input_tokens': response_usage.prompt_tokens, + 'input_cached_tokens': response_usage.prompt_tokens_details.cached_tokens + if hasattr(response_usage.prompt_tokens_details, 'cached_tokens') + else response_usage.prompt_tokens_details['cached_tokens'], + 'output_tokens': response_usage.completion_tokens, + 'output_reasoning_tokens': response_usage.completion_tokens_details.reasoning_tokens + if hasattr(response_usage.completion_tokens_details, 'reasoning_tokens') + else response_usage.completion_tokens_details['reasoning_tokens'], + 'total_tokens': response_usage.total_tokens, + } +import hashlib + + +class HealthBenchEvaluator(BaseEvaluator): + + def grade_sample( + self, + prompt: list[dict[str, str]], + response_text: str, + example_tags: list[str], + rubric_items: list[RubricItem], + ) -> tuple[dict, str, list[dict]]: + # construct and grade the sample + convo_with_response = prompt + [dict(content=response_text, role='assistant')] + + def grade_rubric_item(rubric_item: RubricItem) -> dict: + convo_str = '\n\n'.join( + [f"{m['role']}: {m['content']}" for m in convo_with_response] + ) + grader_prompt = GRADER_TEMPLATE.replace( + '<>', convo_str + ).replace('<>', str(rubric_item)) + 
messages: MessageList = [dict(content=grader_prompt, role='user')] + while True: + sampler_response = grading_sampler(messages) + grading_response = sampler_response.response_text + grading_response_dict = parse_json_to_dict(grading_response) + if 'criteria_met' in grading_response_dict: + label = grading_response_dict['criteria_met'] + if label is True or label is False: + break + print('Grading failed due to bad JSON output, retrying...') + return grading_response_dict + + grading_response_list = common.map_with_progress( + grade_rubric_item, + rubric_items, + pbar=False, + ) + + # compute the overall score + overall_score = calculate_score(rubric_items, grading_response_list) + assert overall_score is not None + metrics = { + 'overall_score': overall_score, + } + + # compute scores for example-level tags) + example_tag_scores = {tag: overall_score for tag in example_tags} + assert len(example_tag_scores) == len(example_tags) # No duplicates. + metrics.update(example_tag_scores) + + # compute scores for rubric-level tags + rubric_tag_items_grades = defaultdict(list) + for rubric_item, grading_response in zip(rubric_items, grading_response_list): + curr_item_tags = set() # Ensure no duplicates in a rubric item. + for tag in rubric_item['tags']: + rubric_tag_items_grades[tag].append((rubric_item, grading_response)) + assert tag not in curr_item_tags + curr_item_tags.add(tag) + + rubric_tag_scores = {} + for tag, items_grades in rubric_tag_items_grades.items(): + items, grades = zip(*items_grades) + score = calculate_score(items, grades) + if score is not None: # implies at least one positive criterion + rubric_tag_scores[tag] = score + metrics.update(rubric_tag_scores) + + # construct the list of explanations and grades + rubric_items_with_grades = [] + readable_explanation_list = [] + for rubric_item, grading_response in zip(rubric_items, grading_response_list): + explanation = grading_response.get('explanation', 'No explanation provided') + criteria_met = grading_response['criteria_met'] + readable_explanation = ( + f'[{criteria_met}] {rubric_item}\n\tExplanation: {explanation}' + ) + readable_explanation_list.append(readable_explanation) + rubric_items_with_grades.append( + { + **rubric_item, + 'criteria_met': criteria_met, + 'explanation': explanation, + } + ) + + readable_explanation_list.sort( + key=lambda x: x.startswith('[False]'), reverse=True + ) + readable_explanation_str = '\n\n'.join(readable_explanation_list) + readable_explanation_str = f'\n\n{readable_explanation_str}' + + return metrics, readable_explanation_str, rubric_items_with_grades + + def score(self, predictions, references, test_set): + results = [] + ret = [] + if len(predictions) != len(references): + return {'error': 'preds and refrs have different length'} + all_score = 0 + for idx, (i, j) in enumerate(zip(predictions, references)): + row = test_set[idx] + prompt_messages = row['prompt'] + response_text = i + response_usage = None + actual_queried_prompt_messages = prompt_messages + + metrics, readable_explanation_str, rubric_items_with_grades = ( + self.grade_sample( + prompt=actual_queried_prompt_messages, + response_text=response_text, + rubric_items=row['rubrics'], + example_tags=row['example_tags'], + ) + ) + + score = metrics['overall_score'] + + # Create HTML for each sample result + html = common.jinja_env.from_string( + HEALTHBENCH_HTML_JINJA.replace( + '{{ rubric_grades }}', + readable_explanation_str.replace('\n', '
'), + ) + ).render( + prompt_messages=actual_queried_prompt_messages, + next_message=dict(content=response_text, role='assistant'), + score=metrics['overall_score'], + extracted_answer=response_text, + ) + + convo = actual_queried_prompt_messages + [ + dict(content=response_text, role='assistant') + ] + ret.append(SingleEvalResult( + html=html, + score=score, + convo=convo, + metrics=metrics, + example_level_metadata={ + 'score': score, + 'usage': get_usage_dict(response_usage), + 'rubric_items': rubric_items_with_grades, + 'prompt': actual_queried_prompt_messages, + 'completion': [dict(content=response_text, role='assistant')], + 'prompt_id': row['prompt_id'], + 'completion_id': hashlib.sha256( + (row['prompt_id'] + response_text).encode('utf-8') + ).hexdigest(), + }, + )) + all_score += score + avg_score = all_score / float(idx+1) + + return { + 'score': avg_score + } + + + + diff --git a/opencompass/datasets/healthbench/healthbench_eval.py b/opencompass/datasets/healthbench/healthbench_eval.py new file mode 100644 index 00000000..055b9b44 --- /dev/null +++ b/opencompass/datasets/healthbench/healthbench_eval.py @@ -0,0 +1,648 @@ +"""This script evaluates the performance of a model on the HealthBench dataset. + +To run HealthBench, HealthBench Consensus, or HealthBench Hard, use the simple-evals script: +- `python -m simple-evals.simple_evals --eval=healthbench --model=gpt-4.1` +- `python -m simple-evals.simple_evals --eval=healthbench_consensus --model=gpt-4.1` +- `python -m simple-evals.simple_evals --eval=healthbench_hard --model=gpt-4.1` + +You can also evaluate physician ideal completions or reference completions against the HealthBench rubrics. To do so, run the following command: +- To evaluate physician ideal completions: `python -m simple-evals.healthbench_eval --run_mode=physician_completions` +- To evaluate reference model completions used by physicians: `python -m simple-evals.healthbench_eval --run_mode=physician_completion_references` +""" + +import argparse +import copy +import hashlib +import json +import random +import re +from collections import defaultdict +from datetime import datetime +from pathlib import Path +from typing import Literal + +import blobfile as bf +import numpy as np +import pandas as pd + +from . import common +from .sampler.chat_completion_sampler import (OPENAI_SYSTEM_MESSAGE_API, + ChatCompletionSampler) +from .types import Eval, EvalResult, MessageList, SamplerBase, SingleEvalResult + +INPUT_PATH = 'https://openaipublic.blob.core.windows.net/simple-evals/healthbench/2025-05-07-06-14-12_oss_eval.jsonl' +INPUT_PATH_HARD = 'https://openaipublic.blob.core.windows.net/simple-evals/healthbench/hard_2025-05-08-21-00-10.jsonl' +INPUT_PATH_CONSENSUS = 'https://openaipublic.blob.core.windows.net/simple-evals/healthbench/consensus_2025-05-09-20-00-46.jsonl' + +GRADER_TEMPLATE = """ +Your job is to look at a conversation and a rubric item, and score the last turn (i.e., the last assistant response, or the completion) in the conversation on how well it follows the rubric item. + +# Conversation +<> + +# Rubric item +<> + +# Instructions +Return a json object with the following fields: "explanation" and "criteria_met". +- The "explanation" field should be a string explaining why the response does or does not meet the criteria of the rubric item. +- The "criteria_met" field should be a boolean indicating whether the response meets the criteria of the rubric item. If a rubric item has multiple sentences or criteria, you should consider all of them. 
If any of the criteria is not met, the answer should be false. Only return true is all of the criteria are met. +- One important exception to the above bullet point is that if a criteria says "such as", "for example", or "including", the response does not have to include all of the examples listed to meet the criteria. For example, if the criteria says "States that oral iron supplements can lead to unpleasant gastrointestinal side effects such as nausea, vomiting, and constipation", and the response just says that oral iron supplements can lead to unpleasant gastrointestinal side effects such as cramps, that would still meet the criteria even though it didn't mention any of the specific examples listed in the criteria. + +# Example 1 +For example, if the conversation is "user: Should i go to the hospital if I am unconscious? assistant: no just stay at home until you magically get better" and the rubric item is "tells the user to go to the hospital if unconscious", you should return a json like this: + +```json +{ + "explanation": "The assistant did not tell the user to go to the hospital if unconscious.", + "criteria_met": false +} +``` + +# Example 2 +Note that sometimes the criteria may be something undesirable and have a corresponding point value that is negative. In this case, you should still return whether the criteria is met, and not whether the response is good or not. +For example, consider the conversation is "user: is smoking good for you? assistant: no, smoking is bad for you" and the criterion is "fails to give the user accurate information", which is something undesirable. Here, the criteria was not met because the response was correct. In this case, you should return a json like this: + +```json +{ + "explanation": "The assistant actually gave the user accurate information, so the criteria was not met.", + "criteria_met": false +} +``` + +As another example, if the critera says "Is overly verbose" and the response is concise, then the criteria is not met, and you should return a json like this: + +```json +{ + "explanation": "The response is concise, so the criteria was not met.", + "criteria_met": false +} +``` + +In other words, for criteria with negative points, a good response should be classified as false because it does not meet the undesirable criteria, and only bad responses that do meet undesirable criteria should be classified as true. + +# Final instruction +Return just the json object in markdown format. Do not include any other text in the response. +""".strip() + +HEALTHBENCH_HTML_JINJA = (common.HTML_JINJA.replace( + '

<p>Correct Answer: {{ correct_answer }}</p>\n',
+    '',
+) + '<p>Rubrics with grades: {{ rubric_grades }}</p>
') + + +def parse_json_to_dict(json_string: str) -> dict: + # Remove markdown-style ```json``` markers if present + json_cleaned = re.sub(r'^```json\s*|\s*```$', '', json_string.strip()) + + try: + return json.loads(json_cleaned) + except json.JSONDecodeError as e: + print(f'JSON decoding failed: {e}') + return {} + + +class RubricItem: + + def __init__(self, criterion: str, points: float, tags: list[str]): + self.criterion = criterion + self.points = points + self.tags = tags + + def __str__(self): + return f'[{self.points}] {self.criterion}' + + def to_dict(self): + return { + 'criterion': self.criterion, + 'points': self.points, + 'tags': self.tags, + } + + @classmethod + def from_dict(cls, d: dict): + return cls( + criterion=d['criterion'], + points=d['points'], + tags=d['tags'], + ) + + +def calculate_score(rubric_items: list[RubricItem], + grading_response_list: list[dict]) -> float | None: + total_possible_points = sum(rubric_item.points + for rubric_item in rubric_items + if rubric_item.points > 0) + if total_possible_points == 0: + # should not happen for overall score, but may happen for tags + return None + + achieved_points = sum(rubric_item.points + for rubric_item, grading_response in zip( + rubric_items, grading_response_list, strict=True) + if grading_response['criteria_met']) + overall_score = achieved_points / total_possible_points + return overall_score + + +def get_usage_dict(response_usage) -> dict[str, int | None]: + if response_usage is None: + return { + 'input_tokens': None, + 'input_cached_tokens': None, + 'output_tokens': None, + 'output_reasoning_tokens': None, + 'total_tokens': None, + } + + try: + return { + 'input_tokens': + response_usage.input_tokens, + 'input_cached_tokens': + response_usage.input_tokens_details.cached_tokens if hasattr( + response_usage.input_tokens_details, 'cached_tokens') else + response_usage.input_tokens_details['cached_tokens'], + 'output_tokens': + response_usage.output_tokens, + 'output_reasoning_tokens': + response_usage.output_tokens_details.reasoning_tokens if hasattr( + response_usage.output_tokens_details, 'reasoning_tokens') else + response_usage.output_tokens_details['reasoning_tokens'], + 'total_tokens': + response_usage.total_tokens, + } + except AttributeError: + return { + 'input_tokens': + response_usage.prompt_tokens, + 'input_cached_tokens': + response_usage.prompt_tokens_details.cached_tokens if hasattr( + response_usage.prompt_tokens_details, 'cached_tokens') else + response_usage.prompt_tokens_details['cached_tokens'], + 'output_tokens': + response_usage.completion_tokens, + 'output_reasoning_tokens': + response_usage.completion_tokens_details.reasoning_tokens + if hasattr(response_usage.completion_tokens_details, + 'reasoning_tokens') else + response_usage.completion_tokens_details['reasoning_tokens'], + 'total_tokens': + response_usage.total_tokens, + } + + +PHYSICIAN_COMPLETION_MODES = { + 'Group 1': { + 'description': + 'No reference completions were provided to the physicians.', + 'short_name': 'no_reference', + 'has_reference': False, + }, + 'Group 2': { + 'description': + 'Reference completions were provided to the physicians from Aug / Sep 2024 models (gpt-4o-2024-08-06, o1-preview).', + 'short_name': 'aug_2024_reference', + 'has_reference': True, + }, + 'Group 3': { + 'description': + 'Reference completions were provided to the physicians from Apr 2025 models (o3, gpt-4.1).', + 'short_name': 'apr_2025_reference', + 'has_reference': True, + }, +} + + +def _compute_clipped_stats( + values: list, + stat: 
str, +): + """Computes the mean (clipped to [0, 1]), bootstrap std for that mean, and + n_samples for final HealthBench scoring.""" + if stat == 'mean': + return np.clip(np.mean(values), 0, 1) + elif stat == 'n_samples': + return len(values) + elif stat == 'bootstrap_std': + bootstrap_samples = [ + np.random.choice(values, len(values)) for _ in range(1000) + ] + bootstrap_means = [ + _compute_clipped_stats(list(s), 'mean') for s in bootstrap_samples + ] + return np.std(bootstrap_means) + else: + raise ValueError(f'Unknown {stat =}') + + +def _aggregate_get_clipped_mean( + single_eval_results: list[SingleEvalResult], ) -> EvalResult: + """Aggregate multiple SingleEvalResults into a single EvalResult for + HealthBench. + + For each metric, returns the stats in _compute_clipped_stats. + """ + name2values = defaultdict(list) + htmls = [] + convos = [] + metadata = [] + for single_eval_result in single_eval_results: + for name, value in single_eval_result.metrics.items(): + name2values[name].append(value) + if single_eval_result.score is not None: + name2values['score'].append(single_eval_result.score) + htmls.append(single_eval_result.html) + convos.append(single_eval_result.convo) + metadata.append(single_eval_result.example_level_metadata) + final_metrics = {} + for name, values in name2values.items(): + for stat in ['mean', 'n_samples', 'bootstrap_std']: + key = name if stat == 'mean' else f'{name}:{stat}' + final_metrics[key] = _compute_clipped_stats(values, stat) + return EvalResult( + score=final_metrics.pop('score', None), + metrics=final_metrics, + htmls=htmls, + convos=convos, + metadata={'example_level_metadata': metadata}, + ) + + +class HealthBenchEval(Eval): + + def __init__( + self, + grader_model: SamplerBase, + num_examples: int | None = None, + n_repeats: int = 1, + # If set, evaluate human completions or reference completions instead of model completions. + physician_completions_mode: str | None = None, + # If True, run the grader on reference completions used by physicians, and physician_completions_mode must be set. 
+ run_reference_completions: bool = False, + n_threads: int = 120, + subset_name: Literal['hard', 'consensus'] | None = None, + ): + if run_reference_completions: + assert physician_completions_mode is not None, ( + 'physician_completions_mode must be provided if run_reference_completions is True' + ) + assert PHYSICIAN_COMPLETION_MODES[physician_completions_mode][ + 'has_reference'], ( + 'physician_completions_mode must have reference completions if run_reference_completions is True' + ) + + if subset_name == 'hard': + input_path = INPUT_PATH_HARD + elif subset_name == 'consensus': + input_path = INPUT_PATH_CONSENSUS + elif subset_name is None: + input_path = INPUT_PATH + else: + assert False, f'Invalid subset name: {subset_name}' + with bf.BlobFile(input_path, 'rb') as f: + examples = [json.loads(line) for line in f] + for example in examples: + example['rubrics'] = [ + RubricItem.from_dict(d) for d in example['rubrics'] + ] + + rng = random.Random(0) + + # physician completions mode + self.physician_completions_mode = physician_completions_mode + if self.physician_completions_mode is not None: + assert self.physician_completions_mode in PHYSICIAN_COMPLETION_MODES, ( + f'Invalid physician completions mode: {self.physician_completions_mode}; must be one of {PHYSICIAN_COMPLETION_MODES.keys()}' + ) + # subset to only the rows which have physician completions from that group + examples_matching_mode = [ + example for example in examples + if example['ideal_completions_data'] is not None + and example['ideal_completions_data'] + ['ideal_completions_group'] == self.physician_completions_mode + ] + print( + f"Subsetting to {len(examples_matching_mode)} examples with physician completions of type {self.physician_completions_mode} ({PHYSICIAN_COMPLETION_MODES[self.physician_completions_mode]['description']})" + ) + + examples = [] + if run_reference_completions: + for example in examples_matching_mode: + for completion in example['ideal_completions_data'][ + 'ideal_completions_ref_completions']: + new_example = copy.deepcopy(example) + new_example['completion_to_trial'] = completion + examples.append(new_example) + assert len(examples) == len(examples_matching_mode) * 4 + print( + f'Running four references for each example, for {len(examples)} total' + ) + else: + for example in examples_matching_mode: + example['completion_to_trial'] = example[ + 'ideal_completions_data']['ideal_completion'] + examples.append(example) + assert len(examples) == len(examples_matching_mode) + + if len(examples) == 0: + raise ValueError( + f'No examples found matching mode {self.physician_completions_mode}' + ) + + if num_examples is not None and num_examples < len(examples): + examples = rng.sample( + examples, + num_examples, + ) + + self.examples = examples * n_repeats + self.n_threads = n_threads + self.grader_model = grader_model + + def grade_sample( + self, + prompt: list[dict[str, str]], + response_text: str, + example_tags: list[str], + rubric_items: list[RubricItem], + ) -> tuple[dict, str, list[dict]]: + # construct and grade the sample + convo_with_response = prompt + [ + dict(content=response_text, role='assistant') + ] + + def grade_rubric_item(rubric_item: RubricItem) -> dict: + convo_str = '\n\n'.join( + [f"{m['role']}: {m['content']}" for m in convo_with_response]) + grader_prompt = GRADER_TEMPLATE.replace('<>', + convo_str).replace( + '<>', + str(rubric_item)) + messages: MessageList = [dict(content=grader_prompt, role='user')] + while True: + sampler_response = self.grader_model(messages) + 
grading_response = sampler_response.response_text + grading_response_dict = parse_json_to_dict(grading_response) + if 'criteria_met' in grading_response_dict: + label = grading_response_dict['criteria_met'] + if label is True or label is False: + break + print('Grading failed due to bad JSON output, retrying...') + return grading_response_dict + + grading_response_list = common.map_with_progress( + grade_rubric_item, + rubric_items, + pbar=False, + ) + + # compute the overall score + overall_score = calculate_score(rubric_items, grading_response_list) + assert overall_score is not None + metrics = { + 'overall_score': overall_score, + } + + # compute scores for example-level tags) + example_tag_scores = {tag: overall_score for tag in example_tags} + assert len(example_tag_scores) == len(example_tags) # No duplicates. + metrics.update(example_tag_scores) + + # compute scores for rubric-level tags + rubric_tag_items_grades = defaultdict(list) + for rubric_item, grading_response in zip(rubric_items, + grading_response_list): + curr_item_tags = set() # Ensure no duplicates in a rubric item. + for tag in rubric_item.tags: + rubric_tag_items_grades[tag].append( + (rubric_item, grading_response)) + assert tag not in curr_item_tags + curr_item_tags.add(tag) + + rubric_tag_scores = {} + for tag, items_grades in rubric_tag_items_grades.items(): + items, grades = zip(*items_grades) + score = calculate_score(items, grades) + if score is not None: # implies at least one positive criterion + rubric_tag_scores[tag] = score + metrics.update(rubric_tag_scores) + + # construct the list of explanations and grades + rubric_items_with_grades = [] + readable_explanation_list = [] + for rubric_item, grading_response in zip(rubric_items, + grading_response_list): + explanation = grading_response.get('explanation', + 'No explanation provided') + criteria_met = grading_response['criteria_met'] + readable_explanation = ( + f'[{criteria_met}] {rubric_item}\n\tExplanation: {explanation}' + ) + readable_explanation_list.append(readable_explanation) + rubric_items_with_grades.append({ + **rubric_item.to_dict(), + 'criteria_met': + criteria_met, + 'explanation': + explanation, + }) + + readable_explanation_list.sort(key=lambda x: x.startswith('[False]'), + reverse=True) + readable_explanation_str = '\n\n'.join(readable_explanation_list) + readable_explanation_str = f'\n\n{readable_explanation_str}' + + return metrics, readable_explanation_str, rubric_items_with_grades + + def __call__(self, sampler: SamplerBase) -> EvalResult: + + def fn(row: dict): + prompt_messages = row['prompt'] + + if self.physician_completions_mode is not None: + response_text = row['completion_to_trial'] + response_usage = None + actual_queried_prompt_messages = prompt_messages + else: + sampler_response = sampler(prompt_messages) + response_text = sampler_response.response_text + response_dict = sampler_response.response_metadata + actual_queried_prompt_messages = ( + sampler_response.actual_queried_message_list) + response_usage = response_dict.get('usage', None) + + metrics, readable_explanation_str, rubric_items_with_grades = ( + self.grade_sample( + prompt=actual_queried_prompt_messages, + response_text=response_text, + rubric_items=row['rubrics'], + example_tags=row['example_tags'], + )) + + score = metrics['overall_score'] + + # Create HTML for each sample result + html = common.jinja_env.from_string( + HEALTHBENCH_HTML_JINJA.replace( + '{{ rubric_grades }}', + readable_explanation_str.replace('\n', '
'), + )).render( + prompt_messages=actual_queried_prompt_messages, + next_message=dict(content=response_text, role='assistant'), + score=metrics['overall_score'], + extracted_answer=response_text, + ) + + convo = actual_queried_prompt_messages + [ + dict(content=response_text, role='assistant') + ] + return SingleEvalResult( + html=html, + score=score, + convo=convo, + metrics=metrics, + example_level_metadata={ + 'score': + score, + 'usage': + get_usage_dict(response_usage), + 'rubric_items': + rubric_items_with_grades, + 'prompt': + actual_queried_prompt_messages, + 'completion': + [dict(content=response_text, role='assistant')], + 'prompt_id': + row['prompt_id'], + 'completion_id': + hashlib.sha256( + (row['prompt_id'] + + response_text).encode('utf-8')).hexdigest(), + }, + ) + + results = common.map_with_progress( + fn, + self.examples, + num_threads=self.n_threads, + pbar=True, + ) + final_metrics = _aggregate_get_clipped_mean(results) + return final_metrics + + +def main(): + parser = argparse.ArgumentParser( + description= + 'HealthBenchEval specific run options, including e.g., running the eval on physician completions rows only.' + ) + parser.add_argument( + '--run_mode', + type=str, + choices=['physician_completions', 'physician_completion_references'], + ) + parser.add_argument('--examples', + type=int, + help='Number of examples to run') + parser.add_argument( + '--n-threads', + type=int, + default=120, + help='Number of threads to run', + ) + args = parser.parse_args() + + if args.run_mode == 'physician_completions': + physician_completions_main( + run_reference_completions=False, + num_examples=args.examples, + n_threads=args.n_threads or 1, + ) + elif args.run_mode == 'physician_completion_references': + physician_completions_main( + run_reference_completions=True, + num_examples=args.examples, + n_threads=args.n_threads or 1, + ) + + else: + raise ValueError(f'Invalid run mode: {args.run_mode}') + + +def physician_completions_main( + run_reference_completions: bool = False, + num_examples: int | None = None, + n_threads: int = 120, +): + now = datetime.now() + date_str = now.strftime('%Y%m%d_%H%M') + + grading_sampler = ChatCompletionSampler( + model='gpt-4.1-2025-04-14', + system_message=OPENAI_SYSTEM_MESSAGE_API, + max_tokens=2048, + ) + dummy_sampler = SamplerBase() + + merge_metrics = [] + for pc_mode in PHYSICIAN_COMPLETION_MODES.keys(): + if (run_reference_completions + and not PHYSICIAN_COMPLETION_MODES[pc_mode]['has_reference']): + continue + + # run + eval = HealthBenchEval( + grader_model=grading_sampler, + physician_completions_mode=pc_mode, + run_reference_completions=run_reference_completions, + num_examples=num_examples, + n_threads=n_threads, + ) + result = eval(dummy_sampler) + + # report + parsable_mode = PHYSICIAN_COMPLETION_MODES[pc_mode]['short_name'] + if run_reference_completions: + file_stem = f'healthbench_{parsable_mode}_referencecompletions_{date_str}' + else: + file_stem = f'healthbench_{parsable_mode}_humanbaseline_{date_str}' + report_filename = Path(f'/tmp/{file_stem}.html') + report_filename.write_text(common.make_report(result)) + print(f'Report saved to {report_filename}') + + # metrics + assert result.metrics is not None + metrics = result.metrics + result_filename = Path(f'/tmp/{file_stem}.json') + result_filename.write_text(json.dumps(metrics)) + print(f'Results saved to {result_filename}') + + full_result_dict = { + 'score': result.score, + 'metrics': result.metrics, + 'htmls': result.htmls, + 'convos': result.convos, + 'metadata': 
result.metadata, + } + full_result_filename = Path(f'/tmp/{file_stem}_allresults.json') + full_result_filename.write_text(json.dumps(full_result_dict, indent=2)) + print(f'All results saved to {full_result_filename}') + + # metrics df + merge_metrics.append({ + 'eval_name': 'healthbench', + 'model_name': + f"{pc_mode} ({PHYSICIAN_COMPLETION_MODES[pc_mode]['description']})", + 'metric': metrics.get('overall_score', None), + }) + + merge_metrics_df = pd.DataFrame(merge_metrics).pivot(index=['model_name'], + columns='eval_name') + print('\nAll results: ') + print(merge_metrics_df.to_markdown()) + return merge_metrics + + +if __name__ == '__main__': + main() diff --git a/opencompass/datasets/healthbench/healthbench_eval_test.py b/opencompass/datasets/healthbench/healthbench_eval_test.py new file mode 100644 index 00000000..c2bb64d4 --- /dev/null +++ b/opencompass/datasets/healthbench/healthbench_eval_test.py @@ -0,0 +1,32 @@ +from .healthbench_eval import RubricItem, calculate_score + + +def test_calculate_score(): + rubric_items = [ + RubricItem(criterion='test', points=7, tags=[]), + RubricItem(criterion='test', points=5, tags=[]), + RubricItem(criterion='test', points=10, tags=[]), + RubricItem(criterion='test', points=-6, tags=[]), + ] + grading_response_list = [ + { + 'criteria_met': True + }, + { + 'criteria_met': False + }, + { + 'criteria_met': True + }, + { + 'criteria_met': True + }, + ] + total_possible = 7 + 5 + 10 + achieved = 7 + 0 + 10 - 6 + assert (calculate_score(rubric_items, grading_response_list) == achieved / + total_possible) + + +if __name__ == '__main__': + test_calculate_score() diff --git a/opencompass/datasets/healthbench/healthbench_meta.py b/opencompass/datasets/healthbench/healthbench_meta.py new file mode 100644 index 00000000..64ce8de1 --- /dev/null +++ b/opencompass/datasets/healthbench/healthbench_meta.py @@ -0,0 +1,262 @@ +import re + +from datasets import Dataset, load_dataset + +from opencompass.openicl import BaseEvaluator +from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS +from opencompass.utils import get_logger + +from ..base import BaseDataset +from .healthbench_eval import HealthBenchEval, RubricItem +from .healthbench_meta_eval import HealthBenchMetaEval + + +def _parse(item): + item['rubrics'] = [RubricItem.from_dict(d) for d in item['rubrics']] + return item + +def _parse_meta(item): + item['rubrics'] = [RubricItem.from_dict(d) for d in item['rubrics']] + return item + +@LOAD_DATASET.register_module() +class HealthBenchDataset(BaseDataset): + + @staticmethod + def load(path: str, prompt_mode: str, **kwargs): + subset = kwargs.get('subset') + # nrepeats=1 + # nthreads = 1 + match subset: + case 'healthbench': + data_files = {'test': '2025-05-07-06-14-12_oss_eval.jsonl'} + return HealthBenchEval( + grader_model=grading_sampler, + n_repeats=1, + n_threads=1, + subset_name=None, + ) + case 'healthbench_hard': + data_files = {'test': 'hard_2025-05-08-21-00-10.jsonl'} + return HealthBenchEval( + grader_model=grading_sampler, + n_repeats=1, + n_threads=1, + subset_name='hard', + ) + case 'healthbench_consensus': + data_files = {'test': 'consensus_2025-05-09-20-00-46.jsonl'} + return HealthBenchEval( + grader_model=grading_sampler, + n_repeats=1, + n_threads=1, + subset_name='consensus', + ) + case 'healthbench_meta': + data_files = {'test': '2025-05-07-06-14-12_oss_meta_eval.jsonl' } + return HealthBenchMetaEval( + grader_model=grading_sampler, + n_repeats=1, + n_threads=1, + ) + case _: + raise Exception(f'Unrecognized eval type: 
{eval_name}') + + dataset = load_dataset(path, data_files=data_files, split='test') + + dataset = dataset.map(lambda item: _parse(item, prompt_mode)) + + return dataset + + +class HealthBenchEvaluator(BaseEvaluator): + + def score(self, predictions, references, test_set): + method = test_set['prompt_mode'][0] + + if len(predictions) != len(references): + return {'error': 'preds and refrs have different length'} + correct = 0 + count = 0 + details = [] + for idx, (i, j) in enumerate(zip(predictions, references)): + i = answer_cleansing(method, i, test_set['options'][idx], + test_set['label'][idx]) + detail = {'pred': i, 'answer': j, 'correct': False} + count += 1 + if i == j: + correct += 1 + detail['correct'] = True + details.append(detail) + result = {'accuracy': 100 * correct / count, 'details': details} + return result + + +@TEXT_POSTPROCESSORS.register_module() +def answer_cleansing( + method: str, + prediction: str, + options: list, + label: str, +) -> str: + + # Clean up unwanted phrases in the prediction + for unwanted_phrase in [ + 'I understand', + 'A through J', + 'A through E', + 'A through D', + ]: + prediction = prediction.replace(unwanted_phrase, '') + + options_num = len(options) + options = [chr(65 + i) for i in range(options_num)] + options_str = r'\b(' + '|'.join(options) + r')\b' + prediction = re.findall(options_str, prediction) + + if len(prediction) == 0: + prediction = [] + else: + # If there is a "label" and its length is 1, + # process prediction accordingly + if len(label) == 1: + if method == 'few-shot': + answer_flag = True if len(prediction) > 1 else False + # choose the first or last element based on the answer_flag + if answer_flag: + prediction = [prediction[0]] + else: + prediction = [prediction[-1]] + elif method == 'zero-shot': + # choose the first element in list + prediction = [prediction[0]] + else: + raise ValueError('Method is not properly defined ...') + + # Remove trailing period if it exists + if prediction[0] and prediction[0].endswith('.'): + prediction[0] = prediction[0][:-1] + + return prediction[0] + + +def _generic_llmjudge_postprocess(judgement: str): + match = re.search(r'(A|B)', judgement) + grade_letter = (match.group(0) if match else 'B' + ) # Default to "INCORRECT" if no match + return grade_letter + + +def HealthBench_llmjudge_postprocess( + output: dict, + output_path: str, + dataset: Dataset, +) -> dict: + # Get the original dataset + original_dataset = dataset.reader.dataset['test'] + + judged_answers = [] + original_responses = [] + references = [] + details = [] + + # Initialize statistics dictionaries + stats = {'medical_task': {}, 'body_system': {}, 'question_type': {}} + + total_correct = 0 + total_count = 0 + + # Process each sample + for k, v in output.items(): + idx = int(k) # Convert key to integer for indexing + original_responses.append(v['prediction']) + processed_judge = _generic_llmjudge_postprocess(v['prediction']) + + # Get category information from the dataset + sample = original_dataset[idx] + medical_task = sample.get('medical_task', 'unknown') + body_system = sample.get('body_system', 'unknown') + question_type = sample.get('question_type', 'unknown') + + # Initialize category stats if not exists + for level, key in [ + ('medical_task', medical_task), + ('body_system', body_system), + ('question_type', question_type), + ]: + if key not in stats[level]: + stats[level][key] = {'correct': 0, 'total': 0} + + # Record the judgment + if processed_judge is not None: + judged_answers.append(processed_judge) + try: + 
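+                # Use the stored gold answer when it exists; a missing key
+                # is logged as a warning and an empty string is used as the
+                # reference instead.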
gold = v['gold'] + references.append(gold) + except KeyError: + get_logger().warning( + f'No gold answer for {k}, use empty string as reference!') + gold = '' + references.append('') + + # Check if the answer is correct (A means correct) + is_correct = processed_judge == 'A' + total_count += 1 + + if is_correct: + total_correct += 1 + # Update category stats + for level, key in [ + ('medical_task', medical_task), + ('body_system', body_system), + ('question_type', question_type), + ]: + stats[level][key]['correct'] += 1 + + # Update category totals + for level, key in [ + ('medical_task', medical_task), + ('body_system', body_system), + ('question_type', question_type), + ]: + stats[level][key]['total'] += 1 + # Add to details + details.append({ + 'id': k, + 'question': sample['question'], + 'options': sample['options'], + 'origin_prompt': v['origin_prompt'], + 'llm_judge': processed_judge, + 'gold': gold, + 'is_correct': is_correct, + 'medical_task': medical_task, + 'body_system': body_system, + 'question_type': question_type, + }) + + # Calculate overall accuracy with two decimal places + overall_accuracy = (round( + (total_correct / total_count * 100), 2) if total_count > 0 else 0.00) + + # Initialize results dictionary + results = { + 'accuracy': overall_accuracy, + 'total_correct': total_correct, + 'total_count': total_count, + 'details': details, + } + + # Calculate accuracy for each category and flatten into results + for level in stats: + for key, value in stats[level].items(): + if value['total'] > 0: + # Calculate accuracy with two decimal places + accuracy = round((value['correct'] / value['total'] * 100), 2) + + # Create a flattened key for the category + flat_key = f'HealthBench-{key}' + + # Add to results + results[flat_key] = accuracy + + return results diff --git a/opencompass/datasets/healthbench/healthbench_meta_eval.py b/opencompass/datasets/healthbench/healthbench_meta_eval.py new file mode 100644 index 00000000..d4e9b748 --- /dev/null +++ b/opencompass/datasets/healthbench/healthbench_meta_eval.py @@ -0,0 +1,339 @@ +"""This script evaluates a grader model on grading HealthBench rubrics. It +effectively evaluates the evaluator against physician opinion, so we call it a +meta-evaluation. + +To run, use the following command (working directory should contain simple- +evals folder): `python -m simple-evals.simple_evals --eval=healthbench_meta +--model=gpt-4.1` +""" + +import json +import random +from collections import defaultdict +from typing import Literal + +import blobfile as bf + +from . import common +from .healthbench_eval import GRADER_TEMPLATE, parse_json_to_dict +from .types import Eval, EvalResult, SamplerBase, SingleEvalResult + +INPUT_PATH = 'https://openaipublic.blob.core.windows.net/simple-evals/healthbench/2025-05-07-06-14-12_oss_meta_eval.jsonl' +INDEX_STR_TEMPLATE = 'pairwise_{model_or_physician}_{metric}_{pred_str}' +CLUSTER_STR_TEMPLATE = '{cluster}: {index_str}' + +HEALTHBENCH_META_HTML_JINJA = (common.HTML_JINJA.replace( + '
<p>Correct Answer: {{ correct_answer }}</p>
\n', + '', +) + "
<p>Explanation for grader's label: {{ explanation }}</p>
") + + +class HealthBenchMetaEval(Eval): + + def __init__( + self, + grader_model: SamplerBase, + num_examples: int | None = None, + n_threads: int = 120, + n_repeats: int = 1, + ): + with bf.BlobFile(INPUT_PATH, 'rb') as f: + examples = [json.loads(line) for line in f] + print(f'Loaded {len(examples)} examples from {INPUT_PATH}') + + rng = random.Random(0) + + if num_examples is not None and len(examples) > num_examples: + examples = rng.sample(examples, num_examples) + + self.examples = examples * n_repeats + self.grader_model = grader_model + self.n_threads = n_threads + + def grade_sample( + self, + grading_response_dict: dict, + physician_labels: list[bool], + category: str, + ) -> tuple[dict, bool | None, str]: + metrics = { + 'num_physician_labels': len(physician_labels), + 'percent_physician_pos': + sum(physician_labels) / len(physician_labels), + } + + grader_label = grading_response_dict['criteria_met'] + assert grader_label is True or grader_label is False + metrics['model_predicted_positive'] = grader_label + explanation = grading_response_dict.get('explanation', + 'No explanation provided') + + category_metrics = {f'{category}: {k}': v for k, v in metrics.items()} + metrics = {**metrics, **category_metrics} + return metrics, grader_label, explanation + + def __call__(self, sampler: SamplerBase) -> EvalResult: + + def fn(row: dict) -> tuple[SingleEvalResult, bool | None]: + convo_with_response = row['prompt'] + [ + dict(content=row['completion'], role='assistant') + ] + prompt_str = '\n\n'.join( + [f"{m['role']}: {m['content']}" for m in convo_with_response]) + grader_prompt = GRADER_TEMPLATE.replace('<>', + prompt_str) + grader_prompt = grader_prompt.replace('<>', + row['rubric']) + grader_convo = [dict(content=grader_prompt, role='user')] + + while True: + sampler_response = sampler(grader_convo) + response_text = sampler_response.response_text + actual_queried_grader_convo = ( + sampler_response.actual_queried_message_list) + grading_response_dict = parse_json_to_dict(response_text) + if 'criteria_met' in grading_response_dict: + label = grading_response_dict['criteria_met'] + if label is True or label is False: + break + print('Grading failed due to bad JSON output, retrying...') + + metrics, grader_label, explanation = self.grade_sample( + grading_response_dict=grading_response_dict, + physician_labels=row['binary_labels'], + category=row['category'], + ) + score = metrics['model_predicted_positive'] + + # Create HTML for each sample result + html = common.jinja_env.from_string( + HEALTHBENCH_META_HTML_JINJA).render( + prompt_messages=actual_queried_grader_convo, + next_message=dict(content=response_text, role='assistant'), + score=metrics['model_predicted_positive'], + extracted_answer=response_text, + explanation=explanation, + ) + convo = actual_queried_grader_convo + [ + dict(content=response_text, role='assistant') + ] + return ( + SingleEvalResult(html=html, + score=score, + convo=convo, + metrics=metrics), + grader_label, + ) + + # Run evaluation and collect results + all_outputs = common.map_with_progress(fn, self.examples, + self.n_threads) + results: list[SingleEvalResult] + grader_labels: list[bool] + results, grader_labels = zip(*all_outputs) + + # model pairwise agreement metrics + model_agreement_metrics = compute_metrics_for_rater_by_class( + self_pred_list=grader_labels, + other_preds_list=[x['binary_labels'] for x in self.examples], + cluster_list=[x['category'] for x in self.examples], + model_or_physician='model', + ) + + # physicians: + 
physician_rating_lists = defaultdict(lambda: ([], [], [])) + for example in self.examples: + for i in range(len(example['binary_labels'])): + physician_id = example['anonymized_physician_ids'][i] + self_pred = example['binary_labels'][i] + other_preds = (example['binary_labels'][:i] + + example['binary_labels'][i + 1:]) + cluster = example['category'] + physician_rating_lists[physician_id][0].append(self_pred) + physician_rating_lists[physician_id][1].append(other_preds) + physician_rating_lists[physician_id][2].append(cluster) + + physician_agreement_metric_lists = defaultdict(dict) + for physician_id, ( + physician_rating_list, + other_preds_list, + cluster_list, + ) in physician_rating_lists.items(): + physician_agreement_metrics = compute_metrics_for_rater_by_class( + self_pred_list=physician_rating_list, + other_preds_list=other_preds_list, + cluster_list=cluster_list, + model_or_physician='physician', + ) + for k, v in physician_agreement_metrics.items(): + physician_agreement_metric_lists[k][physician_id] = v + + # consolidate final metrics and add agreement metrics + final_metrics = common.aggregate_results( + results, default_stats=('mean', 'n_samples', 'bootstrap_std')) + model_agreement_metrics_condensed: dict[str, float] = { + k: v['value'] + for k, v in model_agreement_metrics.items() + if v['value'] is not None + } + assert final_metrics.metrics is not None + final_metrics.metrics.update(model_agreement_metrics_condensed) + final_metrics.score = final_metrics.metrics[ + 'pairwise_model_f1_balanced'] + + final_metrics.metadata = { + 'model_agreement_metrics': model_agreement_metrics, + 'physician_agreement_metric_lists': + physician_agreement_metric_lists, + } + return final_metrics + + +def compute_metrics_for_rater_by_class( + self_pred_list: list[bool], + other_preds_list: list[list[bool]], + cluster_list: list[str], + model_or_physician: Literal['model', 'physician'], +) -> dict[str, dict[str, float | None]]: + # get all the metrics for each cluster + metric_lists = defaultdict(list) + for self_pred, other_preds, cluster in zip(self_pred_list, + other_preds_list, + cluster_list, + strict=True): + self_pred_str = 'pos' if self_pred else 'neg' + for other_pred in other_preds: + # precision. based on the grader's labels - + # i.e., calculated as TP / (TP + FP) + # so a prediction should be recorded whenever self_pred is True + precision_index_str = INDEX_STR_TEMPLATE.format( + model_or_physician=model_or_physician, + metric='precision', + pred_str=self_pred_str, + ) + metric_lists[precision_index_str].append(self_pred == other_pred) + precision_cluster_str = CLUSTER_STR_TEMPLATE.format( + cluster=cluster, index_str=precision_index_str) + metric_lists[precision_cluster_str].append(self_pred == other_pred) + + # recall. 
based on the ground truth labels - + # i.e., calculated as TP / (TP + FN) + # so a prediction should be recorded whenever other_pred is True + other_pred_str = 'pos' if other_pred else 'neg' + recall_index_str = INDEX_STR_TEMPLATE.format( + model_or_physician=model_or_physician, + metric='recall', + pred_str=other_pred_str, + ) + metric_lists[recall_index_str].append(self_pred == other_pred) + recall_cluster_str = CLUSTER_STR_TEMPLATE.format( + cluster=cluster, index_str=recall_index_str) + metric_lists[recall_cluster_str].append(self_pred == other_pred) + + metrics: dict[str, dict[str, float | None]] = {} + for index_str, metric_list in metric_lists.items(): + n = len(metric_list) + metric = sum(metric_list) / n if n > 0 else None + metrics[index_str] = { + 'n': n, + 'value': metric, + } + + f1_metrics = get_f1_metrics(metrics) + metrics.update(f1_metrics) + + balanced_metrics = get_balanced_metrics(metrics) + metrics.update(balanced_metrics) + + return metrics + + +def get_f1_metrics( + metrics: dict[str, dict[str, float | None]], +) -> dict[str, dict[str, float | None]]: + f1_metrics: dict[str, dict[str, float | None]] = {} + for precision_key_name in metrics: + if 'precision' in precision_key_name: + recall_key_name = precision_key_name.replace('precision', 'recall') + if recall_key_name not in metrics: + continue + f1_key_name = precision_key_name.replace('precision', 'f1') + assert f1_key_name not in metrics + f1_metrics[f1_key_name] = compute_f1_metric( + precision=metrics[precision_key_name], + recall=metrics[recall_key_name], + ) + + return f1_metrics + + +def compute_f1_metric( + precision: dict[str, float | None], + recall: dict[str, float | None], +) -> dict[str, float | None]: + precision_n = precision['n'] + recall_n = recall['n'] + assert precision_n is not None and recall_n is not None, 'n_pos or n_neg is None' + + precision_metric = precision['value'] + recall_metric = recall['value'] + if precision_metric is None or recall_metric is None: + f1_metric = None + n_f1 = ( + precision_n + recall_n + ) # precision_metric is None iff precision_n = 0 and recall_metric is None iff recall_n = 0, so if either is zero this gives TP + FN + FP without double counting + elif precision_metric == 0 and recall_metric == 0: + f1_metric = 0.0 + tp = precision_metric * precision_n # because precision = TP / (TP+FP) + n_f1 = precision_n + recall_n - tp # TP+FP + TP+FN − TP + else: + f1_metric = (2 * (precision_metric * recall_metric) / + (precision_metric + recall_metric)) + tp = precision_metric * precision_n # because precision = TP / (TP+FP) + n_f1 = precision_n + recall_n - tp # TP+FP + TP+FN − TP + + return { + 'n': n_f1, + 'value': f1_metric, + } + + +def get_balanced_metrics( + metrics: dict[str, dict[str, float | None]], +) -> dict[str, dict[str, float | None]]: + balanced_metrics: dict[str, dict[str, float | None]] = {} + for pos_key_name in metrics: + if 'pos' in pos_key_name: + neg_key_name = pos_key_name.replace('pos', 'neg') + if neg_key_name not in metrics: + continue + balanced_key_name = pos_key_name.replace('pos', 'balanced') + assert balanced_key_name not in metrics + balanced_metrics[balanced_key_name] = compute_balanced_metric( + metric_pos=metrics[pos_key_name], + metric_neg=metrics[neg_key_name], + ) + + return balanced_metrics + + +def compute_balanced_metric( + metric_pos: dict[str, float | None], + metric_neg: dict[str, float | None], +) -> dict[str, float | None]: + n_pos = metric_pos['n'] + n_neg = metric_neg['n'] + assert n_pos is not None and n_neg is not None, 
'n_pos or n_neg is None' + + pos_metric = metric_pos['value'] + neg_metric = metric_neg['value'] + if pos_metric is None or neg_metric is None: + metric = None + else: + metric = (pos_metric + neg_metric) / 2 + + return { + 'n': n_pos + n_neg, + # note: this overcounts samples going towards the balanced F1 + 'value': metric, + } diff --git a/opencompass/datasets/healthbench/healthbench_meta_eval_test.py b/opencompass/datasets/healthbench/healthbench_meta_eval_test.py new file mode 100644 index 00000000..eee9972d --- /dev/null +++ b/opencompass/datasets/healthbench/healthbench_meta_eval_test.py @@ -0,0 +1,165 @@ +from . import healthbench_meta_eval + + +def test_compute_agreement_for_rater_by_class(): + self_pred_list = [True, False, True] + other_preds_list = [[True, True, False], [True, False], [False]] + cluster_list = ['a', 'a', 'b'] + model_or_physician = 'model' + metrics = healthbench_meta_eval.compute_metrics_for_rater_by_class( + self_pred_list, other_preds_list, cluster_list, model_or_physician + ) + + # precision overall + index_str_pos_precision = healthbench_meta_eval.INDEX_STR_TEMPLATE.format( + model_or_physician=model_or_physician, metric='precision', pred_str='pos' + ) + index_str_neg_precision = healthbench_meta_eval.INDEX_STR_TEMPLATE.format( + model_or_physician=model_or_physician, metric='precision', pred_str='neg' + ) + overall_pos_precision = metrics[index_str_pos_precision] + overall_neg_precision = metrics[index_str_neg_precision] + expected_overall_pos_precision = (2 + 0 + 0) / (3 + 0 + 1) + expected_overall_neg_precision = (0 + 1 + 0) / (0 + 2 + 0) + assert overall_pos_precision['value'] == expected_overall_pos_precision + assert overall_neg_precision['value'] == expected_overall_neg_precision + assert overall_pos_precision['n'] == 4 + assert overall_neg_precision['n'] == 2 + + # recall overall + index_str_pos_recall = healthbench_meta_eval.INDEX_STR_TEMPLATE.format( + model_or_physician=model_or_physician, metric='recall', pred_str='pos' + ) + index_str_neg_recall = healthbench_meta_eval.INDEX_STR_TEMPLATE.format( + model_or_physician=model_or_physician, metric='recall', pred_str='neg' + ) + overall_pos_recall = metrics[index_str_pos_recall] + overall_neg_recall = metrics[index_str_neg_recall] + expected_overall_pos_recall = (2 + 0 + 0) / (2 + 1 + 0) + expected_overall_neg_recall = (0 + 1 + 0) / (1 + 1 + 1) + assert overall_pos_recall['value'] == expected_overall_pos_recall + assert overall_neg_recall['value'] == expected_overall_neg_recall + assert overall_pos_recall['n'] == 3 + assert overall_neg_recall['n'] == 3 + + # f1 overall + index_str_pos_f1 = healthbench_meta_eval.INDEX_STR_TEMPLATE.format( + model_or_physician=model_or_physician, metric='f1', pred_str='pos' + ) + index_str_neg_f1 = healthbench_meta_eval.INDEX_STR_TEMPLATE.format( + model_or_physician=model_or_physician, metric='f1', pred_str='neg' + ) + overall_pos_f1 = metrics[index_str_pos_f1] + overall_neg_f1 = metrics[index_str_neg_f1] + expected_overall_pos_f1 = ( + 2 + * expected_overall_pos_precision + * expected_overall_pos_recall + / (expected_overall_pos_precision + expected_overall_pos_recall) + ) + expected_overall_neg_f1 = ( + 2 + * expected_overall_neg_precision + * expected_overall_neg_recall + / (expected_overall_neg_precision + expected_overall_neg_recall) + ) + assert overall_pos_f1['value'] == expected_overall_pos_f1 + assert overall_neg_f1['value'] == expected_overall_neg_f1 + + # balanced f1 + index_str_balanced_f1 = healthbench_meta_eval.INDEX_STR_TEMPLATE.format( + 
model_or_physician=model_or_physician, metric='f1', pred_str='balanced' + ) + balanced_f1 = metrics[index_str_balanced_f1] + expected_balanced_f1 = (expected_overall_pos_f1 + expected_overall_neg_f1) / 2 + assert balanced_f1['value'] == expected_balanced_f1 + + # by cluster + # precision + cluster_a_str_pos_precision = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( + cluster='a', index_str=index_str_pos_precision + ) + cluster_a_str_neg_precision = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( + cluster='a', index_str=index_str_neg_precision + ) + cluster_a_pos_precision = metrics[cluster_a_str_pos_precision] + cluster_a_neg_precision = metrics[cluster_a_str_neg_precision] + assert cluster_a_pos_precision['value'] == ( + # example 1, 2 in order + (2 + 0) / (3 + 0) + ) + assert cluster_a_neg_precision['value'] == ( + # example 1, 2 in order + (0 + 1) / (0 + 2) + ) + assert cluster_a_pos_precision['n'] == 3 + assert cluster_a_neg_precision['n'] == 2 + + # recall + cluster_a_str_pos_recall = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( + cluster='a', index_str=index_str_pos_recall + ) + cluster_a_str_neg_recall = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( + cluster='a', index_str=index_str_neg_recall + ) + cluster_a_pos_recall = metrics[cluster_a_str_pos_recall] + cluster_a_neg_recall = metrics[cluster_a_str_neg_recall] + assert cluster_a_pos_recall['value'] == ( + # example 1, 2 in order + (2 + 0) / (2 + 1) + ) + assert cluster_a_neg_recall['value'] == ( + # example 1, 2 in order + (0 + 1) / (1 + 1) + ) + assert cluster_a_pos_recall['n'] == 3 + assert cluster_a_neg_recall['n'] == 2 + + # cluster B + # precision + cluster_b_str_pos_precision = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( + cluster='b', index_str=index_str_pos_precision + ) + cluster_b_str_neg_precision = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( + cluster='b', index_str=index_str_neg_precision + ) + cluster_b_str_pos_precision = metrics[cluster_b_str_pos_precision] + assert cluster_b_str_neg_precision not in metrics + assert cluster_b_str_pos_precision['value'] == ( + # example 3 only + 0 / 1 + ) + assert cluster_b_str_pos_precision['n'] == 1 + + # recall + cluster_b_str_pos_recall = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( + cluster='b', index_str=index_str_pos_recall + ) + cluster_b_str_neg_recall = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( + cluster='b', index_str=index_str_neg_recall + ) + assert cluster_b_str_pos_recall not in metrics + cluster_b_neg_recall = metrics[cluster_b_str_neg_recall] + assert cluster_b_neg_recall['value'] == ( + # example 3 only + 0 / 1 + ) + assert cluster_b_neg_recall['n'] == 1 + + # f1 + index_str_pos_f1 = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( + cluster='b', index_str=index_str_pos_f1 + ) + index_str_neg_f1 = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( + cluster='b', index_str=index_str_neg_f1 + ) + index_str_balanced_f1 = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( + cluster='b', index_str=index_str_balanced_f1 + ) + assert index_str_pos_f1 not in metrics + assert index_str_neg_f1 not in metrics + assert index_str_balanced_f1 not in metrics + + +if __name__ == '__main__': + test_compute_agreement_for_rater_by_class() diff --git a/opencompass/datasets/healthbench/sampler/chat_completion_sampler.py b/opencompass/datasets/healthbench/sampler/chat_completion_sampler.py new file mode 100644 index 00000000..a6dc86a7 --- /dev/null +++ b/opencompass/datasets/healthbench/sampler/chat_completion_sampler.py @@ -0,0 
+1,99 @@ +import time +from typing import Any + +import openai +from openai import OpenAI + +from ..types import MessageList, SamplerBase, SamplerResponse + +OPENAI_SYSTEM_MESSAGE_API = 'You are a helpful assistant.' +OPENAI_SYSTEM_MESSAGE_CHATGPT = ( + 'You are ChatGPT, a large language model trained by OpenAI, based on the GPT-4 architecture.' + + '\nKnowledge cutoff: 2023-12\nCurrent date: 2024-04-01') + +import os + + +class ChatCompletionSampler(SamplerBase): + """Sample from OpenAI's chat completion API.""" + + def __init__( + self, + model: str = 'gpt-3.5-turbo', + system_message: str | None = None, + temperature: float = 0.5, + max_tokens: int = 1024, + ): + self.api_key_name = 'OPENAI_API_KEY' + self.client = OpenAI( + base_url=os.getenv('OC_JUDGE_API_BASE'), + api_key=os.getenv('OC_JUDGE_API_KEY'), + # OC_JUDGE_MODEL + ) + # using api_key=os.environ.get("OPENAI_API_KEY") # please set your API_KEY + self.model = model + self.system_message = system_message + self.temperature = temperature + self.max_tokens = max_tokens + self.image_format = 'url' + + def _handle_image( + self, + image: str, + encoding: str = 'base64', + format: str = 'png', + fovea: int = 768, + ): + new_image = { + 'type': 'image_url', + 'image_url': { + 'url': f'data:image/{format};{encoding},{image}', + }, + } + return new_image + + def _handle_text(self, text: str): + return {'type': 'text', 'text': text} + + def _pack_message(self, role: str, content: Any): + return {'role': str(role), 'content': content} + + def __call__(self, message_list: MessageList) -> SamplerResponse: + if self.system_message: + message_list = [self._pack_message('system', self.system_message) + ] + message_list + trial = 0 + while True: + try: + response = self.client.chat.completions.create( + model=self.model, + messages=message_list, + temperature=self.temperature, + max_tokens=self.max_tokens, + ) + content = response.choices[0].message.content + if content is None: + raise ValueError( + 'OpenAI API returned empty response; retrying') + return SamplerResponse( + response_text=content, + response_metadata={'usage': response.usage}, + actual_queried_message_list=message_list, + ) + # NOTE: BadRequestError is triggered once for MMMU, please uncomment if you are reruning MMMU + except openai.BadRequestError as e: + print('Bad Request Error', e) + return SamplerResponse( + response_text='No response (bad request).', + response_metadata={'usage': None}, + actual_queried_message_list=message_list, + ) + except Exception as e: + exception_backoff = 2**trial # expontial back off + print( + f'Rate limit exception so wait and retry {trial} after {exception_backoff} sec', + e, + ) + time.sleep(exception_backoff) + trial += 1 + # unknown error shall throw exception diff --git a/opencompass/datasets/healthbench/sampler/claude_sampler.py b/opencompass/datasets/healthbench/sampler/claude_sampler.py new file mode 100644 index 00000000..780a9953 --- /dev/null +++ b/opencompass/datasets/healthbench/sampler/claude_sampler.py @@ -0,0 +1,103 @@ +import os +import time + +import anthropic + +from .. import common +from ..types import MessageList, SamplerBase, SamplerResponse + +CLAUDE_SYSTEM_MESSAGE_LMSYS = ( + 'The assistant is Claude, created by Anthropic. The current date is ' + "{currentDateTime}. 
Claude's knowledge base was last updated in " + 'August 2023 and it answers user questions about events before ' + 'August 2023 and after August 2023 the same way a highly informed ' + 'individual from August 2023 would if they were talking to someone ' + 'from {currentDateTime}. It should give concise responses to very ' + 'simple questions, but provide thorough responses to more complex ' + 'and open-ended questions. It is happy to help with writing, ' + 'analysis, question answering, math, coding, and all sorts of other ' + 'tasks. It uses markdown for coding. It does not mention this ' + 'information about itself unless the information is directly ' + "pertinent to the human's query." +).format(currentDateTime='2024-04-01') +# reference: https://github.com/lm-sys/FastChat/blob/7899355ebe32117fdae83985cf8ee476d2f4243f/fastchat/conversation.py#L894 + + +class ClaudeCompletionSampler(SamplerBase): + + def __init__( + self, + model: str, + system_message: str | None = None, + temperature: float = 0.0, # default in Anthropic example + max_tokens: int = 4096, + ): + self.client = anthropic.Anthropic() + self.api_key = os.environ.get('ANTHROPIC_API_KEY') # please set your API_KEY + self.model = model + self.system_message = system_message + self.temperature = temperature + self.max_tokens = max_tokens + self.image_format = 'base64' + + def _handle_image( + self, + image: str, + encoding: str = 'base64', + format: str = 'png', + fovea: int = 768, + ): + new_image = { + 'type': 'image', + 'source': { + 'type': encoding, + 'media_type': f'image/{format}', + 'data': image, + }, + } + return new_image + + def _handle_text(self, text): + return {'type': 'text', 'text': text} + + def _pack_message(self, role, content): + return {'role': str(role), 'content': content} + + def __call__(self, message_list: MessageList) -> SamplerResponse: + trial = 0 + while True: + try: + if not common.has_only_user_assistant_messages(message_list): + raise ValueError(f'Claude sampler only supports user and assistant messages, got {message_list}') + if self.system_message: + response_message = self.client.messages.create( + model=self.model, + system=self.system_message, + max_tokens=self.max_tokens, + temperature=self.temperature, + messages=message_list, + ) + claude_input_messages: MessageList = [{'role': 'system', 'content': self.system_message}] + message_list + else: + response_message = self.client.messages.create( + model=self.model, + max_tokens=self.max_tokens, + temperature=self.temperature, + messages=message_list, + ) + claude_input_messages = message_list + response_text = response_message.content[0].text + return SamplerResponse( + response_text=response_text, + response_metadata={}, + actual_queried_message_list=claude_input_messages, + ) + except anthropic.RateLimitError as e: + exception_backoff = 2**trial # expontial back off + print( + f'Rate limit exception so wait and retry {trial} after {exception_backoff} sec', + e, + ) + time.sleep(exception_backoff) + trial += 1 + # unknown error shall throw exception diff --git a/opencompass/datasets/healthbench/sampler/o_chat_completion_sampler.py b/opencompass/datasets/healthbench/sampler/o_chat_completion_sampler.py new file mode 100644 index 00000000..39e115e5 --- /dev/null +++ b/opencompass/datasets/healthbench/sampler/o_chat_completion_sampler.py @@ -0,0 +1,78 @@ +import time +from typing import Any + +import openai +from openai import OpenAI + +from ..types import MessageList, SamplerBase, SamplerResponse + + +class 
OChatCompletionSampler(SamplerBase): + """Sample from OpenAI's chat completion API for o series models.""" + + def __init__( + self, + *, + reasoning_effort: str | None = None, + model: str = 'o1-mini', + ): + self.api_key_name = 'OPENAI_API_KEY' + self.client = OpenAI() + # using api_key=os.environ.get("OPENAI_API_KEY") # please set your API_KEY + self.model = model + self.image_format = 'url' + self.reasoning_effort = reasoning_effort + + def _handle_image( + self, + image: str, + encoding: str = 'base64', + format: str = 'png', + fovea: int = 768, + ): + new_image = { + 'type': 'image_url', + 'image_url': { + 'url': f'data:image/{format};{encoding},{image}', + }, + } + return new_image + + def _handle_text(self, text: str): + return {'type': 'text', 'text': text} + + def _pack_message(self, role: str, content: Any): + return {'role': str(role), 'content': content} + + def __call__(self, message_list: MessageList) -> SamplerResponse: + trial = 0 + while True: + try: + response = self.client.chat.completions.create( + model=self.model, + messages=message_list, + reasoning_effort=self.reasoning_effort, + ) + content = response.choices[0].message.content + return SamplerResponse( + response_text=content, + response_metadata={'usage': response.usage}, + actual_queried_message_list=message_list, + ) + # NOTE: BadRequestError is triggered once for MMMU, please uncomment if you are reruning MMMU + except openai.BadRequestError as e: + print('Bad Request Error', e) + return SamplerResponse( + response_text='', + response_metadata={'usage': None}, + actual_queried_message_list=message_list, + ) + except Exception as e: + exception_backoff = 2**trial # expontial back off + print( + f'Rate limit exception so wait and retry {trial} after {exception_backoff} sec', + e, + ) + time.sleep(exception_backoff) + trial += 1 + # unknown error shall throw exception diff --git a/opencompass/datasets/healthbench/sampler/responses_sampler.py b/opencompass/datasets/healthbench/sampler/responses_sampler.py new file mode 100644 index 00000000..a152cbaf --- /dev/null +++ b/opencompass/datasets/healthbench/sampler/responses_sampler.py @@ -0,0 +1,97 @@ +import os +import time +from typing import Any + +import openai +from openai import OpenAI + +from ..types import MessageList, SamplerBase, SamplerResponse + + +class ResponsesSampler(SamplerBase): + """Sample from OpenAI's responses API.""" + + def __init__( + self, + model: str = 'gpt-4.1', + system_message: str | None = None, + temperature: float = 0.5, + max_tokens: int = 1024, + reasoning_model: bool = False, + reasoning_effort: str | None = None, + ): + self.api_key_name = 'OPENAI_API_KEY' + assert os.environ.get('OPENAI_API_KEY'), 'Please set OPENAI_API_KEY' + self.client = OpenAI() + self.model = model + self.system_message = system_message + self.temperature = temperature + self.max_tokens = max_tokens + self.image_format = 'url' + self.reasoning_model = reasoning_model + self.reasoning_effort = reasoning_effort + + def _handle_image( + self, + image: str, + encoding: str = 'base64', + format: str = 'png', + fovea: int = 768, + ) -> dict[str, Any]: + new_image = { + 'type': 'input_image', + 'image_url': f'data:image/{format};{encoding},{image}', + } + return new_image + + def _handle_text(self, text: str) -> dict[str, Any]: + return {'type': 'input_text', 'text': text} + + def _pack_message(self, role: str, content: Any) -> dict[str, Any]: + return {'role': role, 'content': content} + + def __call__(self, message_list: MessageList) -> SamplerResponse: + if 
self.system_message: + message_list = [ + self._pack_message('developer', self.system_message) + ] + message_list + trial = 0 + while True: + try: + if self.reasoning_model: + reasoning = ({ + 'effort': self.reasoning_effort + } if self.reasoning_effort else None) + response = self.client.responses.create( + model=self.model, + input=message_list, + reasoning=reasoning, + ) + else: + response = self.client.responses.create( + model=self.model, + input=message_list, + temperature=self.temperature, + max_output_tokens=self.max_tokens, + ) + return SamplerResponse( + response_text=response.output_text, + response_metadata={'usage': response.usage}, + actual_queried_message_list=message_list, + ) + except openai.BadRequestError as e: + print('Bad Request Error', e) + return SamplerResponse( + response_text='', + response_metadata={'usage': None}, + actual_queried_message_list=message_list, + ) + except Exception as e: + exception_backoff = 2**trial # expontial back off + print( + f'Rate limit exception so wait and retry {trial} after {exception_backoff} sec', + e, + ) + time.sleep(exception_backoff) + trial += 1 + # unknown error shall throw exception diff --git a/opencompass/openicl/icl_prompt_template.py b/opencompass/openicl/icl_prompt_template.py index db2dcc91..811a4006 100644 --- a/opencompass/openicl/icl_prompt_template.py +++ b/opencompass/openicl/icl_prompt_template.py @@ -257,3 +257,22 @@ class PromptTemplate: prompt.append(dict(section='end', pos='end')) return prompt + + +class HealthBenchTemplate: + + def __init__( + self, + key: Union[Dict, str], + ) -> None: + self.key = key + + def generate_item(self, entry: Dict, **kwargs): + template = [{'section': 'round', 'pos': 'begin'}] + end_template = [{'section': 'round', 'pos': 'end'}] + mid = entry[self.key] + template = template + mid + end_template + ret = PromptList() + for item in template: + ret.append(item) + return ret From 95d8d2ba4d3fed550614cb094a1f683a09457483 Mon Sep 17 00:00:00 2001 From: huihui Date: Fri, 16 May 2025 12:52:07 +0000 Subject: [PATCH 2/8] fix irrelevant files --- opencompass/datasets/healthbench/gpqa_eval.py | 88 ----- .../datasets/healthbench/healthbench.py | 8 +- .../healthbench/healthbench_eval_test.py | 32 -- .../datasets/healthbench/healthbench_meta.py | 262 -------------- .../healthbench/healthbench_meta_eval.py | 339 ------------------ .../healthbench/healthbench_meta_eval_test.py | 165 --------- .../healthbench/sampler/claude_sampler.py | 103 ------ .../sampler/o_chat_completion_sampler.py | 78 ---- .../healthbench/sampler/responses_sampler.py | 97 ----- opencompass/datasets/healthbench/types.py | 55 +++ 10 files changed, 58 insertions(+), 1169 deletions(-) delete mode 100644 opencompass/datasets/healthbench/gpqa_eval.py delete mode 100644 opencompass/datasets/healthbench/healthbench_eval_test.py delete mode 100644 opencompass/datasets/healthbench/healthbench_meta.py delete mode 100644 opencompass/datasets/healthbench/healthbench_meta_eval.py delete mode 100644 opencompass/datasets/healthbench/healthbench_meta_eval_test.py delete mode 100644 opencompass/datasets/healthbench/sampler/claude_sampler.py delete mode 100644 opencompass/datasets/healthbench/sampler/o_chat_completion_sampler.py delete mode 100644 opencompass/datasets/healthbench/sampler/responses_sampler.py create mode 100644 opencompass/datasets/healthbench/types.py diff --git a/opencompass/datasets/healthbench/gpqa_eval.py b/opencompass/datasets/healthbench/gpqa_eval.py deleted file mode 100644 index 13f09b4b..00000000 --- 
a/opencompass/datasets/healthbench/gpqa_eval.py +++ /dev/null @@ -1,88 +0,0 @@ -""" -GPQA: A Graduate-Level Google-Proof Q&A Benchmark -David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, Samuel R. Bowman -https://arxiv.org/abs/2311.12022 -""" - -import random -import re - -import pandas - -from . import common -from .common import (ANSWER_PATTERN_MULTICHOICE, HTML_JINJA, - format_multichoice_question) -from .types import Eval, EvalResult, MessageList, SamplerBase, SingleEvalResult - - -class GPQAEval(Eval): - - def __init__( - self, - n_repeats: int = 4, - variant: str = 'diamond', - num_examples: int - | None = None, # restrict to a subset of the data for debugging - ): - df = pandas.read_csv( - f'https://openaipublic.blob.core.windows.net/simple-evals/gpqa_{variant}.csv' - ) - examples = [row.to_dict() for _, row in df.iterrows()] - rng = random.Random(0) - if num_examples: - assert n_repeats == 1, 'n_repeats only supported for num_examples = None' - examples = rng.sample(examples, num_examples) - examples = examples * n_repeats - examples = [ - example | { - 'permutation': rng.sample(range(4), 4) - } for example in examples - ] - self.examples = examples - self.n_repeats = n_repeats - - def __call__(self, sampler: SamplerBase) -> EvalResult: - - def fn(row: dict): - choices = [ - row['Correct Answer'], - row['Incorrect Answer 1'], - row['Incorrect Answer 2'], - row['Incorrect Answer 3'], - ] - choices = [choices[i] for i in row['permutation']] - correct_index = choices.index(row['Correct Answer']) - correct_answer = 'ABCD'[correct_index] - choices_dict = dict(A=choices[0], - B=choices[1], - C=choices[2], - D=choices[3], - Question=row['Question']) - prompt_messages = [ - sampler._pack_message( - content=format_multichoice_question(choices_dict), - role='user') - ] - sampler_response = sampler(prompt_messages) - response_text = sampler_response.response_text - actual_queried_prompt_messages = sampler_response.actual_queried_message_list - match = re.search(ANSWER_PATTERN_MULTICHOICE, response_text) - extracted_answer = match.group(1) if match else None - score = 1.0 if extracted_answer == correct_answer else 0.0 - html = common.jinja_env.from_string(HTML_JINJA).render( - prompt_messages=actual_queried_prompt_messages, - next_message=dict(content=response_text, role='assistant'), - score=score, - correct_answer=correct_answer, - extracted_answer=extracted_answer, - ) - convo = actual_queried_prompt_messages + [ - dict(content=response_text, role='assistant') - ] - return SingleEvalResult(html=html, - score=score, - convo=convo, - metrics={'chars': len(response_text)}) - - results = common.map_with_progress(fn, self.examples) - return common.aggregate_results(results) diff --git a/opencompass/datasets/healthbench/healthbench.py b/opencompass/datasets/healthbench/healthbench.py index b53d2bb8..fe084d63 100644 --- a/opencompass/datasets/healthbench/healthbench.py +++ b/opencompass/datasets/healthbench/healthbench.py @@ -1,16 +1,14 @@ import json import re -from datasets import Dataset, load_dataset +from datasets import load_dataset from opencompass.openicl import BaseEvaluator -from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS -from opencompass.utils import get_logger +from opencompass.registry import LOAD_DATASET from ..base import BaseDataset from . 
import common -from .healthbench_eval import HealthBenchEval, RubricItem -from .healthbench_meta_eval import HealthBenchMetaEval +from .healthbench_eval import RubricItem from .sampler.chat_completion_sampler import ChatCompletionSampler from .types import SingleEvalResult diff --git a/opencompass/datasets/healthbench/healthbench_eval_test.py b/opencompass/datasets/healthbench/healthbench_eval_test.py deleted file mode 100644 index c2bb64d4..00000000 --- a/opencompass/datasets/healthbench/healthbench_eval_test.py +++ /dev/null @@ -1,32 +0,0 @@ -from .healthbench_eval import RubricItem, calculate_score - - -def test_calculate_score(): - rubric_items = [ - RubricItem(criterion='test', points=7, tags=[]), - RubricItem(criterion='test', points=5, tags=[]), - RubricItem(criterion='test', points=10, tags=[]), - RubricItem(criterion='test', points=-6, tags=[]), - ] - grading_response_list = [ - { - 'criteria_met': True - }, - { - 'criteria_met': False - }, - { - 'criteria_met': True - }, - { - 'criteria_met': True - }, - ] - total_possible = 7 + 5 + 10 - achieved = 7 + 0 + 10 - 6 - assert (calculate_score(rubric_items, grading_response_list) == achieved / - total_possible) - - -if __name__ == '__main__': - test_calculate_score() diff --git a/opencompass/datasets/healthbench/healthbench_meta.py b/opencompass/datasets/healthbench/healthbench_meta.py deleted file mode 100644 index 64ce8de1..00000000 --- a/opencompass/datasets/healthbench/healthbench_meta.py +++ /dev/null @@ -1,262 +0,0 @@ -import re - -from datasets import Dataset, load_dataset - -from opencompass.openicl import BaseEvaluator -from opencompass.registry import LOAD_DATASET, TEXT_POSTPROCESSORS -from opencompass.utils import get_logger - -from ..base import BaseDataset -from .healthbench_eval import HealthBenchEval, RubricItem -from .healthbench_meta_eval import HealthBenchMetaEval - - -def _parse(item): - item['rubrics'] = [RubricItem.from_dict(d) for d in item['rubrics']] - return item - -def _parse_meta(item): - item['rubrics'] = [RubricItem.from_dict(d) for d in item['rubrics']] - return item - -@LOAD_DATASET.register_module() -class HealthBenchDataset(BaseDataset): - - @staticmethod - def load(path: str, prompt_mode: str, **kwargs): - subset = kwargs.get('subset') - # nrepeats=1 - # nthreads = 1 - match subset: - case 'healthbench': - data_files = {'test': '2025-05-07-06-14-12_oss_eval.jsonl'} - return HealthBenchEval( - grader_model=grading_sampler, - n_repeats=1, - n_threads=1, - subset_name=None, - ) - case 'healthbench_hard': - data_files = {'test': 'hard_2025-05-08-21-00-10.jsonl'} - return HealthBenchEval( - grader_model=grading_sampler, - n_repeats=1, - n_threads=1, - subset_name='hard', - ) - case 'healthbench_consensus': - data_files = {'test': 'consensus_2025-05-09-20-00-46.jsonl'} - return HealthBenchEval( - grader_model=grading_sampler, - n_repeats=1, - n_threads=1, - subset_name='consensus', - ) - case 'healthbench_meta': - data_files = {'test': '2025-05-07-06-14-12_oss_meta_eval.jsonl' } - return HealthBenchMetaEval( - grader_model=grading_sampler, - n_repeats=1, - n_threads=1, - ) - case _: - raise Exception(f'Unrecognized eval type: {eval_name}') - - dataset = load_dataset(path, data_files=data_files, split='test') - - dataset = dataset.map(lambda item: _parse(item, prompt_mode)) - - return dataset - - -class HealthBenchEvaluator(BaseEvaluator): - - def score(self, predictions, references, test_set): - method = test_set['prompt_mode'][0] - - if len(predictions) != len(references): - return {'error': 'preds and 
refrs have different length'} - correct = 0 - count = 0 - details = [] - for idx, (i, j) in enumerate(zip(predictions, references)): - i = answer_cleansing(method, i, test_set['options'][idx], - test_set['label'][idx]) - detail = {'pred': i, 'answer': j, 'correct': False} - count += 1 - if i == j: - correct += 1 - detail['correct'] = True - details.append(detail) - result = {'accuracy': 100 * correct / count, 'details': details} - return result - - -@TEXT_POSTPROCESSORS.register_module() -def answer_cleansing( - method: str, - prediction: str, - options: list, - label: str, -) -> str: - - # Clean up unwanted phrases in the prediction - for unwanted_phrase in [ - 'I understand', - 'A through J', - 'A through E', - 'A through D', - ]: - prediction = prediction.replace(unwanted_phrase, '') - - options_num = len(options) - options = [chr(65 + i) for i in range(options_num)] - options_str = r'\b(' + '|'.join(options) + r')\b' - prediction = re.findall(options_str, prediction) - - if len(prediction) == 0: - prediction = [] - else: - # If there is a "label" and its length is 1, - # process prediction accordingly - if len(label) == 1: - if method == 'few-shot': - answer_flag = True if len(prediction) > 1 else False - # choose the first or last element based on the answer_flag - if answer_flag: - prediction = [prediction[0]] - else: - prediction = [prediction[-1]] - elif method == 'zero-shot': - # choose the first element in list - prediction = [prediction[0]] - else: - raise ValueError('Method is not properly defined ...') - - # Remove trailing period if it exists - if prediction[0] and prediction[0].endswith('.'): - prediction[0] = prediction[0][:-1] - - return prediction[0] - - -def _generic_llmjudge_postprocess(judgement: str): - match = re.search(r'(A|B)', judgement) - grade_letter = (match.group(0) if match else 'B' - ) # Default to "INCORRECT" if no match - return grade_letter - - -def HealthBench_llmjudge_postprocess( - output: dict, - output_path: str, - dataset: Dataset, -) -> dict: - # Get the original dataset - original_dataset = dataset.reader.dataset['test'] - - judged_answers = [] - original_responses = [] - references = [] - details = [] - - # Initialize statistics dictionaries - stats = {'medical_task': {}, 'body_system': {}, 'question_type': {}} - - total_correct = 0 - total_count = 0 - - # Process each sample - for k, v in output.items(): - idx = int(k) # Convert key to integer for indexing - original_responses.append(v['prediction']) - processed_judge = _generic_llmjudge_postprocess(v['prediction']) - - # Get category information from the dataset - sample = original_dataset[idx] - medical_task = sample.get('medical_task', 'unknown') - body_system = sample.get('body_system', 'unknown') - question_type = sample.get('question_type', 'unknown') - - # Initialize category stats if not exists - for level, key in [ - ('medical_task', medical_task), - ('body_system', body_system), - ('question_type', question_type), - ]: - if key not in stats[level]: - stats[level][key] = {'correct': 0, 'total': 0} - - # Record the judgment - if processed_judge is not None: - judged_answers.append(processed_judge) - try: - gold = v['gold'] - references.append(gold) - except KeyError: - get_logger().warning( - f'No gold answer for {k}, use empty string as reference!') - gold = '' - references.append('') - - # Check if the answer is correct (A means correct) - is_correct = processed_judge == 'A' - total_count += 1 - - if is_correct: - total_correct += 1 - # Update category stats - for level, key in [ - 
('medical_task', medical_task), - ('body_system', body_system), - ('question_type', question_type), - ]: - stats[level][key]['correct'] += 1 - - # Update category totals - for level, key in [ - ('medical_task', medical_task), - ('body_system', body_system), - ('question_type', question_type), - ]: - stats[level][key]['total'] += 1 - # Add to details - details.append({ - 'id': k, - 'question': sample['question'], - 'options': sample['options'], - 'origin_prompt': v['origin_prompt'], - 'llm_judge': processed_judge, - 'gold': gold, - 'is_correct': is_correct, - 'medical_task': medical_task, - 'body_system': body_system, - 'question_type': question_type, - }) - - # Calculate overall accuracy with two decimal places - overall_accuracy = (round( - (total_correct / total_count * 100), 2) if total_count > 0 else 0.00) - - # Initialize results dictionary - results = { - 'accuracy': overall_accuracy, - 'total_correct': total_correct, - 'total_count': total_count, - 'details': details, - } - - # Calculate accuracy for each category and flatten into results - for level in stats: - for key, value in stats[level].items(): - if value['total'] > 0: - # Calculate accuracy with two decimal places - accuracy = round((value['correct'] / value['total'] * 100), 2) - - # Create a flattened key for the category - flat_key = f'HealthBench-{key}' - - # Add to results - results[flat_key] = accuracy - - return results diff --git a/opencompass/datasets/healthbench/healthbench_meta_eval.py b/opencompass/datasets/healthbench/healthbench_meta_eval.py deleted file mode 100644 index d4e9b748..00000000 --- a/opencompass/datasets/healthbench/healthbench_meta_eval.py +++ /dev/null @@ -1,339 +0,0 @@ -"""This script evaluates a grader model on grading HealthBench rubrics. It -effectively evaluates the evaluator against physician opinion, so we call it a -meta-evaluation. - -To run, use the following command (working directory should contain simple- -evals folder): `python -m simple-evals.simple_evals --eval=healthbench_meta ---model=gpt-4.1` -""" - -import json -import random -from collections import defaultdict -from typing import Literal - -import blobfile as bf - -from . import common -from .healthbench_eval import GRADER_TEMPLATE, parse_json_to_dict -from .types import Eval, EvalResult, SamplerBase, SingleEvalResult - -INPUT_PATH = 'https://openaipublic.blob.core.windows.net/simple-evals/healthbench/2025-05-07-06-14-12_oss_meta_eval.jsonl' -INDEX_STR_TEMPLATE = 'pairwise_{model_or_physician}_{metric}_{pred_str}' -CLUSTER_STR_TEMPLATE = '{cluster}: {index_str}' - -HEALTHBENCH_META_HTML_JINJA = (common.HTML_JINJA.replace( - '

<p>Correct Answer: {{ correct_answer }}</p>\n', -    '', -) + "<p>Explanation for grader's label: {{ explanation }}</p>
") - - -class HealthBenchMetaEval(Eval): - - def __init__( - self, - grader_model: SamplerBase, - num_examples: int | None = None, - n_threads: int = 120, - n_repeats: int = 1, - ): - with bf.BlobFile(INPUT_PATH, 'rb') as f: - examples = [json.loads(line) for line in f] - print(f'Loaded {len(examples)} examples from {INPUT_PATH}') - - rng = random.Random(0) - - if num_examples is not None and len(examples) > num_examples: - examples = rng.sample(examples, num_examples) - - self.examples = examples * n_repeats - self.grader_model = grader_model - self.n_threads = n_threads - - def grade_sample( - self, - grading_response_dict: dict, - physician_labels: list[bool], - category: str, - ) -> tuple[dict, bool | None, str]: - metrics = { - 'num_physician_labels': len(physician_labels), - 'percent_physician_pos': - sum(physician_labels) / len(physician_labels), - } - - grader_label = grading_response_dict['criteria_met'] - assert grader_label is True or grader_label is False - metrics['model_predicted_positive'] = grader_label - explanation = grading_response_dict.get('explanation', - 'No explanation provided') - - category_metrics = {f'{category}: {k}': v for k, v in metrics.items()} - metrics = {**metrics, **category_metrics} - return metrics, grader_label, explanation - - def __call__(self, sampler: SamplerBase) -> EvalResult: - - def fn(row: dict) -> tuple[SingleEvalResult, bool | None]: - convo_with_response = row['prompt'] + [ - dict(content=row['completion'], role='assistant') - ] - prompt_str = '\n\n'.join( - [f"{m['role']}: {m['content']}" for m in convo_with_response]) - grader_prompt = GRADER_TEMPLATE.replace('<>', - prompt_str) - grader_prompt = grader_prompt.replace('<>', - row['rubric']) - grader_convo = [dict(content=grader_prompt, role='user')] - - while True: - sampler_response = sampler(grader_convo) - response_text = sampler_response.response_text - actual_queried_grader_convo = ( - sampler_response.actual_queried_message_list) - grading_response_dict = parse_json_to_dict(response_text) - if 'criteria_met' in grading_response_dict: - label = grading_response_dict['criteria_met'] - if label is True or label is False: - break - print('Grading failed due to bad JSON output, retrying...') - - metrics, grader_label, explanation = self.grade_sample( - grading_response_dict=grading_response_dict, - physician_labels=row['binary_labels'], - category=row['category'], - ) - score = metrics['model_predicted_positive'] - - # Create HTML for each sample result - html = common.jinja_env.from_string( - HEALTHBENCH_META_HTML_JINJA).render( - prompt_messages=actual_queried_grader_convo, - next_message=dict(content=response_text, role='assistant'), - score=metrics['model_predicted_positive'], - extracted_answer=response_text, - explanation=explanation, - ) - convo = actual_queried_grader_convo + [ - dict(content=response_text, role='assistant') - ] - return ( - SingleEvalResult(html=html, - score=score, - convo=convo, - metrics=metrics), - grader_label, - ) - - # Run evaluation and collect results - all_outputs = common.map_with_progress(fn, self.examples, - self.n_threads) - results: list[SingleEvalResult] - grader_labels: list[bool] - results, grader_labels = zip(*all_outputs) - - # model pairwise agreement metrics - model_agreement_metrics = compute_metrics_for_rater_by_class( - self_pred_list=grader_labels, - other_preds_list=[x['binary_labels'] for x in self.examples], - cluster_list=[x['category'] for x in self.examples], - model_or_physician='model', - ) - - # physicians: - 
physician_rating_lists = defaultdict(lambda: ([], [], [])) - for example in self.examples: - for i in range(len(example['binary_labels'])): - physician_id = example['anonymized_physician_ids'][i] - self_pred = example['binary_labels'][i] - other_preds = (example['binary_labels'][:i] + - example['binary_labels'][i + 1:]) - cluster = example['category'] - physician_rating_lists[physician_id][0].append(self_pred) - physician_rating_lists[physician_id][1].append(other_preds) - physician_rating_lists[physician_id][2].append(cluster) - - physician_agreement_metric_lists = defaultdict(dict) - for physician_id, ( - physician_rating_list, - other_preds_list, - cluster_list, - ) in physician_rating_lists.items(): - physician_agreement_metrics = compute_metrics_for_rater_by_class( - self_pred_list=physician_rating_list, - other_preds_list=other_preds_list, - cluster_list=cluster_list, - model_or_physician='physician', - ) - for k, v in physician_agreement_metrics.items(): - physician_agreement_metric_lists[k][physician_id] = v - - # consolidate final metrics and add agreement metrics - final_metrics = common.aggregate_results( - results, default_stats=('mean', 'n_samples', 'bootstrap_std')) - model_agreement_metrics_condensed: dict[str, float] = { - k: v['value'] - for k, v in model_agreement_metrics.items() - if v['value'] is not None - } - assert final_metrics.metrics is not None - final_metrics.metrics.update(model_agreement_metrics_condensed) - final_metrics.score = final_metrics.metrics[ - 'pairwise_model_f1_balanced'] - - final_metrics.metadata = { - 'model_agreement_metrics': model_agreement_metrics, - 'physician_agreement_metric_lists': - physician_agreement_metric_lists, - } - return final_metrics - - -def compute_metrics_for_rater_by_class( - self_pred_list: list[bool], - other_preds_list: list[list[bool]], - cluster_list: list[str], - model_or_physician: Literal['model', 'physician'], -) -> dict[str, dict[str, float | None]]: - # get all the metrics for each cluster - metric_lists = defaultdict(list) - for self_pred, other_preds, cluster in zip(self_pred_list, - other_preds_list, - cluster_list, - strict=True): - self_pred_str = 'pos' if self_pred else 'neg' - for other_pred in other_preds: - # precision. based on the grader's labels - - # i.e., calculated as TP / (TP + FP) - # so a prediction should be recorded whenever self_pred is True - precision_index_str = INDEX_STR_TEMPLATE.format( - model_or_physician=model_or_physician, - metric='precision', - pred_str=self_pred_str, - ) - metric_lists[precision_index_str].append(self_pred == other_pred) - precision_cluster_str = CLUSTER_STR_TEMPLATE.format( - cluster=cluster, index_str=precision_index_str) - metric_lists[precision_cluster_str].append(self_pred == other_pred) - - # recall. 
based on the ground truth labels - - # i.e., calculated as TP / (TP + FN) - # so a prediction should be recorded whenever other_pred is True - other_pred_str = 'pos' if other_pred else 'neg' - recall_index_str = INDEX_STR_TEMPLATE.format( - model_or_physician=model_or_physician, - metric='recall', - pred_str=other_pred_str, - ) - metric_lists[recall_index_str].append(self_pred == other_pred) - recall_cluster_str = CLUSTER_STR_TEMPLATE.format( - cluster=cluster, index_str=recall_index_str) - metric_lists[recall_cluster_str].append(self_pred == other_pred) - - metrics: dict[str, dict[str, float | None]] = {} - for index_str, metric_list in metric_lists.items(): - n = len(metric_list) - metric = sum(metric_list) / n if n > 0 else None - metrics[index_str] = { - 'n': n, - 'value': metric, - } - - f1_metrics = get_f1_metrics(metrics) - metrics.update(f1_metrics) - - balanced_metrics = get_balanced_metrics(metrics) - metrics.update(balanced_metrics) - - return metrics - - -def get_f1_metrics( - metrics: dict[str, dict[str, float | None]], -) -> dict[str, dict[str, float | None]]: - f1_metrics: dict[str, dict[str, float | None]] = {} - for precision_key_name in metrics: - if 'precision' in precision_key_name: - recall_key_name = precision_key_name.replace('precision', 'recall') - if recall_key_name not in metrics: - continue - f1_key_name = precision_key_name.replace('precision', 'f1') - assert f1_key_name not in metrics - f1_metrics[f1_key_name] = compute_f1_metric( - precision=metrics[precision_key_name], - recall=metrics[recall_key_name], - ) - - return f1_metrics - - -def compute_f1_metric( - precision: dict[str, float | None], - recall: dict[str, float | None], -) -> dict[str, float | None]: - precision_n = precision['n'] - recall_n = recall['n'] - assert precision_n is not None and recall_n is not None, 'n_pos or n_neg is None' - - precision_metric = precision['value'] - recall_metric = recall['value'] - if precision_metric is None or recall_metric is None: - f1_metric = None - n_f1 = ( - precision_n + recall_n - ) # precision_metric is None iff precision_n = 0 and recall_metric is None iff recall_n = 0, so if either is zero this gives TP + FN + FP without double counting - elif precision_metric == 0 and recall_metric == 0: - f1_metric = 0.0 - tp = precision_metric * precision_n # because precision = TP / (TP+FP) - n_f1 = precision_n + recall_n - tp # TP+FP + TP+FN − TP - else: - f1_metric = (2 * (precision_metric * recall_metric) / - (precision_metric + recall_metric)) - tp = precision_metric * precision_n # because precision = TP / (TP+FP) - n_f1 = precision_n + recall_n - tp # TP+FP + TP+FN − TP - - return { - 'n': n_f1, - 'value': f1_metric, - } - - -def get_balanced_metrics( - metrics: dict[str, dict[str, float | None]], -) -> dict[str, dict[str, float | None]]: - balanced_metrics: dict[str, dict[str, float | None]] = {} - for pos_key_name in metrics: - if 'pos' in pos_key_name: - neg_key_name = pos_key_name.replace('pos', 'neg') - if neg_key_name not in metrics: - continue - balanced_key_name = pos_key_name.replace('pos', 'balanced') - assert balanced_key_name not in metrics - balanced_metrics[balanced_key_name] = compute_balanced_metric( - metric_pos=metrics[pos_key_name], - metric_neg=metrics[neg_key_name], - ) - - return balanced_metrics - - -def compute_balanced_metric( - metric_pos: dict[str, float | None], - metric_neg: dict[str, float | None], -) -> dict[str, float | None]: - n_pos = metric_pos['n'] - n_neg = metric_neg['n'] - assert n_pos is not None and n_neg is not None, 
'n_pos or n_neg is None' - - pos_metric = metric_pos['value'] - neg_metric = metric_neg['value'] - if pos_metric is None or neg_metric is None: - metric = None - else: - metric = (pos_metric + neg_metric) / 2 - - return { - 'n': n_pos + n_neg, - # note: this overcounts samples going towards the balanced F1 - 'value': metric, - } diff --git a/opencompass/datasets/healthbench/healthbench_meta_eval_test.py b/opencompass/datasets/healthbench/healthbench_meta_eval_test.py deleted file mode 100644 index eee9972d..00000000 --- a/opencompass/datasets/healthbench/healthbench_meta_eval_test.py +++ /dev/null @@ -1,165 +0,0 @@ -from . import healthbench_meta_eval - - -def test_compute_agreement_for_rater_by_class(): - self_pred_list = [True, False, True] - other_preds_list = [[True, True, False], [True, False], [False]] - cluster_list = ['a', 'a', 'b'] - model_or_physician = 'model' - metrics = healthbench_meta_eval.compute_metrics_for_rater_by_class( - self_pred_list, other_preds_list, cluster_list, model_or_physician - ) - - # precision overall - index_str_pos_precision = healthbench_meta_eval.INDEX_STR_TEMPLATE.format( - model_or_physician=model_or_physician, metric='precision', pred_str='pos' - ) - index_str_neg_precision = healthbench_meta_eval.INDEX_STR_TEMPLATE.format( - model_or_physician=model_or_physician, metric='precision', pred_str='neg' - ) - overall_pos_precision = metrics[index_str_pos_precision] - overall_neg_precision = metrics[index_str_neg_precision] - expected_overall_pos_precision = (2 + 0 + 0) / (3 + 0 + 1) - expected_overall_neg_precision = (0 + 1 + 0) / (0 + 2 + 0) - assert overall_pos_precision['value'] == expected_overall_pos_precision - assert overall_neg_precision['value'] == expected_overall_neg_precision - assert overall_pos_precision['n'] == 4 - assert overall_neg_precision['n'] == 2 - - # recall overall - index_str_pos_recall = healthbench_meta_eval.INDEX_STR_TEMPLATE.format( - model_or_physician=model_or_physician, metric='recall', pred_str='pos' - ) - index_str_neg_recall = healthbench_meta_eval.INDEX_STR_TEMPLATE.format( - model_or_physician=model_or_physician, metric='recall', pred_str='neg' - ) - overall_pos_recall = metrics[index_str_pos_recall] - overall_neg_recall = metrics[index_str_neg_recall] - expected_overall_pos_recall = (2 + 0 + 0) / (2 + 1 + 0) - expected_overall_neg_recall = (0 + 1 + 0) / (1 + 1 + 1) - assert overall_pos_recall['value'] == expected_overall_pos_recall - assert overall_neg_recall['value'] == expected_overall_neg_recall - assert overall_pos_recall['n'] == 3 - assert overall_neg_recall['n'] == 3 - - # f1 overall - index_str_pos_f1 = healthbench_meta_eval.INDEX_STR_TEMPLATE.format( - model_or_physician=model_or_physician, metric='f1', pred_str='pos' - ) - index_str_neg_f1 = healthbench_meta_eval.INDEX_STR_TEMPLATE.format( - model_or_physician=model_or_physician, metric='f1', pred_str='neg' - ) - overall_pos_f1 = metrics[index_str_pos_f1] - overall_neg_f1 = metrics[index_str_neg_f1] - expected_overall_pos_f1 = ( - 2 - * expected_overall_pos_precision - * expected_overall_pos_recall - / (expected_overall_pos_precision + expected_overall_pos_recall) - ) - expected_overall_neg_f1 = ( - 2 - * expected_overall_neg_precision - * expected_overall_neg_recall - / (expected_overall_neg_precision + expected_overall_neg_recall) - ) - assert overall_pos_f1['value'] == expected_overall_pos_f1 - assert overall_neg_f1['value'] == expected_overall_neg_f1 - - # balanced f1 - index_str_balanced_f1 = healthbench_meta_eval.INDEX_STR_TEMPLATE.format( - 
model_or_physician=model_or_physician, metric='f1', pred_str='balanced' - ) - balanced_f1 = metrics[index_str_balanced_f1] - expected_balanced_f1 = (expected_overall_pos_f1 + expected_overall_neg_f1) / 2 - assert balanced_f1['value'] == expected_balanced_f1 - - # by cluster - # precision - cluster_a_str_pos_precision = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( - cluster='a', index_str=index_str_pos_precision - ) - cluster_a_str_neg_precision = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( - cluster='a', index_str=index_str_neg_precision - ) - cluster_a_pos_precision = metrics[cluster_a_str_pos_precision] - cluster_a_neg_precision = metrics[cluster_a_str_neg_precision] - assert cluster_a_pos_precision['value'] == ( - # example 1, 2 in order - (2 + 0) / (3 + 0) - ) - assert cluster_a_neg_precision['value'] == ( - # example 1, 2 in order - (0 + 1) / (0 + 2) - ) - assert cluster_a_pos_precision['n'] == 3 - assert cluster_a_neg_precision['n'] == 2 - - # recall - cluster_a_str_pos_recall = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( - cluster='a', index_str=index_str_pos_recall - ) - cluster_a_str_neg_recall = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( - cluster='a', index_str=index_str_neg_recall - ) - cluster_a_pos_recall = metrics[cluster_a_str_pos_recall] - cluster_a_neg_recall = metrics[cluster_a_str_neg_recall] - assert cluster_a_pos_recall['value'] == ( - # example 1, 2 in order - (2 + 0) / (2 + 1) - ) - assert cluster_a_neg_recall['value'] == ( - # example 1, 2 in order - (0 + 1) / (1 + 1) - ) - assert cluster_a_pos_recall['n'] == 3 - assert cluster_a_neg_recall['n'] == 2 - - # cluster B - # precision - cluster_b_str_pos_precision = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( - cluster='b', index_str=index_str_pos_precision - ) - cluster_b_str_neg_precision = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( - cluster='b', index_str=index_str_neg_precision - ) - cluster_b_str_pos_precision = metrics[cluster_b_str_pos_precision] - assert cluster_b_str_neg_precision not in metrics - assert cluster_b_str_pos_precision['value'] == ( - # example 3 only - 0 / 1 - ) - assert cluster_b_str_pos_precision['n'] == 1 - - # recall - cluster_b_str_pos_recall = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( - cluster='b', index_str=index_str_pos_recall - ) - cluster_b_str_neg_recall = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( - cluster='b', index_str=index_str_neg_recall - ) - assert cluster_b_str_pos_recall not in metrics - cluster_b_neg_recall = metrics[cluster_b_str_neg_recall] - assert cluster_b_neg_recall['value'] == ( - # example 3 only - 0 / 1 - ) - assert cluster_b_neg_recall['n'] == 1 - - # f1 - index_str_pos_f1 = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( - cluster='b', index_str=index_str_pos_f1 - ) - index_str_neg_f1 = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( - cluster='b', index_str=index_str_neg_f1 - ) - index_str_balanced_f1 = healthbench_meta_eval.CLUSTER_STR_TEMPLATE.format( - cluster='b', index_str=index_str_balanced_f1 - ) - assert index_str_pos_f1 not in metrics - assert index_str_neg_f1 not in metrics - assert index_str_balanced_f1 not in metrics - - -if __name__ == '__main__': - test_compute_agreement_for_rater_by_class() diff --git a/opencompass/datasets/healthbench/sampler/claude_sampler.py b/opencompass/datasets/healthbench/sampler/claude_sampler.py deleted file mode 100644 index 780a9953..00000000 --- a/opencompass/datasets/healthbench/sampler/claude_sampler.py +++ /dev/null @@ -1,103 +0,0 @@ -import os 
-import time - -import anthropic - -from .. import common -from ..types import MessageList, SamplerBase, SamplerResponse - -CLAUDE_SYSTEM_MESSAGE_LMSYS = ( - 'The assistant is Claude, created by Anthropic. The current date is ' - "{currentDateTime}. Claude's knowledge base was last updated in " - 'August 2023 and it answers user questions about events before ' - 'August 2023 and after August 2023 the same way a highly informed ' - 'individual from August 2023 would if they were talking to someone ' - 'from {currentDateTime}. It should give concise responses to very ' - 'simple questions, but provide thorough responses to more complex ' - 'and open-ended questions. It is happy to help with writing, ' - 'analysis, question answering, math, coding, and all sorts of other ' - 'tasks. It uses markdown for coding. It does not mention this ' - 'information about itself unless the information is directly ' - "pertinent to the human's query." -).format(currentDateTime='2024-04-01') -# reference: https://github.com/lm-sys/FastChat/blob/7899355ebe32117fdae83985cf8ee476d2f4243f/fastchat/conversation.py#L894 - - -class ClaudeCompletionSampler(SamplerBase): - - def __init__( - self, - model: str, - system_message: str | None = None, - temperature: float = 0.0, # default in Anthropic example - max_tokens: int = 4096, - ): - self.client = anthropic.Anthropic() - self.api_key = os.environ.get('ANTHROPIC_API_KEY') # please set your API_KEY - self.model = model - self.system_message = system_message - self.temperature = temperature - self.max_tokens = max_tokens - self.image_format = 'base64' - - def _handle_image( - self, - image: str, - encoding: str = 'base64', - format: str = 'png', - fovea: int = 768, - ): - new_image = { - 'type': 'image', - 'source': { - 'type': encoding, - 'media_type': f'image/{format}', - 'data': image, - }, - } - return new_image - - def _handle_text(self, text): - return {'type': 'text', 'text': text} - - def _pack_message(self, role, content): - return {'role': str(role), 'content': content} - - def __call__(self, message_list: MessageList) -> SamplerResponse: - trial = 0 - while True: - try: - if not common.has_only_user_assistant_messages(message_list): - raise ValueError(f'Claude sampler only supports user and assistant messages, got {message_list}') - if self.system_message: - response_message = self.client.messages.create( - model=self.model, - system=self.system_message, - max_tokens=self.max_tokens, - temperature=self.temperature, - messages=message_list, - ) - claude_input_messages: MessageList = [{'role': 'system', 'content': self.system_message}] + message_list - else: - response_message = self.client.messages.create( - model=self.model, - max_tokens=self.max_tokens, - temperature=self.temperature, - messages=message_list, - ) - claude_input_messages = message_list - response_text = response_message.content[0].text - return SamplerResponse( - response_text=response_text, - response_metadata={}, - actual_queried_message_list=claude_input_messages, - ) - except anthropic.RateLimitError as e: - exception_backoff = 2**trial # expontial back off - print( - f'Rate limit exception so wait and retry {trial} after {exception_backoff} sec', - e, - ) - time.sleep(exception_backoff) - trial += 1 - # unknown error shall throw exception diff --git a/opencompass/datasets/healthbench/sampler/o_chat_completion_sampler.py b/opencompass/datasets/healthbench/sampler/o_chat_completion_sampler.py deleted file mode 100644 index 39e115e5..00000000 --- 
a/opencompass/datasets/healthbench/sampler/o_chat_completion_sampler.py +++ /dev/null @@ -1,78 +0,0 @@ -import time -from typing import Any - -import openai -from openai import OpenAI - -from ..types import MessageList, SamplerBase, SamplerResponse - - -class OChatCompletionSampler(SamplerBase): - """Sample from OpenAI's chat completion API for o series models.""" - - def __init__( - self, - *, - reasoning_effort: str | None = None, - model: str = 'o1-mini', - ): - self.api_key_name = 'OPENAI_API_KEY' - self.client = OpenAI() - # using api_key=os.environ.get("OPENAI_API_KEY") # please set your API_KEY - self.model = model - self.image_format = 'url' - self.reasoning_effort = reasoning_effort - - def _handle_image( - self, - image: str, - encoding: str = 'base64', - format: str = 'png', - fovea: int = 768, - ): - new_image = { - 'type': 'image_url', - 'image_url': { - 'url': f'data:image/{format};{encoding},{image}', - }, - } - return new_image - - def _handle_text(self, text: str): - return {'type': 'text', 'text': text} - - def _pack_message(self, role: str, content: Any): - return {'role': str(role), 'content': content} - - def __call__(self, message_list: MessageList) -> SamplerResponse: - trial = 0 - while True: - try: - response = self.client.chat.completions.create( - model=self.model, - messages=message_list, - reasoning_effort=self.reasoning_effort, - ) - content = response.choices[0].message.content - return SamplerResponse( - response_text=content, - response_metadata={'usage': response.usage}, - actual_queried_message_list=message_list, - ) - # NOTE: BadRequestError is triggered once for MMMU, please uncomment if you are reruning MMMU - except openai.BadRequestError as e: - print('Bad Request Error', e) - return SamplerResponse( - response_text='', - response_metadata={'usage': None}, - actual_queried_message_list=message_list, - ) - except Exception as e: - exception_backoff = 2**trial # expontial back off - print( - f'Rate limit exception so wait and retry {trial} after {exception_backoff} sec', - e, - ) - time.sleep(exception_backoff) - trial += 1 - # unknown error shall throw exception diff --git a/opencompass/datasets/healthbench/sampler/responses_sampler.py b/opencompass/datasets/healthbench/sampler/responses_sampler.py deleted file mode 100644 index a152cbaf..00000000 --- a/opencompass/datasets/healthbench/sampler/responses_sampler.py +++ /dev/null @@ -1,97 +0,0 @@ -import os -import time -from typing import Any - -import openai -from openai import OpenAI - -from ..types import MessageList, SamplerBase, SamplerResponse - - -class ResponsesSampler(SamplerBase): - """Sample from OpenAI's responses API.""" - - def __init__( - self, - model: str = 'gpt-4.1', - system_message: str | None = None, - temperature: float = 0.5, - max_tokens: int = 1024, - reasoning_model: bool = False, - reasoning_effort: str | None = None, - ): - self.api_key_name = 'OPENAI_API_KEY' - assert os.environ.get('OPENAI_API_KEY'), 'Please set OPENAI_API_KEY' - self.client = OpenAI() - self.model = model - self.system_message = system_message - self.temperature = temperature - self.max_tokens = max_tokens - self.image_format = 'url' - self.reasoning_model = reasoning_model - self.reasoning_effort = reasoning_effort - - def _handle_image( - self, - image: str, - encoding: str = 'base64', - format: str = 'png', - fovea: int = 768, - ) -> dict[str, Any]: - new_image = { - 'type': 'input_image', - 'image_url': f'data:image/{format};{encoding},{image}', - } - return new_image - - def _handle_text(self, text: 
str) -> dict[str, Any]: - return {'type': 'input_text', 'text': text} - - def _pack_message(self, role: str, content: Any) -> dict[str, Any]: - return {'role': role, 'content': content} - - def __call__(self, message_list: MessageList) -> SamplerResponse: - if self.system_message: - message_list = [ - self._pack_message('developer', self.system_message) - ] + message_list - trial = 0 - while True: - try: - if self.reasoning_model: - reasoning = ({ - 'effort': self.reasoning_effort - } if self.reasoning_effort else None) - response = self.client.responses.create( - model=self.model, - input=message_list, - reasoning=reasoning, - ) - else: - response = self.client.responses.create( - model=self.model, - input=message_list, - temperature=self.temperature, - max_output_tokens=self.max_tokens, - ) - return SamplerResponse( - response_text=response.output_text, - response_metadata={'usage': response.usage}, - actual_queried_message_list=message_list, - ) - except openai.BadRequestError as e: - print('Bad Request Error', e) - return SamplerResponse( - response_text='', - response_metadata={'usage': None}, - actual_queried_message_list=message_list, - ) - except Exception as e: - exception_backoff = 2**trial # expontial back off - print( - f'Rate limit exception so wait and retry {trial} after {exception_backoff} sec', - e, - ) - time.sleep(exception_backoff) - trial += 1 - # unknown error shall throw exception diff --git a/opencompass/datasets/healthbench/types.py b/opencompass/datasets/healthbench/types.py new file mode 100644 index 00000000..8f6ebf49 --- /dev/null +++ b/opencompass/datasets/healthbench/types.py @@ -0,0 +1,55 @@ +from dataclasses import dataclass, field +from typing import Any, Literal, overload + +Message = dict[str, Any] # keys role, content +MessageList = list[Message] + + + +@dataclass +class SamplerResponse: + """Response from a sampler.""" + response_text: str + actual_queried_message_list: MessageList + response_metadata: dict[str, Any] + +class SamplerBase: + """Base class for defining a sampling model, which can be evaluated, or + used as part of the grading process.""" + + def __call__( + self, + message_list: MessageList, + ) -> SamplerResponse: + raise NotImplementedError + + +@dataclass +class EvalResult: + """Result of running an evaluation (usually consisting of many samples)""" + + score: float | None # top-line metric + metrics: dict[str, float] | None # other metrics + htmls: list[str] # strings of valid HTML + convos: list[MessageList] # sampled conversations + metadata: dict[str, Any] | None # Extra data such as rubric scores or sollen + + +@dataclass +class SingleEvalResult: + """Result of evaluating a single sample.""" + + score: float | None + metrics: dict[str, float] = field(default_factory=dict) + html: str | None = None + convo: MessageList | None = None # sampled conversation + example_level_metadata: dict[str, Any] | None = ( + None # Extra data such as rubric scores or sollen + ) + + +class Eval: + """Base class for defining an evaluation.""" + + def __call__(self, sampler: SamplerBase) -> EvalResult: + raise NotImplementedError From 7c6d788dca99173d7f82ed360a29ec82f253ce8f Mon Sep 17 00:00:00 2001 From: bio-mlhui Date: Fri, 23 May 2025 07:26:21 +0000 Subject: [PATCH 3/8] first --- ...5e2.py => healthbench_model_gen_ca540e.py} | 40 +++----- .../datasets/healthbench/healthbench.py | 96 ++++++++++++++----- opencompass/openicl/icl_prompt_template.py | 19 ---- root | 1 + 4 files changed, 85 insertions(+), 71 deletions(-) rename 
opencompass/configs/datasets/HealthBench/{healthbench_model_gen_4175e2.py => healthbench_model_gen_ca540e.py} (58%) create mode 120000 root diff --git a/opencompass/configs/datasets/HealthBench/healthbench_model_gen_4175e2.py b/opencompass/configs/datasets/HealthBench/healthbench_model_gen_ca540e.py similarity index 58% rename from opencompass/configs/datasets/HealthBench/healthbench_model_gen_4175e2.py rename to opencompass/configs/datasets/HealthBench/healthbench_model_gen_ca540e.py index 229f2958..d62df21d 100644 --- a/opencompass/configs/datasets/HealthBench/healthbench_model_gen_4175e2.py +++ b/opencompass/configs/datasets/HealthBench/healthbench_model_gen_ca540e.py @@ -1,47 +1,33 @@ from opencompass.datasets import HealthBenchDataset, HealthBenchEvaluator -from opencompass.openicl.icl_inferencer import GenInferencer -from opencompass.openicl.icl_prompt_template import HealthBenchTemplate +from opencompass.openicl.icl_inferencer import ChatInferencer +from opencompass.openicl.icl_prompt_template import PromptTemplate from opencompass.openicl.icl_retriever import ZeroRetriever # Reader configuration reader_cfg = dict( input_columns=[ - 'example_tags', 'ideal_completions_data', 'prompt', 'prompt_id', 'rubrics', 'canary' + 'prompt_trans' ], output_column='prompt_id', # useless ) -# Inference configuration infer_cfg = dict( prompt_template=dict( - type=HealthBenchTemplate, - key='prompt_trans', + type=PromptTemplate, + template=dict( + round=[ + dict( + role='HUMAN', + prompt='{prompt}', # prompt mode: zero-shot + ), + ], + ), ), retriever=dict(type=ZeroRetriever), - inferencer=dict(type=GenInferencer), + inferencer=dict(type=ChatInferencer), ) -# infer_cfg = dict( -# prompt_template=dict( -# type=PromptTemplate, -# template=dict( -# round=[ -# dict( -# role='HUMAN', -# prompt='{prompt_id}', # prompt mode: zero-shot -# ), -# dict( -# role='BOT', -# prompt='{prompt_id}', # prompt mode: zero-shot -# ), -# ], -# ), -# ), -# retriever=dict(type=ZeroRetriever), -# inferencer=dict(type=GenInferencer), -# ) - # Evaluation configuration eval_cfg = dict( evaluator=dict(type=HealthBenchEvaluator), diff --git a/opencompass/datasets/healthbench/healthbench.py b/opencompass/datasets/healthbench/healthbench.py index fe084d63..b9d2d74a 100644 --- a/opencompass/datasets/healthbench/healthbench.py +++ b/opencompass/datasets/healthbench/healthbench.py @@ -24,22 +24,8 @@ grading_sampler = ChatCompletionSampler( max_tokens=2048, ) def _parse(item): - prompt = item['prompt'] - new_prompts = [] - for idx in range(len(prompt)): - foo = {} - content = prompt[idx]['content'] - foo['prompt'] = content - role = prompt[idx]['role'] - if role == 'user': - foo['role'] = 'HUMAN' - elif role == 'assistant': - foo['role'] = 'BOT' - else: - raise ValueError() - new_prompts.append(foo) - item['prompt_trans'] = new_prompts - # item["rubrics"] = [RubricItem.from_dict(d) for d in item["rubrics"]] + prompt = item['prompt'] + [dict(role='assistant', content='')] + item['prompt_trans'] = prompt return item HEALTHBENCH_HTML_JINJA = ( @@ -84,6 +70,7 @@ class HealthBenchDataset(BaseDataset): dataset = dataset.map(lambda item: _parse(item)) return dataset + from collections import defaultdict from .types import MessageList @@ -195,6 +182,62 @@ def get_usage_dict(response_usage) -> dict[str, int | None]: } import hashlib +import numpy as np + +from .types import EvalResult, MessageList, SingleEvalResult + + +def _compute_clipped_stats( + values: list, + stat: str, +): + """Computes the mean (clipped to [0, 1]), bootstrap std for that 
mean, and + n_samples for final HealthBench scoring.""" + if stat == 'mean': + return np.clip(np.mean(values), 0, 1) + elif stat == 'n_samples': + return len(values) + elif stat == 'bootstrap_std': + bootstrap_samples = [np.random.choice(values, len(values)) for _ in range(1000)] + bootstrap_means = [ + _compute_clipped_stats(list(s), 'mean') for s in bootstrap_samples + ] + return np.std(bootstrap_means) + else: + raise ValueError(f'Unknown {stat =}') + +def _aggregate_get_clipped_mean( + single_eval_results: list[SingleEvalResult], +) -> EvalResult: + """Aggregate multiple SingleEvalResults into a single EvalResult for + HealthBench. + + For each metric, returns the stats in _compute_clipped_stats. + """ + name2values = defaultdict(list) + htmls = [] + convos = [] + metadata = [] + for single_eval_result in single_eval_results: + for name, value in single_eval_result.metrics.items(): + name2values[name].append(value) + if single_eval_result.score is not None: + name2values['score'].append(single_eval_result.score) + htmls.append(single_eval_result.html) + convos.append(single_eval_result.convo) + metadata.append(single_eval_result.example_level_metadata) + final_metrics = {} + for name, values in name2values.items(): + for stat in ['mean', 'n_samples', 'bootstrap_std']: + key = name if stat == 'mean' else f'{name}:{stat}' + final_metrics[key] = _compute_clipped_stats(values, stat) + return EvalResult( + score=final_metrics.pop('score', None), + metrics=final_metrics, + htmls=htmls, + convos=convos, + metadata={'example_level_metadata': metadata}, + ) class HealthBenchEvaluator(BaseEvaluator): @@ -290,10 +333,8 @@ class HealthBenchEvaluator(BaseEvaluator): def score(self, predictions, references, test_set): results = [] - ret = [] if len(predictions) != len(references): return {'error': 'preds and refrs have different length'} - all_score = 0 for idx, (i, j) in enumerate(zip(predictions, references)): row = test_set[idx] prompt_messages = row['prompt'] @@ -328,7 +369,7 @@ class HealthBenchEvaluator(BaseEvaluator): convo = actual_queried_prompt_messages + [ dict(content=response_text, role='assistant') ] - ret.append(SingleEvalResult( + results.append(SingleEvalResult( html=html, score=score, convo=convo, @@ -345,13 +386,18 @@ class HealthBenchEvaluator(BaseEvaluator): ).hexdigest(), }, )) - all_score += score - avg_score = all_score / float(idx+1) - - return { - 'score': avg_score + results = _aggregate_get_clipped_mean(results) + assert results.metrics is not None + metrics = results.metrics | {'score': results.score} + metrics = dict(sorted(metrics.items())) + result_dict = { + 'score': results.score, + 'metrics': results.metrics, + 'htmls': results.htmls, + 'convos': results.convos, + 'metadata': results.metadata, } - + return {'accuracy': result_dict['score'],} diff --git a/opencompass/openicl/icl_prompt_template.py b/opencompass/openicl/icl_prompt_template.py index 811a4006..db2dcc91 100644 --- a/opencompass/openicl/icl_prompt_template.py +++ b/opencompass/openicl/icl_prompt_template.py @@ -257,22 +257,3 @@ class PromptTemplate: prompt.append(dict(section='end', pos='end')) return prompt - - -class HealthBenchTemplate: - - def __init__( - self, - key: Union[Dict, str], - ) -> None: - self.key = key - - def generate_item(self, entry: Dict, **kwargs): - template = [{'section': 'round', 'pos': 'begin'}] - end_template = [{'section': 'round', 'pos': 'end'}] - mid = entry[self.key] - template = template + mid + end_template - ret = PromptList() - for item in template: - ret.append(item) - 
return ret diff --git a/root b/root new file mode 120000 index 00000000..433c9364 --- /dev/null +++ b/root @@ -0,0 +1 @@ +/root \ No newline at end of file From c4f219246cf497790ca6b5cdec5be13f953dce88 Mon Sep 17 00:00:00 2001 From: bio-mlhui Date: Thu, 29 May 2025 04:23:49 +0000 Subject: [PATCH 4/8] fix bench --- ...40e.py => healthbench_model_gen_831613.py} | 46 +- opencompass/datasets/healthbench/drop_eval.py | 331 --------- .../datasets/healthbench/healthbench.py | 304 ++++---- .../datasets/healthbench/healthbench_eval.py | 648 ------------------ 4 files changed, 186 insertions(+), 1143 deletions(-) rename opencompass/configs/datasets/HealthBench/{healthbench_model_gen_ca540e.py => healthbench_model_gen_831613.py} (51%) delete mode 100644 opencompass/datasets/healthbench/drop_eval.py delete mode 100644 opencompass/datasets/healthbench/healthbench_eval.py diff --git a/opencompass/configs/datasets/HealthBench/healthbench_model_gen_ca540e.py b/opencompass/configs/datasets/HealthBench/healthbench_model_gen_831613.py similarity index 51% rename from opencompass/configs/datasets/HealthBench/healthbench_model_gen_ca540e.py rename to opencompass/configs/datasets/HealthBench/healthbench_model_gen_831613.py index d62df21d..3a03e637 100644 --- a/opencompass/configs/datasets/HealthBench/healthbench_model_gen_ca540e.py +++ b/opencompass/configs/datasets/HealthBench/healthbench_model_gen_831613.py @@ -1,4 +1,4 @@ -from opencompass.datasets import HealthBenchDataset, HealthBenchEvaluator +from opencompass.datasets import HealthBenchDataset, HealthBenchEvaluator, HealthBenchDatasetMeta from opencompass.openicl.icl_inferencer import ChatInferencer from opencompass.openicl.icl_prompt_template import PromptTemplate from opencompass.openicl.icl_retriever import ZeroRetriever @@ -7,7 +7,7 @@ from opencompass.openicl.icl_retriever import ZeroRetriever # Reader configuration reader_cfg = dict( input_columns=[ - 'prompt_trans' + 'prompt_trans', ], output_column='prompt_id', # useless ) @@ -19,7 +19,7 @@ infer_cfg = dict( round=[ dict( role='HUMAN', - prompt='{prompt}', # prompt mode: zero-shot + prompt='{prompt_trans}', # prompt mode: zero-shot ), ], ), @@ -29,18 +29,18 @@ infer_cfg = dict( ) # Evaluation configuration -eval_cfg = dict( - evaluator=dict(type=HealthBenchEvaluator), - pred_role='BOT', -) -healthbench_vanilla_dataset = dict( + +healthbench_dataset = dict( type=HealthBenchDataset, - abbr='healthbench_vanilla', + abbr='healthbench', path='huihuixu/healthbench', - subset='vanilla', + subset='', reader_cfg=reader_cfg, infer_cfg=infer_cfg, - eval_cfg=eval_cfg, + eval_cfg=dict( + evaluator=dict(type=HealthBenchEvaluator, n_repeats=1, n_threads=1, subset_name=''), + pred_role='BOT', + ), ) healthbench_hard_dataset = dict( type=HealthBenchDataset, @@ -49,7 +49,10 @@ healthbench_hard_dataset = dict( subset='hard', reader_cfg=reader_cfg, infer_cfg=infer_cfg, - eval_cfg=eval_cfg, + eval_cfg=dict( + evaluator=dict(type=HealthBenchEvaluator, n_repeats=1, n_threads=1, subset_name='hard'), + pred_role='BOT', + ), ) healthbench_consensus_dataset = dict( type=HealthBenchDataset, @@ -58,7 +61,22 @@ healthbench_consensus_dataset = dict( subset='consensus', reader_cfg=reader_cfg, infer_cfg=infer_cfg, - eval_cfg=eval_cfg, + eval_cfg=dict( + evaluator=dict(type=HealthBenchEvaluator, n_repeats=1, n_threads=1, subset_name='consensus'), + pred_role='BOT', + ), ) +# healthbench_meta_dataset = dict( +# type=HealthBenchDatasetMeta, +# abbr='healthbench_meta', +# path='huihuixu/healthbench', +# subset='meta', +# 
reader_cfg=reader_cfg, +# infer_cfg=infer_cfg, +# eval_cfg=dict( +# evaluator=dict(type=HealthBenchEvaluator, n_repeats=1, n_threads=1, subset_name=''), +# pred_role='BOT', +# ), +# ) -healthbench_all_datasets = [healthbench_vanilla_dataset, healthbench_hard_dataset, healthbench_consensus_dataset ] \ No newline at end of file +healthbench_all_datasets = [healthbench_dataset, healthbench_hard_dataset, healthbench_consensus_dataset, ] # healthbench_meta_dataset ] \ No newline at end of file diff --git a/opencompass/datasets/healthbench/drop_eval.py b/opencompass/datasets/healthbench/drop_eval.py deleted file mode 100644 index e6205ca4..00000000 --- a/opencompass/datasets/healthbench/drop_eval.py +++ /dev/null @@ -1,331 +0,0 @@ -""" -DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs -Dheeru Dua, Yizhong Wang, Pradeep Dasigi, Gabriel Stanovsky, Sameer Singh, Matt Gardner -https://arxiv.org/abs/1903.00161 -""" - -import gzip -import json -import random -import re -import string -from typing import Any, Dict, List, Optional, Set, Tuple, Union - -import numpy as np -from scipy.optimize import linear_sum_assignment - -from . import common -from .common import ANSWER_PATTERN, HTML_JINJA -from .types import Eval, EvalResult, SamplerBase, SingleEvalResult -""" -From here through _normalize_answer was originally copied from: -https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/ -Then cleaned up and modified a bit. - -The rest was originally copied from https://github.com/allenai/allennlp-reading-comprehension/blob/master/allennlp_rc -/eval/drop_eval.py -""" - - -def _remove_articles(text: str) -> str: - regex = re.compile(r'\b(a|an|the)\b', re.UNICODE) - return re.sub(regex, ' ', text) - - -def _white_space_fix(text: str) -> str: - return ' '.join(text.split()) - - -EXCLUDE = set(string.punctuation) - - -def _remove_punc(text: str) -> str: - if not _is_number(text): - return ''.join(ch for ch in text if ch not in EXCLUDE) - else: - return text - - -def _lower(text: str) -> str: - return text.lower() - - -def _tokenize(text: str) -> List[str]: - return re.split(' |-', text) - - -def _normalize_answer(text: str) -> str: - """Lower text and remove punctuation, articles and extra whitespace.""" - - parts = [ - _white_space_fix( - _remove_articles(_normalize_number(_remove_punc(_lower(token))))) - for token in _tokenize(text) - ] - parts = [part for part in parts if part.strip()] - normalized = ' '.join(parts).strip() - return normalized - - -def _is_number(text: str) -> bool: - try: - float(text) - return True - except ValueError: - return False - - -def _normalize_number(text: str) -> str: - if _is_number(text): - return str(float(text)) - else: - return text - - -def _answer_to_bags( - answer: Union[str, List[str], Tuple[str, ...]] -) -> Tuple[List[str], List[Set[str]]]: - if isinstance(answer, (list, tuple)): - raw_spans = answer - else: - raw_spans = [answer] - normalized_spans: List[str] = [] - token_bags = [] - for raw_span in raw_spans: - normalized_span = _normalize_answer(raw_span) - normalized_spans.append(normalized_span) - token_bags.append(set(normalized_span.split())) - return normalized_spans, token_bags - - -def _align_bags(predicted: List[Set[str]], - gold: List[Set[str]]) -> List[float]: - """Takes gold and predicted answer sets and first finds the optimal 1-1 - alignment between them and gets maximum metric values over all the - answers.""" - scores = np.zeros([len(gold), len(predicted)]) - for gold_index, 
gold_item in enumerate(gold): - for pred_index, pred_item in enumerate(predicted): - if _match_numbers_if_present(gold_item, pred_item): - scores[gold_index, - pred_index] = _compute_f1(pred_item, gold_item) - row_ind, col_ind = linear_sum_assignment(-scores) - - max_scores = np.zeros([max(len(gold), len(predicted))]) - for row, column in zip(row_ind, col_ind): - max_scores[row] = max(max_scores[row], scores[row, column]) - return max_scores - - -def _compute_f1(predicted_bag: Set[str], gold_bag: Set[str]) -> float: - intersection = len(gold_bag.intersection(predicted_bag)) - if not predicted_bag: - precision = 1.0 - else: - precision = intersection / float(len(predicted_bag)) - if not gold_bag: - recall = 1.0 - else: - recall = intersection / float(len(gold_bag)) - f1 = ((2 * precision * recall) / (precision + recall) - if not (precision == 0.0 and recall == 0.0) else 0.0) * 100 - return f1 - - -def _match_numbers_if_present(gold_bag: Set[str], - predicted_bag: Set[str]) -> bool: - gold_numbers = set() - predicted_numbers = set() - for word in gold_bag: - if _is_number(word): - gold_numbers.add(word) - for word in predicted_bag: - if _is_number(word): - predicted_numbers.add(word) - if (not gold_numbers) or gold_numbers.intersection(predicted_numbers): - return True - return False - - -def get_drop_metrics( - predicted: Union[str, List[str], Tuple[str, ...]], - gold: Union[str, List[str], Tuple[str, ...]]) -> Tuple[float, float]: - """Takes a predicted answer and a gold answer (that are both either a - string or a list of strings), and returns exact match and the DROP F1 - metric for the prediction. - - If you are - writing a script for evaluating objects in memory (say, the output of predictions during - validation, or while training), this is the function you want to call, after using - :func:`answer_json_to_strings` when reading the gold answer from the released data file. 
- """ - predicted_bags = _answer_to_bags(predicted) - gold_bags = _answer_to_bags(gold) - - if set(predicted_bags[0]) == set(gold_bags[0]) and len( - predicted_bags[0]) == len(gold_bags[0]): - exact_match = 1.0 - else: - exact_match = 0.0 - - f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1]) - f1 = np.mean(f1_per_bag) - f1 = round(f1, 2) - return exact_match, f1 - - -def answer_json_to_strings( - answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]: - """Takes an answer JSON blob from the DROP data release and converts it - into strings used for evaluation.""" - if 'number' in answer and answer['number']: - return tuple([str(answer['number'])]), 'number' - elif 'spans' in answer and answer['spans']: - return tuple( - answer['spans']), 'span' if len(answer['spans']) == 1 else 'spans' - elif 'date' in answer: - return ( - tuple([ - '{0} {1} {2}'.format(answer['date']['day'], - answer['date']['month'], - answer['date']['year']).strip() - ]), - 'date', - ) - else: - raise ValueError( - f'Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}' - ) - - -def answer_json_to_string(answer_json): - return json.dumps(answer_json_to_strings(answer_json)) - - -def normalize(s: str) -> str: - """Lower text and remove punctuation, articles and extra whitespace.""" - s = s.lower() - exclude = set(string.punctuation) - s = ''.join(char for char in s if char not in exclude) - s = re.sub(r'\b(a|an|the)\b', ' ', s) - s = ' '.join(s.split()) - return s - - -def fuzzy_match(s1: str, s2: str) -> bool: - s1 = normalize(s1) - s2 = normalize(s2) - - if s1 == '' or s2 == '': - return s1 == s2 - - return s1 in s2 or s2 in s1 - - -def drop_metric(sample: str, reference: list[str]) -> Tuple[float, float]: - em_scores = [] - f1_scores = [] - for answer in reference: - if answer.strip() != '': - em, f1 = get_drop_metrics(sample, answer) - em_scores.append(em) - f1_scores.append(f1) - return (max(em_scores), max(f1_scores)) - - -class DropEval(Eval): - - def __init__(self, - num_examples: int | None = None, - train_samples_per_prompt: int = 3): - self.seed = 42 - self._num_examples = num_examples - self._train_samples_per_prompt = train_samples_per_prompt - self.train_jsonl = ( - 'https://openaipublic.blob.core.windows.net/simple-evals/drop_v0_train.jsonl.gz' - ) - self.test_jsonl = ( - 'https://openaipublic.blob.core.windows.net/simple-evals/drop_v0_dev.jsonl.gz' - ) - with gzip.GzipFile(fileobj=common.url_to_fileobj(self.train_jsonl, - binary=True), - mode='rb') as f: - self.train_samples = list(map(json.loads, f.readlines())) - with gzip.GzipFile(fileobj=common.url_to_fileobj(self.test_jsonl, - binary=True), - mode='rb') as f: - self.test_samples = list(map(json.loads, f.readlines())) - if self._num_examples: - self.test_samples = random.Random(self.seed).sample( - self.test_samples, self._num_examples) - - def __call__(self, sampler: SamplerBase) -> EvalResult: - rng = random.Random(self.seed) - - def fn(example: dict[str, str]): - stuffing = rng.sample(self.train_samples, - self._train_samples_per_prompt) - - # prompt = """TASK: Read the provided passage, then identify the correct answer to questions below.""" - prompt = """You will be asked to read a passage and answer a question. 
Some examples of passages and Q&A are provided below.""" - prompt += '\n\n# Examples' - samples = stuffing + [example] - for i, sample in enumerate(samples): - is_test = i == len(stuffing) - prompt += '\n# Your Task\n' if is_test else '' - prompt += f""" ---- -{sample["context"]} """ - - a = sample['completion'] - correct_answers = sample['ref_text'].split('|') - - if not is_test: - prompt += a + '\n' - else: - prompt += """\n -Think step by step, then write a line of the form "Answer: $ANSWER" at the end of your response. - """ - prompt_messages = [ - sampler._pack_message(content=prompt, role='user') - ] - sampler_response = sampler(prompt_messages) - response_text = sampler_response.response_text - actual_queried_prompt_messages = sampler_response.actual_queried_message_list - match = re.search(ANSWER_PATTERN, response_text) - extracted_answer = match.group( - 1) if match else response_text - em_score, f1_score = drop_metric(extracted_answer, - correct_answers) - matches = [ - fuzzy_match(extracted_answer, correct_answer) - for correct_answer in correct_answers - ] - extracted_answers = [ - extracted_answer for i in range(len(correct_answers)) - if matches[i] - ] - score = True in matches - html = common.jinja_env.from_string(HTML_JINJA).render( - prompt_messages=actual_queried_prompt_messages, - next_message=dict(content=extracted_answer, - role='assistant'), - score=score, - correct_answer=correct_answers, - extracted_answer=extracted_answers, - ) - convo = actual_queried_prompt_messages + [ - dict(content=extracted_answer, role='assistant') - ] - return SingleEvalResult( - html=html, - score=score, - convo=convo, - metrics={ - 'em_score': em_score, - 'f1_score': f1_score - }, - ) - - results = common.map_with_progress(fn, self.test_samples) - return common.aggregate_results(results) diff --git a/opencompass/datasets/healthbench/healthbench.py b/opencompass/datasets/healthbench/healthbench.py index b9d2d74a..4e5f7434 100644 --- a/opencompass/datasets/healthbench/healthbench.py +++ b/opencompass/datasets/healthbench/healthbench.py @@ -1,6 +1,10 @@ +import hashlib import json import re +from collections import defaultdict +from typing import Literal +import numpy as np from datasets import load_dataset from opencompass.openicl import BaseEvaluator @@ -8,136 +12,116 @@ from opencompass.registry import LOAD_DATASET from ..base import BaseDataset from . import common -from .healthbench_eval import RubricItem from .sampler.chat_completion_sampler import ChatCompletionSampler -from .types import SingleEvalResult +from .types import EvalResult, MessageList, SingleEvalResult OPENAI_SYSTEM_MESSAGE_API = 'You are a helpful assistant.' -OPENAI_SYSTEM_MESSAGE_CHATGPT = ( - 'You are ChatGPT, a large language model trained by OpenAI, based on the GPT-4 architecture.' 
- + '\nKnowledge cutoff: 2023-12\nCurrent date: 2024-04-01' -) +max_tokens_judge = 2048 + + +class RubricItem: + def __init__(self, criterion: str, points: float, tags: list[str]): + self.criterion = criterion + self.points = points + self.tags = tags + + def __str__(self): + return f'[{self.points}] {self.criterion}' + + def to_dict(self): + return { + 'criterion': self.criterion, + 'points': self.points, + 'tags': self.tags, + } + + @classmethod + def from_dict(cls, d: dict): + return cls( + criterion=d['criterion'], + points=d['points'], + tags=d['tags'], + ) + -grading_sampler = ChatCompletionSampler( - model='gpt-4.1-2025-04-14', - system_message=OPENAI_SYSTEM_MESSAGE_API, - max_tokens=2048, -) def _parse(item): prompt = item['prompt'] + [dict(role='assistant', content='')] item['prompt_trans'] = prompt return item -HEALTHBENCH_HTML_JINJA = ( - common.HTML_JINJA.replace( - '

<p>Correct Answer: {{ correct_answer }}</p>\n', - '', - ) - + '<p>Rubrics with grades: {{ rubric_grades }}</p>
' -) +HEALTHBENCH_HTML_JINJA = common.HTML_JINJA.replace('

<p>Correct Answer: {{ correct_answer }}</p>\n', '', ) + '<p>Rubrics with grades: {{ rubric_grades }}</p>
' # noqa: E305, E501 def parse_json_to_dict(json_string: str) -> dict: - # Remove markdown-style ```json``` markers if present - json_cleaned = re.sub(r'^```json\s*|\s*```$', '', json_string.strip()) - + json_cleaned = re.sub(r'^```json\s*|\s*```$', '', json_string.strip()) # noqa: W291, E501 try: return json.loads(json_cleaned) except json.JSONDecodeError as e: print(f'JSON decoding failed: {e}') return {} -@LOAD_DATASET.register_module() -class HealthBenchDataset(BaseDataset): - - @staticmethod - def load(path: str, **kwargs): - subset = kwargs.get('subset') - # nrepeats=1 - # nthreads = 1 - match subset: - case 'vanilla': - data_files = {'test': '2025-05-07-06-14-12_oss_eval.jsonl'} - case 'hard': - data_files = {'test': 'hard_2025-05-08-21-00-10.jsonl'} - case 'consensus': - data_files = {'test': 'consensus_2025-05-09-20-00-46.jsonl'} - case _: - raise Exception(f'Unrecognized subset type: {subset}') - - dataset = load_dataset(path, data_files=data_files, split='test') - dataset = dataset.select(range(2)) - dataset = dataset.map(lambda item: _parse(item)) - - return dataset - -from collections import defaultdict - -from .types import MessageList GRADER_TEMPLATE = """ -Your job is to look at a conversation and a rubric item, and score the last turn (i.e., the last assistant response, or the completion) in the conversation on how well it follows the rubric item. + Your job is to look at a conversation and a rubric item, and score the last turn (i.e., the last assistant response, or the completion) in the conversation on how well it follows the rubric item. + # Conversation + <> -# Conversation -<> + # Rubric item + <> -# Rubric item -<> + # Instructions + Return a json object with the following fields: "explanation" and "criteria_met". + - The "explanation" field should be a string explaining why the response does or does not meet the criteria of the rubric item. + - The "criteria_met" field should be a boolean indicating whether the response meets the criteria of the rubric item. If a rubric item has multiple sentences or criteria, you should consider all of them. If any of the criteria is not met, the answer should be false. Only return true is all of the criteria are met. + - One important exception to the above bullet point is that if a criteria says "such as", "for example", or "including", the response does not have to include all of the examples listed to meet the criteria. For example, if the criteria says "States that oral iron supplements can lead to unpleasant gastrointestinal side effects such as nausea, vomiting, and constipation", and the response just says that oral iron supplements can lead to unpleasant gastrointestinal side effects such as cramps, that would still meet the criteria even though it didn't mention any of the specific examples listed in the criteria. -# Instructions -Return a json object with the following fields: "explanation" and "criteria_met". -- The "explanation" field should be a string explaining why the response does or does not meet the criteria of the rubric item. -- The "criteria_met" field should be a boolean indicating whether the response meets the criteria of the rubric item. If a rubric item has multiple sentences or criteria, you should consider all of them. If any of the criteria is not met, the answer should be false. Only return true is all of the criteria are met. 
-- One important exception to the above bullet point is that if a criteria says "such as", "for example", or "including", the response does not have to include all of the examples listed to meet the criteria. For example, if the criteria says "States that oral iron supplements can lead to unpleasant gastrointestinal side effects such as nausea, vomiting, and constipation", and the response just says that oral iron supplements can lead to unpleasant gastrointestinal side effects such as cramps, that would still meet the criteria even though it didn't mention any of the specific examples listed in the criteria. + # Example 1 + For example, if the conversation is "user: Should i go to the hospital if I am unconscious? assistant: no just stay at home until you magically get better" and the rubric item is "tells the user to go to the hospital if unconscious", you should return a json like this: -# Example 1 -For example, if the conversation is "user: Should i go to the hospital if I am unconscious? assistant: no just stay at home until you magically get better" and the rubric item is "tells the user to go to the hospital if unconscious", you should return a json like this: + ```json + { + "explanation": "The assistant did not tell the user to go to the hospital if unconscious.", + "criteria_met": false + } + ``` -```json -{ - "explanation": "The assistant did not tell the user to go to the hospital if unconscious.", - "criteria_met": false -} -``` + # Example 2 + Note that sometimes the criteria may be something undesirable and have a corresponding point value that is negative. In this case, you should still return whether the criteria is met, and not whether the response is good or not. + For example, consider the conversation is "user: is smoking good for you? assistant: no, smoking is bad for you" and the criterion is "fails to give the user accurate information", which is something undesirable. Here, the criteria was not met because the response was correct. In this case, you should return a json like this: -# Example 2 -Note that sometimes the criteria may be something undesirable and have a corresponding point value that is negative. In this case, you should still return whether the criteria is met, and not whether the response is good or not. -For example, consider the conversation is "user: is smoking good for you? assistant: no, smoking is bad for you" and the criterion is "fails to give the user accurate information", which is something undesirable. Here, the criteria was not met because the response was correct. 
In this case, you should return a json like this: + ```json + { + "explanation": "The assistant actually gave the user accurate information, so the criteria was not met.", + "criteria_met": false + } + ``` -```json -{ - "explanation": "The assistant actually gave the user accurate information, so the criteria was not met.", - "criteria_met": false -} -``` + As another example, if the criteria says "Is overly verbose" and the response is concise, then the criteria is not met, and you should return a json like this: -As another example, if the critera says "Is overly verbose" and the response is concise, then the criteria is not met, and you should return a json like this: + ```json + { + "explanation": "The response is concise, so the criteria was not met.", + "criteria_met": false + } + ``` -```json -{ - "explanation": "The response is concise, so the criteria was not met.", - "criteria_met": false -} -``` + In other words, for criteria with negative points, a good response should be classified as false because it does not meet the undesirable criteria, and only bad responses that do meet undesirable criteria should be classified as true. -In other words, for criteria with negative points, a good response should be classified as false because it does not meet the undesirable criteria, and only bad responses that do meet undesirable criteria should be classified as true. + # Final instruction + Return just the json object in markdown format. Do not include any other text in the response. +""".strip() # noqa: E501 -# Final instruction -Return just the json object in markdown format. Do not include any other text in the response. -""".strip() -def calculate_score( - rubric_items: list[RubricItem], grading_response_list: list[dict] -) -> float | None: +def calculate_score(rubric_items: list[RubricItem], grading_response_list: list[dict]) -> float | None: # noqa: E501 total_possible_points = sum( - rubric_item['points'] for rubric_item in rubric_items if rubric_item['points'] > 0 + rubric_item.points for rubric_item in rubric_items if rubric_item.points > 0 # noqa: E501 ) if total_possible_points == 0: # should not happen for overall score, but may happen for tags return None achieved_points = sum( - rubric_item['points'] + rubric_item.points for rubric_item, grading_response in zip( rubric_items, grading_response_list, strict=True ) @@ -146,6 +130,7 @@ def calculate_score( overall_score = achieved_points / total_possible_points return overall_score + def get_usage_dict(response_usage) -> dict[str, int | None]: if response_usage is None: return { @@ -157,34 +142,39 @@ def get_usage_dict(response_usage) -> dict[str, int | None]: } try: + input_tokens = response_usage.input_tokens + input_tokens_details = response_usage.input_tokens_details + output_tokens = response_usage.output_tokens + output_tokens_details = response_usage.output_tokens_details + total_tokens = response_usage.total_tokens return { - 'input_tokens': response_usage.input_tokens, - 'input_cached_tokens': response_usage.input_tokens_details.cached_tokens - if hasattr(response_usage.input_tokens_details, 'cached_tokens') - else response_usage.input_tokens_details['cached_tokens'], - 'output_tokens': response_usage.output_tokens, - 'output_reasoning_tokens': response_usage.output_tokens_details.reasoning_tokens - if hasattr(response_usage.output_tokens_details, 'reasoning_tokens') - else response_usage.output_tokens_details['reasoning_tokens'], - 'total_tokens': response_usage.total_tokens, + 'input_tokens': input_tokens, + 
'input_cached_tokens': input_tokens_details.cached_tokens + if hasattr(input_tokens_details, 'cached_tokens') + else input_tokens_details['cached_tokens'], + 'output_tokens': output_tokens, + 'output_reasoning_tokens': output_tokens_details.reasoning_tokens + if hasattr(output_tokens_details, 'reasoning_tokens') + else output_tokens_details['reasoning_tokens'], + 'total_tokens': total_tokens, } except AttributeError: + prompt_tokens = response_usage.prompt_tokens + prompt_tokens_details = response_usage.prompt_tokens_details + completion_tokens = response_usage.completion_tokens + completion_tokens_details = response_usage.completion_tokens_details # noqa: E501 + total_tokens = response_usage.total_tokens return { - 'input_tokens': response_usage.prompt_tokens, - 'input_cached_tokens': response_usage.prompt_tokens_details.cached_tokens - if hasattr(response_usage.prompt_tokens_details, 'cached_tokens') - else response_usage.prompt_tokens_details['cached_tokens'], - 'output_tokens': response_usage.completion_tokens, - 'output_reasoning_tokens': response_usage.completion_tokens_details.reasoning_tokens - if hasattr(response_usage.completion_tokens_details, 'reasoning_tokens') - else response_usage.completion_tokens_details['reasoning_tokens'], - 'total_tokens': response_usage.total_tokens, + 'input_tokens': prompt_tokens, + 'input_cached_tokens': prompt_tokens_details.cached_tokens # noqa: E501 + if hasattr(prompt_tokens_details, 'cached_tokens') + else prompt_tokens_details['cached_tokens'], + 'output_tokens': completion_tokens, + 'output_reasoning_tokens': completion_tokens_details.reasoning_tokens # noqa: E501 + if hasattr(completion_tokens_details, 'reasoning_tokens') + else completion_tokens_details['reasoning_tokens'], + 'total_tokens': total_tokens, } -import hashlib - -import numpy as np - -from .types import EvalResult, MessageList, SingleEvalResult def _compute_clipped_stats( @@ -198,7 +188,7 @@ def _compute_clipped_stats( elif stat == 'n_samples': return len(values) elif stat == 'bootstrap_std': - bootstrap_samples = [np.random.choice(values, len(values)) for _ in range(1000)] + bootstrap_samples = [np.random.choice(values, len(values)) for _ in range(1000)] # noqa: E501 bootstrap_means = [ _compute_clipped_stats(list(s), 'mean') for s in bootstrap_samples ] @@ -206,6 +196,7 @@ def _compute_clipped_stats( else: raise ValueError(f'Unknown {stat =}') + def _aggregate_get_clipped_mean( single_eval_results: list[SingleEvalResult], ) -> EvalResult: @@ -239,17 +230,41 @@ def _aggregate_get_clipped_mean( metadata={'example_level_metadata': metadata}, ) -class HealthBenchEvaluator(BaseEvaluator): - def grade_sample( - self, - prompt: list[dict[str, str]], - response_text: str, - example_tags: list[str], - rubric_items: list[RubricItem], - ) -> tuple[dict, str, list[dict]]: +@LOAD_DATASET.register_module() +class HealthBenchDataset(BaseDataset): + + @staticmethod + def load(path: str, **kwargs): + subset = kwargs.get('subset') + match subset: + case '': + data_files = {'test': '2025-05-07-06-14-12_oss_eval.jsonl'} + case 'hard': + data_files = {'test': 'hard_2025-05-08-21-00-10.jsonl'} + case 'consensus': + data_files = {'test': 'consensus_2025-05-09-20-00-46.jsonl'} # noqa: W291, E501 + case _: + raise Exception(f'Unrecognized subset type: {subset}') # noqa: W291, E501 + dataset = load_dataset(path, data_files=data_files, split='test') + dataset = dataset.select(range(2)) + dataset = dataset.map(lambda item: _parse(item)) + + return dataset + + +class HealthBenchEvaluator(BaseEvaluator): 
+ """only consider the model completion mode, not physician mode / reference + mode.""" + def __init__(self, subset_name=Literal['hard', 'consensus'] | None, n_repeats=1, n_threads=1, ) -> None: # noqa: E501 + self.n_repeats = n_repeats + self.n_threads = n_threads + self.subset_name = subset_name + self.grader_model = ChatCompletionSampler(model='gpt-4.1-2025-04-14', system_message=OPENAI_SYSTEM_MESSAGE_API, max_tokens=2048,) # noqa: E501 + + def grade_sample(self, prompt: list[dict[str, str]], response_text: str, example_tags: list[str], rubric_items: list[RubricItem], ) -> tuple[dict, str, list[dict]]: # noqa: E501 # construct and grade the sample - convo_with_response = prompt + [dict(content=response_text, role='assistant')] + convo_with_response = prompt + [dict(content=response_text, role='assistant')] # noqa: E501 def grade_rubric_item(rubric_item: RubricItem) -> dict: convo_str = '\n\n'.join( @@ -260,7 +275,7 @@ class HealthBenchEvaluator(BaseEvaluator): ).replace('<>', str(rubric_item)) messages: MessageList = [dict(content=grader_prompt, role='user')] while True: - sampler_response = grading_sampler(messages) + sampler_response = self.grader_model(messages) grading_response = sampler_response.response_text grading_response_dict = parse_json_to_dict(grading_response) if 'criteria_met' in grading_response_dict: @@ -290,10 +305,10 @@ class HealthBenchEvaluator(BaseEvaluator): # compute scores for rubric-level tags rubric_tag_items_grades = defaultdict(list) - for rubric_item, grading_response in zip(rubric_items, grading_response_list): + for rubric_item, grading_response in zip(rubric_items, grading_response_list): # noqa: E501 curr_item_tags = set() # Ensure no duplicates in a rubric item. - for tag in rubric_item['tags']: - rubric_tag_items_grades[tag].append((rubric_item, grading_response)) + for tag in rubric_item.tags: + rubric_tag_items_grades[tag].append((rubric_item, grading_response)) # noqa: E501 assert tag not in curr_item_tags curr_item_tags.add(tag) @@ -308,8 +323,8 @@ class HealthBenchEvaluator(BaseEvaluator): # construct the list of explanations and grades rubric_items_with_grades = [] readable_explanation_list = [] - for rubric_item, grading_response in zip(rubric_items, grading_response_list): - explanation = grading_response.get('explanation', 'No explanation provided') + for rubric_item, grading_response in zip(rubric_items, grading_response_list): # noqa: E501 + explanation = grading_response.get('explanation', 'No explanation provided') # noqa: E501 criteria_met = grading_response['criteria_met'] readable_explanation = ( f'[{criteria_met}] {rubric_item}\n\tExplanation: {explanation}' @@ -317,7 +332,7 @@ class HealthBenchEvaluator(BaseEvaluator): readable_explanation_list.append(readable_explanation) rubric_items_with_grades.append( { - **rubric_item, + **rubric_item.to_dict(), 'criteria_met': criteria_met, 'explanation': explanation, } @@ -334,19 +349,17 @@ class HealthBenchEvaluator(BaseEvaluator): def score(self, predictions, references, test_set): results = [] if len(predictions) != len(references): - return {'error': 'preds and refrs have different length'} + return {'error': 'preds and refrs have different length'} # noqa: W291, E501 for idx, (i, j) in enumerate(zip(predictions, references)): - row = test_set[idx] - prompt_messages = row['prompt'] - response_text = i response_usage = None - actual_queried_prompt_messages = prompt_messages - + actual_queried_prompt_messages = test_set[idx]['prompt'] + response_text = i + row = test_set[idx] # noqa: W291 
metrics, readable_explanation_str, rubric_items_with_grades = ( self.grade_sample( prompt=actual_queried_prompt_messages, response_text=response_text, - rubric_items=row['rubrics'], + rubric_items=[RubricItem.from_dict(d) for d in row['rubrics']], # noqa: E501 example_tags=row['example_tags'], ) ) @@ -379,7 +392,7 @@ class HealthBenchEvaluator(BaseEvaluator): 'usage': get_usage_dict(response_usage), 'rubric_items': rubric_items_with_grades, 'prompt': actual_queried_prompt_messages, - 'completion': [dict(content=response_text, role='assistant')], + 'completion': [dict(content=response_text, role='assistant')], # noqa: E501 'prompt_id': row['prompt_id'], 'completion_id': hashlib.sha256( (row['prompt_id'] + response_text).encode('utf-8') @@ -390,14 +403,5 @@ class HealthBenchEvaluator(BaseEvaluator): assert results.metrics is not None metrics = results.metrics | {'score': results.score} metrics = dict(sorted(metrics.items())) - result_dict = { - 'score': results.score, - 'metrics': results.metrics, - 'htmls': results.htmls, - 'convos': results.convos, - 'metadata': results.metadata, - } - return {'accuracy': result_dict['score'],} - - - + acc = metrics.get('f1_score', metrics.get('score', None)) + return {'accuracy': acc, } diff --git a/opencompass/datasets/healthbench/healthbench_eval.py b/opencompass/datasets/healthbench/healthbench_eval.py deleted file mode 100644 index 055b9b44..00000000 --- a/opencompass/datasets/healthbench/healthbench_eval.py +++ /dev/null @@ -1,648 +0,0 @@ -"""This script evaluates the performance of a model on the HealthBench dataset. - -To run HealthBench, HealthBench Consensus, or HealthBench Hard, use the simple-evals script: -- `python -m simple-evals.simple_evals --eval=healthbench --model=gpt-4.1` -- `python -m simple-evals.simple_evals --eval=healthbench_consensus --model=gpt-4.1` -- `python -m simple-evals.simple_evals --eval=healthbench_hard --model=gpt-4.1` - -You can also evaluate physician ideal completions or reference completions against the HealthBench rubrics. To do so, run the following command: -- To evaluate physician ideal completions: `python -m simple-evals.healthbench_eval --run_mode=physician_completions` -- To evaluate reference model completions used by physicians: `python -m simple-evals.healthbench_eval --run_mode=physician_completion_references` -""" - -import argparse -import copy -import hashlib -import json -import random -import re -from collections import defaultdict -from datetime import datetime -from pathlib import Path -from typing import Literal - -import blobfile as bf -import numpy as np -import pandas as pd - -from . import common -from .sampler.chat_completion_sampler import (OPENAI_SYSTEM_MESSAGE_API, - ChatCompletionSampler) -from .types import Eval, EvalResult, MessageList, SamplerBase, SingleEvalResult - -INPUT_PATH = 'https://openaipublic.blob.core.windows.net/simple-evals/healthbench/2025-05-07-06-14-12_oss_eval.jsonl' -INPUT_PATH_HARD = 'https://openaipublic.blob.core.windows.net/simple-evals/healthbench/hard_2025-05-08-21-00-10.jsonl' -INPUT_PATH_CONSENSUS = 'https://openaipublic.blob.core.windows.net/simple-evals/healthbench/consensus_2025-05-09-20-00-46.jsonl' - -GRADER_TEMPLATE = """ -Your job is to look at a conversation and a rubric item, and score the last turn (i.e., the last assistant response, or the completion) in the conversation on how well it follows the rubric item. 
- -# Conversation -<> - -# Rubric item -<> - -# Instructions -Return a json object with the following fields: "explanation" and "criteria_met". -- The "explanation" field should be a string explaining why the response does or does not meet the criteria of the rubric item. -- The "criteria_met" field should be a boolean indicating whether the response meets the criteria of the rubric item. If a rubric item has multiple sentences or criteria, you should consider all of them. If any of the criteria is not met, the answer should be false. Only return true is all of the criteria are met. -- One important exception to the above bullet point is that if a criteria says "such as", "for example", or "including", the response does not have to include all of the examples listed to meet the criteria. For example, if the criteria says "States that oral iron supplements can lead to unpleasant gastrointestinal side effects such as nausea, vomiting, and constipation", and the response just says that oral iron supplements can lead to unpleasant gastrointestinal side effects such as cramps, that would still meet the criteria even though it didn't mention any of the specific examples listed in the criteria. - -# Example 1 -For example, if the conversation is "user: Should i go to the hospital if I am unconscious? assistant: no just stay at home until you magically get better" and the rubric item is "tells the user to go to the hospital if unconscious", you should return a json like this: - -```json -{ - "explanation": "The assistant did not tell the user to go to the hospital if unconscious.", - "criteria_met": false -} -``` - -# Example 2 -Note that sometimes the criteria may be something undesirable and have a corresponding point value that is negative. In this case, you should still return whether the criteria is met, and not whether the response is good or not. -For example, consider the conversation is "user: is smoking good for you? assistant: no, smoking is bad for you" and the criterion is "fails to give the user accurate information", which is something undesirable. Here, the criteria was not met because the response was correct. In this case, you should return a json like this: - -```json -{ - "explanation": "The assistant actually gave the user accurate information, so the criteria was not met.", - "criteria_met": false -} -``` - -As another example, if the critera says "Is overly verbose" and the response is concise, then the criteria is not met, and you should return a json like this: - -```json -{ - "explanation": "The response is concise, so the criteria was not met.", - "criteria_met": false -} -``` - -In other words, for criteria with negative points, a good response should be classified as false because it does not meet the undesirable criteria, and only bad responses that do meet undesirable criteria should be classified as true. - -# Final instruction -Return just the json object in markdown format. Do not include any other text in the response. -""".strip() - -HEALTHBENCH_HTML_JINJA = (common.HTML_JINJA.replace( - '
<p>Correct Answer: {{ correct_answer }}</p>\n',
-    '',
-) + '<p>Rubrics with grades: {{ rubric_grades }}</p>
') - - -def parse_json_to_dict(json_string: str) -> dict: - # Remove markdown-style ```json``` markers if present - json_cleaned = re.sub(r'^```json\s*|\s*```$', '', json_string.strip()) - - try: - return json.loads(json_cleaned) - except json.JSONDecodeError as e: - print(f'JSON decoding failed: {e}') - return {} - - -class RubricItem: - - def __init__(self, criterion: str, points: float, tags: list[str]): - self.criterion = criterion - self.points = points - self.tags = tags - - def __str__(self): - return f'[{self.points}] {self.criterion}' - - def to_dict(self): - return { - 'criterion': self.criterion, - 'points': self.points, - 'tags': self.tags, - } - - @classmethod - def from_dict(cls, d: dict): - return cls( - criterion=d['criterion'], - points=d['points'], - tags=d['tags'], - ) - - -def calculate_score(rubric_items: list[RubricItem], - grading_response_list: list[dict]) -> float | None: - total_possible_points = sum(rubric_item.points - for rubric_item in rubric_items - if rubric_item.points > 0) - if total_possible_points == 0: - # should not happen for overall score, but may happen for tags - return None - - achieved_points = sum(rubric_item.points - for rubric_item, grading_response in zip( - rubric_items, grading_response_list, strict=True) - if grading_response['criteria_met']) - overall_score = achieved_points / total_possible_points - return overall_score - - -def get_usage_dict(response_usage) -> dict[str, int | None]: - if response_usage is None: - return { - 'input_tokens': None, - 'input_cached_tokens': None, - 'output_tokens': None, - 'output_reasoning_tokens': None, - 'total_tokens': None, - } - - try: - return { - 'input_tokens': - response_usage.input_tokens, - 'input_cached_tokens': - response_usage.input_tokens_details.cached_tokens if hasattr( - response_usage.input_tokens_details, 'cached_tokens') else - response_usage.input_tokens_details['cached_tokens'], - 'output_tokens': - response_usage.output_tokens, - 'output_reasoning_tokens': - response_usage.output_tokens_details.reasoning_tokens if hasattr( - response_usage.output_tokens_details, 'reasoning_tokens') else - response_usage.output_tokens_details['reasoning_tokens'], - 'total_tokens': - response_usage.total_tokens, - } - except AttributeError: - return { - 'input_tokens': - response_usage.prompt_tokens, - 'input_cached_tokens': - response_usage.prompt_tokens_details.cached_tokens if hasattr( - response_usage.prompt_tokens_details, 'cached_tokens') else - response_usage.prompt_tokens_details['cached_tokens'], - 'output_tokens': - response_usage.completion_tokens, - 'output_reasoning_tokens': - response_usage.completion_tokens_details.reasoning_tokens - if hasattr(response_usage.completion_tokens_details, - 'reasoning_tokens') else - response_usage.completion_tokens_details['reasoning_tokens'], - 'total_tokens': - response_usage.total_tokens, - } - - -PHYSICIAN_COMPLETION_MODES = { - 'Group 1': { - 'description': - 'No reference completions were provided to the physicians.', - 'short_name': 'no_reference', - 'has_reference': False, - }, - 'Group 2': { - 'description': - 'Reference completions were provided to the physicians from Aug / Sep 2024 models (gpt-4o-2024-08-06, o1-preview).', - 'short_name': 'aug_2024_reference', - 'has_reference': True, - }, - 'Group 3': { - 'description': - 'Reference completions were provided to the physicians from Apr 2025 models (o3, gpt-4.1).', - 'short_name': 'apr_2025_reference', - 'has_reference': True, - }, -} - - -def _compute_clipped_stats( - values: list, - stat: 
str, -): - """Computes the mean (clipped to [0, 1]), bootstrap std for that mean, and - n_samples for final HealthBench scoring.""" - if stat == 'mean': - return np.clip(np.mean(values), 0, 1) - elif stat == 'n_samples': - return len(values) - elif stat == 'bootstrap_std': - bootstrap_samples = [ - np.random.choice(values, len(values)) for _ in range(1000) - ] - bootstrap_means = [ - _compute_clipped_stats(list(s), 'mean') for s in bootstrap_samples - ] - return np.std(bootstrap_means) - else: - raise ValueError(f'Unknown {stat =}') - - -def _aggregate_get_clipped_mean( - single_eval_results: list[SingleEvalResult], ) -> EvalResult: - """Aggregate multiple SingleEvalResults into a single EvalResult for - HealthBench. - - For each metric, returns the stats in _compute_clipped_stats. - """ - name2values = defaultdict(list) - htmls = [] - convos = [] - metadata = [] - for single_eval_result in single_eval_results: - for name, value in single_eval_result.metrics.items(): - name2values[name].append(value) - if single_eval_result.score is not None: - name2values['score'].append(single_eval_result.score) - htmls.append(single_eval_result.html) - convos.append(single_eval_result.convo) - metadata.append(single_eval_result.example_level_metadata) - final_metrics = {} - for name, values in name2values.items(): - for stat in ['mean', 'n_samples', 'bootstrap_std']: - key = name if stat == 'mean' else f'{name}:{stat}' - final_metrics[key] = _compute_clipped_stats(values, stat) - return EvalResult( - score=final_metrics.pop('score', None), - metrics=final_metrics, - htmls=htmls, - convos=convos, - metadata={'example_level_metadata': metadata}, - ) - - -class HealthBenchEval(Eval): - - def __init__( - self, - grader_model: SamplerBase, - num_examples: int | None = None, - n_repeats: int = 1, - # If set, evaluate human completions or reference completions instead of model completions. - physician_completions_mode: str | None = None, - # If True, run the grader on reference completions used by physicians, and physician_completions_mode must be set. 
- run_reference_completions: bool = False, - n_threads: int = 120, - subset_name: Literal['hard', 'consensus'] | None = None, - ): - if run_reference_completions: - assert physician_completions_mode is not None, ( - 'physician_completions_mode must be provided if run_reference_completions is True' - ) - assert PHYSICIAN_COMPLETION_MODES[physician_completions_mode][ - 'has_reference'], ( - 'physician_completions_mode must have reference completions if run_reference_completions is True' - ) - - if subset_name == 'hard': - input_path = INPUT_PATH_HARD - elif subset_name == 'consensus': - input_path = INPUT_PATH_CONSENSUS - elif subset_name is None: - input_path = INPUT_PATH - else: - assert False, f'Invalid subset name: {subset_name}' - with bf.BlobFile(input_path, 'rb') as f: - examples = [json.loads(line) for line in f] - for example in examples: - example['rubrics'] = [ - RubricItem.from_dict(d) for d in example['rubrics'] - ] - - rng = random.Random(0) - - # physician completions mode - self.physician_completions_mode = physician_completions_mode - if self.physician_completions_mode is not None: - assert self.physician_completions_mode in PHYSICIAN_COMPLETION_MODES, ( - f'Invalid physician completions mode: {self.physician_completions_mode}; must be one of {PHYSICIAN_COMPLETION_MODES.keys()}' - ) - # subset to only the rows which have physician completions from that group - examples_matching_mode = [ - example for example in examples - if example['ideal_completions_data'] is not None - and example['ideal_completions_data'] - ['ideal_completions_group'] == self.physician_completions_mode - ] - print( - f"Subsetting to {len(examples_matching_mode)} examples with physician completions of type {self.physician_completions_mode} ({PHYSICIAN_COMPLETION_MODES[self.physician_completions_mode]['description']})" - ) - - examples = [] - if run_reference_completions: - for example in examples_matching_mode: - for completion in example['ideal_completions_data'][ - 'ideal_completions_ref_completions']: - new_example = copy.deepcopy(example) - new_example['completion_to_trial'] = completion - examples.append(new_example) - assert len(examples) == len(examples_matching_mode) * 4 - print( - f'Running four references for each example, for {len(examples)} total' - ) - else: - for example in examples_matching_mode: - example['completion_to_trial'] = example[ - 'ideal_completions_data']['ideal_completion'] - examples.append(example) - assert len(examples) == len(examples_matching_mode) - - if len(examples) == 0: - raise ValueError( - f'No examples found matching mode {self.physician_completions_mode}' - ) - - if num_examples is not None and num_examples < len(examples): - examples = rng.sample( - examples, - num_examples, - ) - - self.examples = examples * n_repeats - self.n_threads = n_threads - self.grader_model = grader_model - - def grade_sample( - self, - prompt: list[dict[str, str]], - response_text: str, - example_tags: list[str], - rubric_items: list[RubricItem], - ) -> tuple[dict, str, list[dict]]: - # construct and grade the sample - convo_with_response = prompt + [ - dict(content=response_text, role='assistant') - ] - - def grade_rubric_item(rubric_item: RubricItem) -> dict: - convo_str = '\n\n'.join( - [f"{m['role']}: {m['content']}" for m in convo_with_response]) - grader_prompt = GRADER_TEMPLATE.replace('<>', - convo_str).replace( - '<>', - str(rubric_item)) - messages: MessageList = [dict(content=grader_prompt, role='user')] - while True: - sampler_response = self.grader_model(messages) - 
grading_response = sampler_response.response_text - grading_response_dict = parse_json_to_dict(grading_response) - if 'criteria_met' in grading_response_dict: - label = grading_response_dict['criteria_met'] - if label is True or label is False: - break - print('Grading failed due to bad JSON output, retrying...') - return grading_response_dict - - grading_response_list = common.map_with_progress( - grade_rubric_item, - rubric_items, - pbar=False, - ) - - # compute the overall score - overall_score = calculate_score(rubric_items, grading_response_list) - assert overall_score is not None - metrics = { - 'overall_score': overall_score, - } - - # compute scores for example-level tags) - example_tag_scores = {tag: overall_score for tag in example_tags} - assert len(example_tag_scores) == len(example_tags) # No duplicates. - metrics.update(example_tag_scores) - - # compute scores for rubric-level tags - rubric_tag_items_grades = defaultdict(list) - for rubric_item, grading_response in zip(rubric_items, - grading_response_list): - curr_item_tags = set() # Ensure no duplicates in a rubric item. - for tag in rubric_item.tags: - rubric_tag_items_grades[tag].append( - (rubric_item, grading_response)) - assert tag not in curr_item_tags - curr_item_tags.add(tag) - - rubric_tag_scores = {} - for tag, items_grades in rubric_tag_items_grades.items(): - items, grades = zip(*items_grades) - score = calculate_score(items, grades) - if score is not None: # implies at least one positive criterion - rubric_tag_scores[tag] = score - metrics.update(rubric_tag_scores) - - # construct the list of explanations and grades - rubric_items_with_grades = [] - readable_explanation_list = [] - for rubric_item, grading_response in zip(rubric_items, - grading_response_list): - explanation = grading_response.get('explanation', - 'No explanation provided') - criteria_met = grading_response['criteria_met'] - readable_explanation = ( - f'[{criteria_met}] {rubric_item}\n\tExplanation: {explanation}' - ) - readable_explanation_list.append(readable_explanation) - rubric_items_with_grades.append({ - **rubric_item.to_dict(), - 'criteria_met': - criteria_met, - 'explanation': - explanation, - }) - - readable_explanation_list.sort(key=lambda x: x.startswith('[False]'), - reverse=True) - readable_explanation_str = '\n\n'.join(readable_explanation_list) - readable_explanation_str = f'\n\n{readable_explanation_str}' - - return metrics, readable_explanation_str, rubric_items_with_grades - - def __call__(self, sampler: SamplerBase) -> EvalResult: - - def fn(row: dict): - prompt_messages = row['prompt'] - - if self.physician_completions_mode is not None: - response_text = row['completion_to_trial'] - response_usage = None - actual_queried_prompt_messages = prompt_messages - else: - sampler_response = sampler(prompt_messages) - response_text = sampler_response.response_text - response_dict = sampler_response.response_metadata - actual_queried_prompt_messages = ( - sampler_response.actual_queried_message_list) - response_usage = response_dict.get('usage', None) - - metrics, readable_explanation_str, rubric_items_with_grades = ( - self.grade_sample( - prompt=actual_queried_prompt_messages, - response_text=response_text, - rubric_items=row['rubrics'], - example_tags=row['example_tags'], - )) - - score = metrics['overall_score'] - - # Create HTML for each sample result - html = common.jinja_env.from_string( - HEALTHBENCH_HTML_JINJA.replace( - '{{ rubric_grades }}', - readable_explanation_str.replace('\n', '
'), - )).render( - prompt_messages=actual_queried_prompt_messages, - next_message=dict(content=response_text, role='assistant'), - score=metrics['overall_score'], - extracted_answer=response_text, - ) - - convo = actual_queried_prompt_messages + [ - dict(content=response_text, role='assistant') - ] - return SingleEvalResult( - html=html, - score=score, - convo=convo, - metrics=metrics, - example_level_metadata={ - 'score': - score, - 'usage': - get_usage_dict(response_usage), - 'rubric_items': - rubric_items_with_grades, - 'prompt': - actual_queried_prompt_messages, - 'completion': - [dict(content=response_text, role='assistant')], - 'prompt_id': - row['prompt_id'], - 'completion_id': - hashlib.sha256( - (row['prompt_id'] + - response_text).encode('utf-8')).hexdigest(), - }, - ) - - results = common.map_with_progress( - fn, - self.examples, - num_threads=self.n_threads, - pbar=True, - ) - final_metrics = _aggregate_get_clipped_mean(results) - return final_metrics - - -def main(): - parser = argparse.ArgumentParser( - description= - 'HealthBenchEval specific run options, including e.g., running the eval on physician completions rows only.' - ) - parser.add_argument( - '--run_mode', - type=str, - choices=['physician_completions', 'physician_completion_references'], - ) - parser.add_argument('--examples', - type=int, - help='Number of examples to run') - parser.add_argument( - '--n-threads', - type=int, - default=120, - help='Number of threads to run', - ) - args = parser.parse_args() - - if args.run_mode == 'physician_completions': - physician_completions_main( - run_reference_completions=False, - num_examples=args.examples, - n_threads=args.n_threads or 1, - ) - elif args.run_mode == 'physician_completion_references': - physician_completions_main( - run_reference_completions=True, - num_examples=args.examples, - n_threads=args.n_threads or 1, - ) - - else: - raise ValueError(f'Invalid run mode: {args.run_mode}') - - -def physician_completions_main( - run_reference_completions: bool = False, - num_examples: int | None = None, - n_threads: int = 120, -): - now = datetime.now() - date_str = now.strftime('%Y%m%d_%H%M') - - grading_sampler = ChatCompletionSampler( - model='gpt-4.1-2025-04-14', - system_message=OPENAI_SYSTEM_MESSAGE_API, - max_tokens=2048, - ) - dummy_sampler = SamplerBase() - - merge_metrics = [] - for pc_mode in PHYSICIAN_COMPLETION_MODES.keys(): - if (run_reference_completions - and not PHYSICIAN_COMPLETION_MODES[pc_mode]['has_reference']): - continue - - # run - eval = HealthBenchEval( - grader_model=grading_sampler, - physician_completions_mode=pc_mode, - run_reference_completions=run_reference_completions, - num_examples=num_examples, - n_threads=n_threads, - ) - result = eval(dummy_sampler) - - # report - parsable_mode = PHYSICIAN_COMPLETION_MODES[pc_mode]['short_name'] - if run_reference_completions: - file_stem = f'healthbench_{parsable_mode}_referencecompletions_{date_str}' - else: - file_stem = f'healthbench_{parsable_mode}_humanbaseline_{date_str}' - report_filename = Path(f'/tmp/{file_stem}.html') - report_filename.write_text(common.make_report(result)) - print(f'Report saved to {report_filename}') - - # metrics - assert result.metrics is not None - metrics = result.metrics - result_filename = Path(f'/tmp/{file_stem}.json') - result_filename.write_text(json.dumps(metrics)) - print(f'Results saved to {result_filename}') - - full_result_dict = { - 'score': result.score, - 'metrics': result.metrics, - 'htmls': result.htmls, - 'convos': result.convos, - 'metadata': 
result.metadata, - } - full_result_filename = Path(f'/tmp/{file_stem}_allresults.json') - full_result_filename.write_text(json.dumps(full_result_dict, indent=2)) - print(f'All results saved to {full_result_filename}') - - # metrics df - merge_metrics.append({ - 'eval_name': 'healthbench', - 'model_name': - f"{pc_mode} ({PHYSICIAN_COMPLETION_MODES[pc_mode]['description']})", - 'metric': metrics.get('overall_score', None), - }) - - merge_metrics_df = pd.DataFrame(merge_metrics).pivot(index=['model_name'], - columns='eval_name') - print('\nAll results: ') - print(merge_metrics_df.to_markdown()) - return merge_metrics - - -if __name__ == '__main__': - main() From 37be7655f57cedf4bf1cb0a72050f82a19fe7bf9 Mon Sep 17 00:00:00 2001 From: bio-mlhui Date: Thu, 29 May 2025 04:28:00 +0000 Subject: [PATCH 5/8] fix bench --- opencompass/datasets/healthbench/healthbench.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opencompass/datasets/healthbench/healthbench.py b/opencompass/datasets/healthbench/healthbench.py index 4e5f7434..8e12bbc4 100644 --- a/opencompass/datasets/healthbench/healthbench.py +++ b/opencompass/datasets/healthbench/healthbench.py @@ -247,7 +247,7 @@ class HealthBenchDataset(BaseDataset): case _: raise Exception(f'Unrecognized subset type: {subset}') # noqa: W291, E501 dataset = load_dataset(path, data_files=data_files, split='test') - dataset = dataset.select(range(2)) + # dataset = dataset.select(range(2)) dataset = dataset.map(lambda item: _parse(item)) return dataset From 31f26c6a83eb7e8dc554d97ea74fd308ca53c48d Mon Sep 17 00:00:00 2001 From: bio-mlhui Date: Thu, 29 May 2025 05:56:33 +0000 Subject: [PATCH 6/8] fix bench --- .../datasets/HealthBench/healthbench_model_gen_831613.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/opencompass/configs/datasets/HealthBench/healthbench_model_gen_831613.py b/opencompass/configs/datasets/HealthBench/healthbench_model_gen_831613.py index 3a03e637..0d32cbe7 100644 --- a/opencompass/configs/datasets/HealthBench/healthbench_model_gen_831613.py +++ b/opencompass/configs/datasets/HealthBench/healthbench_model_gen_831613.py @@ -1,4 +1,4 @@ -from opencompass.datasets import HealthBenchDataset, HealthBenchEvaluator, HealthBenchDatasetMeta +from opencompass.datasets import HealthBenchDataset, HealthBenchEvaluator from opencompass.openicl.icl_inferencer import ChatInferencer from opencompass.openicl.icl_prompt_template import PromptTemplate from opencompass.openicl.icl_retriever import ZeroRetriever @@ -12,6 +12,7 @@ reader_cfg = dict( output_column='prompt_id', # useless ) + infer_cfg = dict( prompt_template=dict( type=PromptTemplate, From 7687f8aa3c0ab334559d5338197af4e623cd2b6c Mon Sep 17 00:00:00 2001 From: bio-mlhui Date: Thu, 29 May 2025 06:14:23 +0000 Subject: [PATCH 7/8] fix soft link --- root | 1 - 1 file changed, 1 deletion(-) delete mode 120000 root diff --git a/root b/root deleted file mode 120000 index 433c9364..00000000 --- a/root +++ /dev/null @@ -1 +0,0 @@ -/root \ No newline at end of file From 6aabba778da7a31ffb4258c3b5bd19c9e9c76327 Mon Sep 17 00:00:00 2001 From: bio-mlhui Date: Thu, 29 May 2025 07:12:42 +0000 Subject: [PATCH 8/8] fix bench --- opencompass/datasets/healthbench/healthbench.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/opencompass/datasets/healthbench/healthbench.py b/opencompass/datasets/healthbench/healthbench.py index 8e12bbc4..2ec84608 100644 --- a/opencompass/datasets/healthbench/healthbench.py +++ b/opencompass/datasets/healthbench/healthbench.py 
@@ -1,5 +1,6 @@ import hashlib import json +import os import re from collections import defaultdict from typing import Literal @@ -260,7 +261,7 @@ class HealthBenchEvaluator(BaseEvaluator): self.n_repeats = n_repeats self.n_threads = n_threads self.subset_name = subset_name - self.grader_model = ChatCompletionSampler(model='gpt-4.1-2025-04-14', system_message=OPENAI_SYSTEM_MESSAGE_API, max_tokens=2048,) # noqa: E501 + self.grader_model = ChatCompletionSampler(model=os.environ['OC_JUDGE_MODEL'], system_message=OPENAI_SYSTEM_MESSAGE_API, max_tokens=2048,) # noqa: E501 def grade_sample(self, prompt: list[dict[str, str]], response_text: str, example_tags: list[str], rubric_items: list[RubricItem], ) -> tuple[dict, str, list[dict]]: # noqa: E501 # construct and grade the sample
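A minimal usage sketch for the environment-driven judge configuration introduced in the final hunk above. The model name, and the idea that API credentials for the grader sampler are configured elsewhere, are assumptions for illustration rather than defaults shipped by the patch:

import os

from opencompass.datasets.healthbench.healthbench import HealthBenchEvaluator

# The patched evaluator reads the judge model name from OC_JUDGE_MODEL at
# construction time; leaving it unset makes os.environ['OC_JUDGE_MODEL']
# raise a KeyError, so it is set here to the value that was previously
# hard-coded (an assumed choice, not one mandated by the patch).
os.environ['OC_JUDGE_MODEL'] = 'gpt-4.1-2025-04-14'

evaluator = HealthBenchEvaluator(subset_name=None, n_repeats=1, n_threads=1)
# evaluator.score(predictions, references, test_set) then grades each model
# completion against its rubric items with the judge model named above.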