fix bench

bio-mlhui 2025-05-29 04:23:49 +00:00
parent 7c6d788dca
commit c4f219246c
4 changed files with 186 additions and 1143 deletions

View File

@@ -1,4 +1,4 @@
from opencompass.datasets import HealthBenchDataset, HealthBenchEvaluator
from opencompass.datasets import HealthBenchDataset, HealthBenchEvaluator, HealthBenchDatasetMeta
from opencompass.openicl.icl_inferencer import ChatInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
@@ -7,7 +7,7 @@ from opencompass.openicl.icl_retriever import ZeroRetriever
# Reader configuration
reader_cfg = dict(
input_columns=[
'prompt_trans'
'prompt_trans',
],
output_column='prompt_id', # useless
)
@@ -19,7 +19,7 @@ infer_cfg = dict(
round=[
dict(
role='HUMAN',
prompt='{prompt}', # prompt mode: zero-shot
prompt='{prompt_trans}', # prompt mode: zero-shot
),
],
),
@@ -29,18 +29,18 @@ infer_cfg = dict(
)
# Evaluation configuration
eval_cfg = dict(
evaluator=dict(type=HealthBenchEvaluator),
pred_role='BOT',
)
healthbench_vanilla_dataset = dict(
healthbench_dataset = dict(
type=HealthBenchDataset,
abbr='healthbench_vanilla',
abbr='healthbench',
path='huihuixu/healthbench',
subset='vanilla',
subset='',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg,
eval_cfg=dict(
evaluator=dict(type=HealthBenchEvaluator, n_repeats=1, n_threads=1, subset_name=''),
pred_role='BOT',
),
)
healthbench_hard_dataset = dict(
type=HealthBenchDataset,
@@ -49,7 +49,10 @@ healthbench_hard_dataset = dict(
subset='hard',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg,
eval_cfg=dict(
evaluator=dict(type=HealthBenchEvaluator, n_repeats=1, n_threads=1, subset_name='hard'),
pred_role='BOT',
),
)
healthbench_consensus_dataset = dict(
type=HealthBenchDataset,
@@ -58,7 +61,22 @@ healthbench_consensus_dataset = dict(
subset='consensus',
reader_cfg=reader_cfg,
infer_cfg=infer_cfg,
eval_cfg=eval_cfg,
eval_cfg=dict(
evaluator=dict(type=HealthBenchEvaluator, n_repeats=1, n_threads=1, subset_name='consensus'),
pred_role='BOT',
),
)
# healthbench_meta_dataset = dict(
# type=HealthBenchDatasetMeta,
# abbr='healthbench_meta',
# path='huihuixu/healthbench',
# subset='meta',
# reader_cfg=reader_cfg,
# infer_cfg=infer_cfg,
# eval_cfg=dict(
# evaluator=dict(type=HealthBenchEvaluator, n_repeats=1, n_threads=1, subset_name=''),
# pred_role='BOT',
# ),
# )
healthbench_all_datasets = [healthbench_vanilla_dataset, healthbench_hard_dataset, healthbench_consensus_dataset ]
healthbench_all_datasets = [healthbench_dataset, healthbench_hard_dataset, healthbench_consensus_dataset, ] # healthbench_meta_dataset ]
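Not part of this commit, but for context: a minimal sketch of how the combined list would typically be pulled into an OpenCompass run config. The config module path below is hypothetical; the real filename of this dataset config is not shown in the diff.

from mmengine.config import read_base

with read_base():
    # hypothetical import path for the dataset config edited above
    from .healthbench_gen import healthbench_all_datasets

datasets = [*healthbench_all_datasets]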

View File

@@ -1,331 +0,0 @@
"""
DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs
Dheeru Dua, Yizhong Wang, Pradeep Dasigi, Gabriel Stanovsky, Sameer Singh, Matt Gardner
https://arxiv.org/abs/1903.00161
"""
import gzip
import json
import random
import re
import string
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import numpy as np
from scipy.optimize import linear_sum_assignment
from . import common
from .common import ANSWER_PATTERN, HTML_JINJA
from .types import Eval, EvalResult, SamplerBase, SingleEvalResult
"""
From here through _normalize_answer was originally copied from:
https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
Then cleaned up and modified a bit.
The rest was originally copied from https://github.com/allenai/allennlp-reading-comprehension/blob/master/allennlp_rc
/eval/drop_eval.py
"""
def _remove_articles(text: str) -> str:
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def _white_space_fix(text: str) -> str:
return ' '.join(text.split())
EXCLUDE = set(string.punctuation)
def _remove_punc(text: str) -> str:
if not _is_number(text):
return ''.join(ch for ch in text if ch not in EXCLUDE)
else:
return text
def _lower(text: str) -> str:
return text.lower()
def _tokenize(text: str) -> List[str]:
return re.split(' |-', text)
def _normalize_answer(text: str) -> str:
"""Lower text and remove punctuation, articles and extra whitespace."""
parts = [
_white_space_fix(
_remove_articles(_normalize_number(_remove_punc(_lower(token)))))
for token in _tokenize(text)
]
parts = [part for part in parts if part.strip()]
normalized = ' '.join(parts).strip()
return normalized
def _is_number(text: str) -> bool:
try:
float(text)
return True
except ValueError:
return False
def _normalize_number(text: str) -> str:
if _is_number(text):
return str(float(text))
else:
return text
def _answer_to_bags(
answer: Union[str, List[str], Tuple[str, ...]]
) -> Tuple[List[str], List[Set[str]]]:
if isinstance(answer, (list, tuple)):
raw_spans = answer
else:
raw_spans = [answer]
normalized_spans: List[str] = []
token_bags = []
for raw_span in raw_spans:
normalized_span = _normalize_answer(raw_span)
normalized_spans.append(normalized_span)
token_bags.append(set(normalized_span.split()))
return normalized_spans, token_bags
def _align_bags(predicted: List[Set[str]],
gold: List[Set[str]]) -> List[float]:
"""Takes gold and predicted answer sets and first finds the optimal 1-1
alignment between them and gets maximum metric values over all the
answers."""
scores = np.zeros([len(gold), len(predicted)])
for gold_index, gold_item in enumerate(gold):
for pred_index, pred_item in enumerate(predicted):
if _match_numbers_if_present(gold_item, pred_item):
scores[gold_index,
pred_index] = _compute_f1(pred_item, gold_item)
row_ind, col_ind = linear_sum_assignment(-scores)
max_scores = np.zeros([max(len(gold), len(predicted))])
for row, column in zip(row_ind, col_ind):
max_scores[row] = max(max_scores[row], scores[row, column])
return max_scores
def _compute_f1(predicted_bag: Set[str], gold_bag: Set[str]) -> float:
intersection = len(gold_bag.intersection(predicted_bag))
if not predicted_bag:
precision = 1.0
else:
precision = intersection / float(len(predicted_bag))
if not gold_bag:
recall = 1.0
else:
recall = intersection / float(len(gold_bag))
f1 = ((2 * precision * recall) / (precision + recall)
if not (precision == 0.0 and recall == 0.0) else 0.0) * 100
return f1
def _match_numbers_if_present(gold_bag: Set[str],
predicted_bag: Set[str]) -> bool:
gold_numbers = set()
predicted_numbers = set()
for word in gold_bag:
if _is_number(word):
gold_numbers.add(word)
for word in predicted_bag:
if _is_number(word):
predicted_numbers.add(word)
if (not gold_numbers) or gold_numbers.intersection(predicted_numbers):
return True
return False
def get_drop_metrics(
predicted: Union[str, List[str], Tuple[str, ...]],
gold: Union[str, List[str], Tuple[str, ...]]) -> Tuple[float, float]:
"""Takes a predicted answer and a gold answer (that are both either a
string or a list of strings), and returns exact match and the DROP F1
metric for the prediction.
If you are
writing a script for evaluating objects in memory (say, the output of predictions during
validation, or while training), this is the function you want to call, after using
:func:`answer_json_to_strings` when reading the gold answer from the released data file.
"""
predicted_bags = _answer_to_bags(predicted)
gold_bags = _answer_to_bags(gold)
if set(predicted_bags[0]) == set(gold_bags[0]) and len(
predicted_bags[0]) == len(gold_bags[0]):
exact_match = 1.0
else:
exact_match = 0.0
f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1])
f1 = np.mean(f1_per_bag)
f1 = round(f1, 2)
return exact_match, f1
def answer_json_to_strings(
answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]:
"""Takes an answer JSON blob from the DROP data release and converts it
into strings used for evaluation."""
if 'number' in answer and answer['number']:
return tuple([str(answer['number'])]), 'number'
elif 'spans' in answer and answer['spans']:
return tuple(
answer['spans']), 'span' if len(answer['spans']) == 1 else 'spans'
elif 'date' in answer:
return (
tuple([
'{0} {1} {2}'.format(answer['date']['day'],
answer['date']['month'],
answer['date']['year']).strip()
]),
'date',
)
else:
raise ValueError(
f'Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}'
)
def answer_json_to_string(answer_json):
return json.dumps(answer_json_to_strings(answer_json))
def normalize(s: str) -> str:
"""Lower text and remove punctuation, articles and extra whitespace."""
s = s.lower()
exclude = set(string.punctuation)
s = ''.join(char for char in s if char not in exclude)
s = re.sub(r'\b(a|an|the)\b', ' ', s)
s = ' '.join(s.split())
return s
def fuzzy_match(s1: str, s2: str) -> bool:
s1 = normalize(s1)
s2 = normalize(s2)
if s1 == '' or s2 == '':
return s1 == s2
return s1 in s2 or s2 in s1
def drop_metric(sample: str, reference: list[str]) -> Tuple[float, float]:
em_scores = []
f1_scores = []
for answer in reference:
if answer.strip() != '':
em, f1 = get_drop_metrics(sample, answer)
em_scores.append(em)
f1_scores.append(f1)
return (max(em_scores), max(f1_scores))
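As a small illustration (answers invented), drop_metric takes the best exact-match and F1 over all reference answers:

# '6 yards' matches the first reference exactly after normalization (em=1.0, f1=100.0);
# against 'six yards' only 'yards' overlaps (f1=50.0), and the max over references is kept.
em, f1 = drop_metric('6 yards', ['6 yards', 'six yards'])
print(em, f1)  # 1.0 100.0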
class DropEval(Eval):
def __init__(self,
num_examples: int | None = None,
train_samples_per_prompt: int = 3):
self.seed = 42
self._num_examples = num_examples
self._train_samples_per_prompt = train_samples_per_prompt
self.train_jsonl = (
'https://openaipublic.blob.core.windows.net/simple-evals/drop_v0_train.jsonl.gz'
)
self.test_jsonl = (
'https://openaipublic.blob.core.windows.net/simple-evals/drop_v0_dev.jsonl.gz'
)
with gzip.GzipFile(fileobj=common.url_to_fileobj(self.train_jsonl,
binary=True),
mode='rb') as f:
self.train_samples = list(map(json.loads, f.readlines()))
with gzip.GzipFile(fileobj=common.url_to_fileobj(self.test_jsonl,
binary=True),
mode='rb') as f:
self.test_samples = list(map(json.loads, f.readlines()))
if self._num_examples:
self.test_samples = random.Random(self.seed).sample(
self.test_samples, self._num_examples)
def __call__(self, sampler: SamplerBase) -> EvalResult:
rng = random.Random(self.seed)
def fn(example: dict[str, str]):
stuffing = rng.sample(self.train_samples,
self._train_samples_per_prompt)
# prompt = """TASK: Read the provided passage, then identify the correct answer to questions below."""
prompt = """You will be asked to read a passage and answer a question. Some examples of passages and Q&A are provided below."""
prompt += '\n\n# Examples'
samples = stuffing + [example]
for i, sample in enumerate(samples):
is_test = i == len(stuffing)
prompt += '\n# Your Task\n' if is_test else ''
prompt += f"""
---
{sample["context"]} """
a = sample['completion']
correct_answers = sample['ref_text'].split('|')
if not is_test:
prompt += a + '\n'
else:
prompt += """\n
Think step by step, then write a line of the form "Answer: $ANSWER" at the end of your response.
"""
prompt_messages = [
sampler._pack_message(content=prompt, role='user')
]
sampler_response = sampler(prompt_messages)
response_text = sampler_response.response_text
actual_queried_prompt_messages = sampler_response.actual_queried_message_list
match = re.search(ANSWER_PATTERN, response_text)
extracted_answer = match.group(
1) if match else response_text
em_score, f1_score = drop_metric(extracted_answer,
correct_answers)
matches = [
fuzzy_match(extracted_answer, correct_answer)
for correct_answer in correct_answers
]
extracted_answers = [
extracted_answer for i in range(len(correct_answers))
if matches[i]
]
score = True in matches
html = common.jinja_env.from_string(HTML_JINJA).render(
prompt_messages=actual_queried_prompt_messages,
next_message=dict(content=extracted_answer,
role='assistant'),
score=score,
correct_answer=correct_answers,
extracted_answer=extracted_answers,
)
convo = actual_queried_prompt_messages + [
dict(content=extracted_answer, role='assistant')
]
return SingleEvalResult(
html=html,
score=score,
convo=convo,
metrics={
'em_score': em_score,
'f1_score': f1_score
},
)
results = common.map_with_progress(fn, self.test_samples)
return common.aggregate_results(results)

View File

@@ -1,6 +1,10 @@
import hashlib
import json
import re
from collections import defaultdict
from typing import Literal
import numpy as np
from datasets import load_dataset
from opencompass.openicl import BaseEvaluator
@@ -8,136 +12,116 @@ from opencompass.registry import LOAD_DATASET
from ..base import BaseDataset
from . import common
from .healthbench_eval import RubricItem
from .sampler.chat_completion_sampler import ChatCompletionSampler
from .types import SingleEvalResult
from .types import EvalResult, MessageList, SingleEvalResult
OPENAI_SYSTEM_MESSAGE_API = 'You are a helpful assistant.'
OPENAI_SYSTEM_MESSAGE_CHATGPT = (
'You are ChatGPT, a large language model trained by OpenAI, based on the GPT-4 architecture.'
+ '\nKnowledge cutoff: 2023-12\nCurrent date: 2024-04-01'
)
max_tokens_judge = 2048
class RubricItem:
def __init__(self, criterion: str, points: float, tags: list[str]):
self.criterion = criterion
self.points = points
self.tags = tags
def __str__(self):
return f'[{self.points}] {self.criterion}'
def to_dict(self):
return {
'criterion': self.criterion,
'points': self.points,
'tags': self.tags,
}
@classmethod
def from_dict(cls, d: dict):
return cls(
criterion=d['criterion'],
points=d['points'],
tags=d['tags'],
)
grading_sampler = ChatCompletionSampler(
model='gpt-4.1-2025-04-14',
system_message=OPENAI_SYSTEM_MESSAGE_API,
max_tokens=2048,
)
def _parse(item):
prompt = item['prompt'] + [dict(role='assistant', content='')]
item['prompt_trans'] = prompt
return item
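For illustration only (the row content below is made up), _parse keeps the original chat turns and appends an empty assistant turn; this is what the '{prompt_trans}' placeholder in the config above ends up rendering:

row = {'prompt': [dict(role='user', content='I have a headache, what should I do?')]}
row = _parse(row)
# row['prompt_trans'] == [
#     {'role': 'user', 'content': 'I have a headache, what should I do?'},
#     {'role': 'assistant', 'content': ''},
# ]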
HEALTHBENCH_HTML_JINJA = (
common.HTML_JINJA.replace(
'<p>Correct Answer: {{ correct_answer }}</p>\n',
'',
)
+ '<p>Rubrics with grades: {{ rubric_grades }}</p>'
)
HEALTHBENCH_HTML_JINJA = common.HTML_JINJA.replace('<p>Correct Answer: {{ correct_answer }}</p>\n', '', ) + '<p>Rubrics with grades: {{ rubric_grades }}</p>' # noqa: E305, E501
def parse_json_to_dict(json_string: str) -> dict:
# Remove markdown-style ```json``` markers if present
json_cleaned = re.sub(r'^```json\s*|\s*```$', '', json_string.strip())
json_cleaned = re.sub(r'^```json\s*|\s*```$', '', json_string.strip()) # noqa: W291, E501
try:
return json.loads(json_cleaned)
except json.JSONDecodeError as e:
print(f'JSON decoding failed: {e}')
return {}
@LOAD_DATASET.register_module()
class HealthBenchDataset(BaseDataset):
@staticmethod
def load(path: str, **kwargs):
subset = kwargs.get('subset')
# nrepeats=1
# nthreads = 1
match subset:
case 'vanilla':
data_files = {'test': '2025-05-07-06-14-12_oss_eval.jsonl'}
case 'hard':
data_files = {'test': 'hard_2025-05-08-21-00-10.jsonl'}
case 'consensus':
data_files = {'test': 'consensus_2025-05-09-20-00-46.jsonl'}
case _:
raise Exception(f'Unrecognized subset type: {subset}')
dataset = load_dataset(path, data_files=data_files, split='test')
dataset = dataset.select(range(2))
dataset = dataset.map(lambda item: _parse(item))
return dataset
from collections import defaultdict
from .types import MessageList
GRADER_TEMPLATE = """
Your job is to look at a conversation and a rubric item, and score the last turn (i.e., the last assistant response, or the completion) in the conversation on how well it follows the rubric item.
Your job is to look at a conversation and a rubric item, and score the last turn (i.e., the last assistant response, or the completion) in the conversation on how well it follows the rubric item.
# Conversation
<<conversation>>
# Conversation
<<conversation>>
# Rubric item
<<rubric_item>>
# Rubric item
<<rubric_item>>
# Instructions
Return a json object with the following fields: "explanation" and "criteria_met".
- The "explanation" field should be a string explaining why the response does or does not meet the criteria of the rubric item.
- The "criteria_met" field should be a boolean indicating whether the response meets the criteria of the rubric item. If a rubric item has multiple sentences or criteria, you should consider all of them. If any of the criteria is not met, the answer should be false. Only return true is all of the criteria are met.
- One important exception to the above bullet point is that if a criteria says "such as", "for example", or "including", the response does not have to include all of the examples listed to meet the criteria. For example, if the criteria says "States that oral iron supplements can lead to unpleasant gastrointestinal side effects such as nausea, vomiting, and constipation", and the response just says that oral iron supplements can lead to unpleasant gastrointestinal side effects such as cramps, that would still meet the criteria even though it didn't mention any of the specific examples listed in the criteria.
# Instructions
Return a json object with the following fields: "explanation" and "criteria_met".
- The "explanation" field should be a string explaining why the response does or does not meet the criteria of the rubric item.
- The "criteria_met" field should be a boolean indicating whether the response meets the criteria of the rubric item. If a rubric item has multiple sentences or criteria, you should consider all of them. If any of the criteria is not met, the answer should be false. Only return true is all of the criteria are met.
- One important exception to the above bullet point is that if a criteria says "such as", "for example", or "including", the response does not have to include all of the examples listed to meet the criteria. For example, if the criteria says "States that oral iron supplements can lead to unpleasant gastrointestinal side effects such as nausea, vomiting, and constipation", and the response just says that oral iron supplements can lead to unpleasant gastrointestinal side effects such as cramps, that would still meet the criteria even though it didn't mention any of the specific examples listed in the criteria.
# Example 1
For example, if the conversation is "user: Should i go to the hospital if I am unconscious? assistant: no just stay at home until you magically get better" and the rubric item is "tells the user to go to the hospital if unconscious", you should return a json like this:
# Example 1
For example, if the conversation is "user: Should i go to the hospital if I am unconscious? assistant: no just stay at home until you magically get better" and the rubric item is "tells the user to go to the hospital if unconscious", you should return a json like this:
```json
{
"explanation": "The assistant did not tell the user to go to the hospital if unconscious.",
"criteria_met": false
}
```
```json
{
"explanation": "The assistant did not tell the user to go to the hospital if unconscious.",
"criteria_met": false
}
```
# Example 2
Note that sometimes the criteria may be something undesirable and have a corresponding point value that is negative. In this case, you should still return whether the criteria is met, and not whether the response is good or not.
For example, consider the conversation is "user: is smoking good for you? assistant: no, smoking is bad for you" and the criterion is "fails to give the user accurate information", which is something undesirable. Here, the criteria was not met because the response was correct. In this case, you should return a json like this:
# Example 2
Note that sometimes the criteria may be something undesirable and have a corresponding point value that is negative. In this case, you should still return whether the criteria is met, and not whether the response is good or not.
For example, consider the conversation is "user: is smoking good for you? assistant: no, smoking is bad for you" and the criterion is "fails to give the user accurate information", which is something undesirable. Here, the criteria was not met because the response was correct. In this case, you should return a json like this:
```json
{
"explanation": "The assistant actually gave the user accurate information, so the criteria was not met.",
"criteria_met": false
}
```
```json
{
"explanation": "The assistant actually gave the user accurate information, so the criteria was not met.",
"criteria_met": false
}
```
As another example, if the criteria says "Is overly verbose" and the response is concise, then the criteria is not met, and you should return a json like this:
As another example, if the criteria says "Is overly verbose" and the response is concise, then the criteria is not met, and you should return a json like this:
```json
{
"explanation": "The response is concise, so the criteria was not met.",
"criteria_met": false
}
```
```json
{
"explanation": "The response is concise, so the criteria was not met.",
"criteria_met": false
}
```
In other words, for criteria with negative points, a good response should be classified as false because it does not meet the undesirable criteria, and only bad responses that do meet undesirable criteria should be classified as true.
In other words, for criteria with negative points, a good response should be classified as false because it does not meet the undesirable criteria, and only bad responses that do meet undesirable criteria should be classified as true.
# Final instruction
Return just the json object in markdown format. Do not include any other text in the response.
""".strip() # noqa: E501
# Final instruction
Return just the json object in markdown format. Do not include any other text in the response.
""".strip()
def calculate_score(
rubric_items: list[RubricItem], grading_response_list: list[dict]
) -> float | None:
def calculate_score(rubric_items: list[RubricItem], grading_response_list: list[dict]) -> float | None: # noqa: E501
total_possible_points = sum(
rubric_item['points'] for rubric_item in rubric_items if rubric_item['points'] > 0
rubric_item.points for rubric_item in rubric_items if rubric_item.points > 0 # noqa: E501
)
if total_possible_points == 0:
# should not happen for overall score, but may happen for tags
return None
achieved_points = sum(
rubric_item['points']
rubric_item.points
for rubric_item, grading_response in zip(
rubric_items, grading_response_list, strict=True
)
@@ -146,6 +130,7 @@ def calculate_score(
overall_score = achieved_points / total_possible_points
return overall_score
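As a worked illustration (rubric items and grades invented here), only positive points enter the denominator, while a met negative-point criterion subtracts from the numerator:

rubric = [
    RubricItem(criterion='advises seeing a doctor', points=5, tags=[]),
    RubricItem(criterion='is overly verbose', points=-3, tags=[]),
    RubricItem(criterion='cites a relevant guideline', points=2, tags=[]),
]
grades = [{'criteria_met': True}, {'criteria_met': True}, {'criteria_met': False}]
# total_possible_points = 5 + 2 = 7, achieved_points = 5 - 3 = 2
print(calculate_score(rubric, grades))  # ~0.286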
def get_usage_dict(response_usage) -> dict[str, int | None]:
if response_usage is None:
return {
@@ -157,34 +142,39 @@ def get_usage_dict(response_usage) -> dict[str, int | None]:
}
try:
input_tokens = response_usage.input_tokens
input_tokens_details = response_usage.input_tokens_details
output_tokens = response_usage.output_tokens
output_tokens_details = response_usage.output_tokens_details
total_tokens = response_usage.total_tokens
return {
'input_tokens': response_usage.input_tokens,
'input_cached_tokens': response_usage.input_tokens_details.cached_tokens
if hasattr(response_usage.input_tokens_details, 'cached_tokens')
else response_usage.input_tokens_details['cached_tokens'],
'output_tokens': response_usage.output_tokens,
'output_reasoning_tokens': response_usage.output_tokens_details.reasoning_tokens
if hasattr(response_usage.output_tokens_details, 'reasoning_tokens')
else response_usage.output_tokens_details['reasoning_tokens'],
'total_tokens': response_usage.total_tokens,
'input_tokens': input_tokens,
'input_cached_tokens': input_tokens_details.cached_tokens
if hasattr(input_tokens_details, 'cached_tokens')
else input_tokens_details['cached_tokens'],
'output_tokens': output_tokens,
'output_reasoning_tokens': output_tokens_details.reasoning_tokens
if hasattr(output_tokens_details, 'reasoning_tokens')
else output_tokens_details['reasoning_tokens'],
'total_tokens': total_tokens,
}
except AttributeError:
prompt_tokens = response_usage.prompt_tokens
prompt_tokens_details = response_usage.prompt_tokens_details
completion_tokens = response_usage.completion_tokens
completion_tokens_details = response_usage.completion_tokens_details # noqa: E501
total_tokens = response_usage.total_tokens
return {
'input_tokens': response_usage.prompt_tokens,
'input_cached_tokens': response_usage.prompt_tokens_details.cached_tokens
if hasattr(response_usage.prompt_tokens_details, 'cached_tokens')
else response_usage.prompt_tokens_details['cached_tokens'],
'output_tokens': response_usage.completion_tokens,
'output_reasoning_tokens': response_usage.completion_tokens_details.reasoning_tokens
if hasattr(response_usage.completion_tokens_details, 'reasoning_tokens')
else response_usage.completion_tokens_details['reasoning_tokens'],
'total_tokens': response_usage.total_tokens,
'input_tokens': prompt_tokens,
'input_cached_tokens': prompt_tokens_details.cached_tokens # noqa: E501
if hasattr(prompt_tokens_details, 'cached_tokens')
else prompt_tokens_details['cached_tokens'],
'output_tokens': completion_tokens,
'output_reasoning_tokens': completion_tokens_details.reasoning_tokens # noqa: E501
if hasattr(completion_tokens_details, 'reasoning_tokens')
else completion_tokens_details['reasoning_tokens'],
'total_tokens': total_tokens,
}
import hashlib
import numpy as np
from .types import EvalResult, MessageList, SingleEvalResult
def _compute_clipped_stats(
@@ -198,7 +188,7 @@ def _compute_clipped_stats(
elif stat == 'n_samples':
return len(values)
elif stat == 'bootstrap_std':
bootstrap_samples = [np.random.choice(values, len(values)) for _ in range(1000)]
bootstrap_samples = [np.random.choice(values, len(values)) for _ in range(1000)] # noqa: E501
bootstrap_means = [
_compute_clipped_stats(list(s), 'mean') for s in bootstrap_samples
]
@@ -206,6 +196,7 @@ def _compute_clipped_stats(
else:
raise ValueError(f'Unknown {stat =}')
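A quick sketch of the clipping behaviour (values invented): per-example scores can be negative, but the aggregated mean is clipped into [0, 1]:

print(_compute_clipped_stats([0.9, 0.4, -0.3], 'mean'))       # ~0.333, already within [0, 1]
print(_compute_clipped_stats([-0.5, -0.1], 'mean'))           # 0.0, negative mean clipped to 0
print(_compute_clipped_stats([0.9, 0.4, -0.3], 'n_samples'))  # 3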
def _aggregate_get_clipped_mean(
single_eval_results: list[SingleEvalResult],
) -> EvalResult:
@@ -239,17 +230,41 @@ def _aggregate_get_clipped_mean(
metadata={'example_level_metadata': metadata},
)
class HealthBenchEvaluator(BaseEvaluator):
def grade_sample(
self,
prompt: list[dict[str, str]],
response_text: str,
example_tags: list[str],
rubric_items: list[RubricItem],
) -> tuple[dict, str, list[dict]]:
@LOAD_DATASET.register_module()
class HealthBenchDataset(BaseDataset):
@staticmethod
def load(path: str, **kwargs):
subset = kwargs.get('subset')
match subset:
case '':
data_files = {'test': '2025-05-07-06-14-12_oss_eval.jsonl'}
case 'hard':
data_files = {'test': 'hard_2025-05-08-21-00-10.jsonl'}
case 'consensus':
data_files = {'test': 'consensus_2025-05-09-20-00-46.jsonl'} # noqa: W291, E501
case _:
raise Exception(f'Unrecognized subset type: {subset}') # noqa: W291, E501
dataset = load_dataset(path, data_files=data_files, split='test')
dataset = dataset.select(range(2))
dataset = dataset.map(lambda item: _parse(item))
return dataset
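A hypothetical direct call for illustration (in practice the registry builds the dataset from the config shown earlier); note that the select(range(2)) above limits loading to a two-example debug slice:

ds = HealthBenchDataset.load('huihuixu/healthbench', subset='hard')
print(len(ds))                            # 2, due to the select(range(2)) slice
print('prompt_trans' in ds.column_names)  # True, added by _parse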
class HealthBenchEvaluator(BaseEvaluator):
"""only consider the model completion mode, not physician mode / reference
mode."""
def __init__(self, subset_name: Literal['hard', 'consensus'] | None = None, n_repeats=1, n_threads=1) -> None:  # noqa: E501
self.n_repeats = n_repeats
self.n_threads = n_threads
self.subset_name = subset_name
self.grader_model = ChatCompletionSampler(model='gpt-4.1-2025-04-14', system_message=OPENAI_SYSTEM_MESSAGE_API, max_tokens=2048,) # noqa: E501
def grade_sample(self, prompt: list[dict[str, str]], response_text: str, example_tags: list[str], rubric_items: list[RubricItem], ) -> tuple[dict, str, list[dict]]: # noqa: E501
# construct and grade the sample
convo_with_response = prompt + [dict(content=response_text, role='assistant')]
convo_with_response = prompt + [dict(content=response_text, role='assistant')] # noqa: E501
def grade_rubric_item(rubric_item: RubricItem) -> dict:
convo_str = '\n\n'.join(
@@ -260,7 +275,7 @@ class HealthBenchEvaluator(BaseEvaluator):
).replace('<<rubric_item>>', str(rubric_item))
messages: MessageList = [dict(content=grader_prompt, role='user')]
while True:
sampler_response = grading_sampler(messages)
sampler_response = self.grader_model(messages)
grading_response = sampler_response.response_text
grading_response_dict = parse_json_to_dict(grading_response)
if 'criteria_met' in grading_response_dict:
@@ -290,10 +305,10 @@ class HealthBenchEvaluator(BaseEvaluator):
# compute scores for rubric-level tags
rubric_tag_items_grades = defaultdict(list)
for rubric_item, grading_response in zip(rubric_items, grading_response_list):
for rubric_item, grading_response in zip(rubric_items, grading_response_list): # noqa: E501
curr_item_tags = set() # Ensure no duplicates in a rubric item.
for tag in rubric_item['tags']:
rubric_tag_items_grades[tag].append((rubric_item, grading_response))
for tag in rubric_item.tags:
rubric_tag_items_grades[tag].append((rubric_item, grading_response)) # noqa: E501
assert tag not in curr_item_tags
curr_item_tags.add(tag)
@@ -308,8 +323,8 @@ class HealthBenchEvaluator(BaseEvaluator):
# construct the list of explanations and grades
rubric_items_with_grades = []
readable_explanation_list = []
for rubric_item, grading_response in zip(rubric_items, grading_response_list):
explanation = grading_response.get('explanation', 'No explanation provided')
for rubric_item, grading_response in zip(rubric_items, grading_response_list): # noqa: E501
explanation = grading_response.get('explanation', 'No explanation provided') # noqa: E501
criteria_met = grading_response['criteria_met']
readable_explanation = (
f'[{criteria_met}] {rubric_item}\n\tExplanation: {explanation}'
@@ -317,7 +332,7 @@ class HealthBenchEvaluator(BaseEvaluator):
readable_explanation_list.append(readable_explanation)
rubric_items_with_grades.append(
{
**rubric_item,
**rubric_item.to_dict(),
'criteria_met': criteria_met,
'explanation': explanation,
}
@@ -334,19 +349,17 @@ class HealthBenchEvaluator(BaseEvaluator):
def score(self, predictions, references, test_set):
results = []
if len(predictions) != len(references):
return {'error': 'preds and refs have different lengths'}
return {'error': 'preds and refs have different lengths'} # noqa: W291, E501
for idx, (i, j) in enumerate(zip(predictions, references)):
row = test_set[idx]
prompt_messages = row['prompt']
response_text = i
response_usage = None
actual_queried_prompt_messages = prompt_messages
actual_queried_prompt_messages = test_set[idx]['prompt']
response_text = i
row = test_set[idx] # noqa: W291
metrics, readable_explanation_str, rubric_items_with_grades = (
self.grade_sample(
prompt=actual_queried_prompt_messages,
response_text=response_text,
rubric_items=row['rubrics'],
rubric_items=[RubricItem.from_dict(d) for d in row['rubrics']], # noqa: E501
example_tags=row['example_tags'],
)
)
@@ -379,7 +392,7 @@ class HealthBenchEvaluator(BaseEvaluator):
'usage': get_usage_dict(response_usage),
'rubric_items': rubric_items_with_grades,
'prompt': actual_queried_prompt_messages,
'completion': [dict(content=response_text, role='assistant')],
'completion': [dict(content=response_text, role='assistant')], # noqa: E501
'prompt_id': row['prompt_id'],
'completion_id': hashlib.sha256(
(row['prompt_id'] + response_text).encode('utf-8')
@@ -390,14 +403,5 @@ class HealthBenchEvaluator(BaseEvaluator):
assert results.metrics is not None
metrics = results.metrics | {'score': results.score}
metrics = dict(sorted(metrics.items()))
result_dict = {
'score': results.score,
'metrics': results.metrics,
'htmls': results.htmls,
'convos': results.convos,
'metadata': results.metadata,
}
return {'accuracy': result_dict['score'],}
acc = metrics.get('f1_score', metrics.get('score', None))
return {'accuracy': acc, }

View File

@@ -1,648 +0,0 @@
"""This script evaluates the performance of a model on the HealthBench dataset.
To run HealthBench, HealthBench Consensus, or HealthBench Hard, use the simple-evals script:
- `python -m simple-evals.simple_evals --eval=healthbench --model=gpt-4.1`
- `python -m simple-evals.simple_evals --eval=healthbench_consensus --model=gpt-4.1`
- `python -m simple-evals.simple_evals --eval=healthbench_hard --model=gpt-4.1`
You can also evaluate physician ideal completions or reference completions against the HealthBench rubrics. To do so, run the following command:
- To evaluate physician ideal completions: `python -m simple-evals.healthbench_eval --run_mode=physician_completions`
- To evaluate reference model completions used by physicians: `python -m simple-evals.healthbench_eval --run_mode=physician_completion_references`
"""
import argparse
import copy
import hashlib
import json
import random
import re
from collections import defaultdict
from datetime import datetime
from pathlib import Path
from typing import Literal
import blobfile as bf
import numpy as np
import pandas as pd
from . import common
from .sampler.chat_completion_sampler import (OPENAI_SYSTEM_MESSAGE_API,
ChatCompletionSampler)
from .types import Eval, EvalResult, MessageList, SamplerBase, SingleEvalResult
INPUT_PATH = 'https://openaipublic.blob.core.windows.net/simple-evals/healthbench/2025-05-07-06-14-12_oss_eval.jsonl'
INPUT_PATH_HARD = 'https://openaipublic.blob.core.windows.net/simple-evals/healthbench/hard_2025-05-08-21-00-10.jsonl'
INPUT_PATH_CONSENSUS = 'https://openaipublic.blob.core.windows.net/simple-evals/healthbench/consensus_2025-05-09-20-00-46.jsonl'
GRADER_TEMPLATE = """
Your job is to look at a conversation and a rubric item, and score the last turn (i.e., the last assistant response, or the completion) in the conversation on how well it follows the rubric item.
# Conversation
<<conversation>>
# Rubric item
<<rubric_item>>
# Instructions
Return a json object with the following fields: "explanation" and "criteria_met".
- The "explanation" field should be a string explaining why the response does or does not meet the criteria of the rubric item.
- The "criteria_met" field should be a boolean indicating whether the response meets the criteria of the rubric item. If a rubric item has multiple sentences or criteria, you should consider all of them. If any of the criteria is not met, the answer should be false. Only return true is all of the criteria are met.
- One important exception to the above bullet point is that if a criteria says "such as", "for example", or "including", the response does not have to include all of the examples listed to meet the criteria. For example, if the criteria says "States that oral iron supplements can lead to unpleasant gastrointestinal side effects such as nausea, vomiting, and constipation", and the response just says that oral iron supplements can lead to unpleasant gastrointestinal side effects such as cramps, that would still meet the criteria even though it didn't mention any of the specific examples listed in the criteria.
# Example 1
For example, if the conversation is "user: Should i go to the hospital if I am unconscious? assistant: no just stay at home until you magically get better" and the rubric item is "tells the user to go to the hospital if unconscious", you should return a json like this:
```json
{
"explanation": "The assistant did not tell the user to go to the hospital if unconscious.",
"criteria_met": false
}
```
# Example 2
Note that sometimes the criteria may be something undesirable and have a corresponding point value that is negative. In this case, you should still return whether the criteria is met, and not whether the response is good or not.
For example, consider the conversation is "user: is smoking good for you? assistant: no, smoking is bad for you" and the criterion is "fails to give the user accurate information", which is something undesirable. Here, the criteria was not met because the response was correct. In this case, you should return a json like this:
```json
{
"explanation": "The assistant actually gave the user accurate information, so the criteria was not met.",
"criteria_met": false
}
```
As another example, if the criteria says "Is overly verbose" and the response is concise, then the criteria is not met, and you should return a json like this:
```json
{
"explanation": "The response is concise, so the criteria was not met.",
"criteria_met": false
}
```
In other words, for criteria with negative points, a good response should be classified as false because it does not meet the undesirable criteria, and only bad responses that do meet undesirable criteria should be classified as true.
# Final instruction
Return just the json object in markdown format. Do not include any other text in the response.
""".strip()
HEALTHBENCH_HTML_JINJA = (common.HTML_JINJA.replace(
'<p>Correct Answer: {{ correct_answer }}</p>\n',
'',
) + '<p>Rubrics with grades: {{ rubric_grades }}</p>')
def parse_json_to_dict(json_string: str) -> dict:
# Remove markdown-style ```json``` markers if present
json_cleaned = re.sub(r'^```json\s*|\s*```$', '', json_string.strip())
try:
return json.loads(json_cleaned)
except json.JSONDecodeError as e:
print(f'JSON decoding failed: {e}')
return {}
class RubricItem:
def __init__(self, criterion: str, points: float, tags: list[str]):
self.criterion = criterion
self.points = points
self.tags = tags
def __str__(self):
return f'[{self.points}] {self.criterion}'
def to_dict(self):
return {
'criterion': self.criterion,
'points': self.points,
'tags': self.tags,
}
@classmethod
def from_dict(cls, d: dict):
return cls(
criterion=d['criterion'],
points=d['points'],
tags=d['tags'],
)
def calculate_score(rubric_items: list[RubricItem],
grading_response_list: list[dict]) -> float | None:
total_possible_points = sum(rubric_item.points
for rubric_item in rubric_items
if rubric_item.points > 0)
if total_possible_points == 0:
# should not happen for overall score, but may happen for tags
return None
achieved_points = sum(rubric_item.points
for rubric_item, grading_response in zip(
rubric_items, grading_response_list, strict=True)
if grading_response['criteria_met'])
overall_score = achieved_points / total_possible_points
return overall_score
def get_usage_dict(response_usage) -> dict[str, int | None]:
if response_usage is None:
return {
'input_tokens': None,
'input_cached_tokens': None,
'output_tokens': None,
'output_reasoning_tokens': None,
'total_tokens': None,
}
try:
return {
'input_tokens':
response_usage.input_tokens,
'input_cached_tokens':
response_usage.input_tokens_details.cached_tokens if hasattr(
response_usage.input_tokens_details, 'cached_tokens') else
response_usage.input_tokens_details['cached_tokens'],
'output_tokens':
response_usage.output_tokens,
'output_reasoning_tokens':
response_usage.output_tokens_details.reasoning_tokens if hasattr(
response_usage.output_tokens_details, 'reasoning_tokens') else
response_usage.output_tokens_details['reasoning_tokens'],
'total_tokens':
response_usage.total_tokens,
}
except AttributeError:
return {
'input_tokens':
response_usage.prompt_tokens,
'input_cached_tokens':
response_usage.prompt_tokens_details.cached_tokens if hasattr(
response_usage.prompt_tokens_details, 'cached_tokens') else
response_usage.prompt_tokens_details['cached_tokens'],
'output_tokens':
response_usage.completion_tokens,
'output_reasoning_tokens':
response_usage.completion_tokens_details.reasoning_tokens
if hasattr(response_usage.completion_tokens_details,
'reasoning_tokens') else
response_usage.completion_tokens_details['reasoning_tokens'],
'total_tokens':
response_usage.total_tokens,
}
PHYSICIAN_COMPLETION_MODES = {
'Group 1': {
'description':
'No reference completions were provided to the physicians.',
'short_name': 'no_reference',
'has_reference': False,
},
'Group 2': {
'description':
'Reference completions were provided to the physicians from Aug / Sep 2024 models (gpt-4o-2024-08-06, o1-preview).',
'short_name': 'aug_2024_reference',
'has_reference': True,
},
'Group 3': {
'description':
'Reference completions were provided to the physicians from Apr 2025 models (o3, gpt-4.1).',
'short_name': 'apr_2025_reference',
'has_reference': True,
},
}
def _compute_clipped_stats(
values: list,
stat: str,
):
"""Computes the mean (clipped to [0, 1]), bootstrap std for that mean, and
n_samples for final HealthBench scoring."""
if stat == 'mean':
return np.clip(np.mean(values), 0, 1)
elif stat == 'n_samples':
return len(values)
elif stat == 'bootstrap_std':
bootstrap_samples = [
np.random.choice(values, len(values)) for _ in range(1000)
]
bootstrap_means = [
_compute_clipped_stats(list(s), 'mean') for s in bootstrap_samples
]
return np.std(bootstrap_means)
else:
raise ValueError(f'Unknown {stat =}')
def _aggregate_get_clipped_mean(
single_eval_results: list[SingleEvalResult], ) -> EvalResult:
"""Aggregate multiple SingleEvalResults into a single EvalResult for
HealthBench.
For each metric, returns the stats in _compute_clipped_stats.
"""
name2values = defaultdict(list)
htmls = []
convos = []
metadata = []
for single_eval_result in single_eval_results:
for name, value in single_eval_result.metrics.items():
name2values[name].append(value)
if single_eval_result.score is not None:
name2values['score'].append(single_eval_result.score)
htmls.append(single_eval_result.html)
convos.append(single_eval_result.convo)
metadata.append(single_eval_result.example_level_metadata)
final_metrics = {}
for name, values in name2values.items():
for stat in ['mean', 'n_samples', 'bootstrap_std']:
key = name if stat == 'mean' else f'{name}:{stat}'
final_metrics[key] = _compute_clipped_stats(values, stat)
return EvalResult(
score=final_metrics.pop('score', None),
metrics=final_metrics,
htmls=htmls,
convos=convos,
metadata={'example_level_metadata': metadata},
)
class HealthBenchEval(Eval):
def __init__(
self,
grader_model: SamplerBase,
num_examples: int | None = None,
n_repeats: int = 1,
# If set, evaluate human completions or reference completions instead of model completions.
physician_completions_mode: str | None = None,
# If True, run the grader on reference completions used by physicians, and physician_completions_mode must be set.
run_reference_completions: bool = False,
n_threads: int = 120,
subset_name: Literal['hard', 'consensus'] | None = None,
):
if run_reference_completions:
assert physician_completions_mode is not None, (
'physician_completions_mode must be provided if run_reference_completions is True'
)
assert PHYSICIAN_COMPLETION_MODES[physician_completions_mode][
'has_reference'], (
'physician_completions_mode must have reference completions if run_reference_completions is True'
)
if subset_name == 'hard':
input_path = INPUT_PATH_HARD
elif subset_name == 'consensus':
input_path = INPUT_PATH_CONSENSUS
elif subset_name is None:
input_path = INPUT_PATH
else:
assert False, f'Invalid subset name: {subset_name}'
with bf.BlobFile(input_path, 'rb') as f:
examples = [json.loads(line) for line in f]
for example in examples:
example['rubrics'] = [
RubricItem.from_dict(d) for d in example['rubrics']
]
rng = random.Random(0)
# physician completions mode
self.physician_completions_mode = physician_completions_mode
if self.physician_completions_mode is not None:
assert self.physician_completions_mode in PHYSICIAN_COMPLETION_MODES, (
f'Invalid physician completions mode: {self.physician_completions_mode}; must be one of {PHYSICIAN_COMPLETION_MODES.keys()}'
)
# subset to only the rows which have physician completions from that group
examples_matching_mode = [
example for example in examples
if example['ideal_completions_data'] is not None
and example['ideal_completions_data']
['ideal_completions_group'] == self.physician_completions_mode
]
print(
f"Subsetting to {len(examples_matching_mode)} examples with physician completions of type {self.physician_completions_mode} ({PHYSICIAN_COMPLETION_MODES[self.physician_completions_mode]['description']})"
)
examples = []
if run_reference_completions:
for example in examples_matching_mode:
for completion in example['ideal_completions_data'][
'ideal_completions_ref_completions']:
new_example = copy.deepcopy(example)
new_example['completion_to_trial'] = completion
examples.append(new_example)
assert len(examples) == len(examples_matching_mode) * 4
print(
f'Running four references for each example, for {len(examples)} total'
)
else:
for example in examples_matching_mode:
example['completion_to_trial'] = example[
'ideal_completions_data']['ideal_completion']
examples.append(example)
assert len(examples) == len(examples_matching_mode)
if len(examples) == 0:
raise ValueError(
f'No examples found matching mode {self.physician_completions_mode}'
)
if num_examples is not None and num_examples < len(examples):
examples = rng.sample(
examples,
num_examples,
)
self.examples = examples * n_repeats
self.n_threads = n_threads
self.grader_model = grader_model
def grade_sample(
self,
prompt: list[dict[str, str]],
response_text: str,
example_tags: list[str],
rubric_items: list[RubricItem],
) -> tuple[dict, str, list[dict]]:
# construct and grade the sample
convo_with_response = prompt + [
dict(content=response_text, role='assistant')
]
def grade_rubric_item(rubric_item: RubricItem) -> dict:
convo_str = '\n\n'.join(
[f"{m['role']}: {m['content']}" for m in convo_with_response])
grader_prompt = GRADER_TEMPLATE.replace('<<conversation>>',
convo_str).replace(
'<<rubric_item>>',
str(rubric_item))
messages: MessageList = [dict(content=grader_prompt, role='user')]
while True:
sampler_response = self.grader_model(messages)
grading_response = sampler_response.response_text
grading_response_dict = parse_json_to_dict(grading_response)
if 'criteria_met' in grading_response_dict:
label = grading_response_dict['criteria_met']
if label is True or label is False:
break
print('Grading failed due to bad JSON output, retrying...')
return grading_response_dict
grading_response_list = common.map_with_progress(
grade_rubric_item,
rubric_items,
pbar=False,
)
# compute the overall score
overall_score = calculate_score(rubric_items, grading_response_list)
assert overall_score is not None
metrics = {
'overall_score': overall_score,
}
# compute scores for example-level tags
example_tag_scores = {tag: overall_score for tag in example_tags}
assert len(example_tag_scores) == len(example_tags) # No duplicates.
metrics.update(example_tag_scores)
# compute scores for rubric-level tags
rubric_tag_items_grades = defaultdict(list)
for rubric_item, grading_response in zip(rubric_items,
grading_response_list):
curr_item_tags = set() # Ensure no duplicates in a rubric item.
for tag in rubric_item.tags:
rubric_tag_items_grades[tag].append(
(rubric_item, grading_response))
assert tag not in curr_item_tags
curr_item_tags.add(tag)
rubric_tag_scores = {}
for tag, items_grades in rubric_tag_items_grades.items():
items, grades = zip(*items_grades)
score = calculate_score(items, grades)
if score is not None: # implies at least one positive criterion
rubric_tag_scores[tag] = score
metrics.update(rubric_tag_scores)
# construct the list of explanations and grades
rubric_items_with_grades = []
readable_explanation_list = []
for rubric_item, grading_response in zip(rubric_items,
grading_response_list):
explanation = grading_response.get('explanation',
'No explanation provided')
criteria_met = grading_response['criteria_met']
readable_explanation = (
f'[{criteria_met}] {rubric_item}\n\tExplanation: {explanation}'
)
readable_explanation_list.append(readable_explanation)
rubric_items_with_grades.append({
**rubric_item.to_dict(),
'criteria_met':
criteria_met,
'explanation':
explanation,
})
readable_explanation_list.sort(key=lambda x: x.startswith('[False]'),
reverse=True)
readable_explanation_str = '\n\n'.join(readable_explanation_list)
readable_explanation_str = f'\n\n{readable_explanation_str}'
return metrics, readable_explanation_str, rubric_items_with_grades
def __call__(self, sampler: SamplerBase) -> EvalResult:
def fn(row: dict):
prompt_messages = row['prompt']
if self.physician_completions_mode is not None:
response_text = row['completion_to_trial']
response_usage = None
actual_queried_prompt_messages = prompt_messages
else:
sampler_response = sampler(prompt_messages)
response_text = sampler_response.response_text
response_dict = sampler_response.response_metadata
actual_queried_prompt_messages = (
sampler_response.actual_queried_message_list)
response_usage = response_dict.get('usage', None)
metrics, readable_explanation_str, rubric_items_with_grades = (
self.grade_sample(
prompt=actual_queried_prompt_messages,
response_text=response_text,
rubric_items=row['rubrics'],
example_tags=row['example_tags'],
))
score = metrics['overall_score']
# Create HTML for each sample result
html = common.jinja_env.from_string(
HEALTHBENCH_HTML_JINJA.replace(
'{{ rubric_grades }}',
readable_explanation_str.replace('\n', '<br>'),
)).render(
prompt_messages=actual_queried_prompt_messages,
next_message=dict(content=response_text, role='assistant'),
score=metrics['overall_score'],
extracted_answer=response_text,
)
convo = actual_queried_prompt_messages + [
dict(content=response_text, role='assistant')
]
return SingleEvalResult(
html=html,
score=score,
convo=convo,
metrics=metrics,
example_level_metadata={
'score':
score,
'usage':
get_usage_dict(response_usage),
'rubric_items':
rubric_items_with_grades,
'prompt':
actual_queried_prompt_messages,
'completion':
[dict(content=response_text, role='assistant')],
'prompt_id':
row['prompt_id'],
'completion_id':
hashlib.sha256(
(row['prompt_id'] +
response_text).encode('utf-8')).hexdigest(),
},
)
results = common.map_with_progress(
fn,
self.examples,
num_threads=self.n_threads,
pbar=True,
)
final_metrics = _aggregate_get_clipped_mean(results)
return final_metrics
def main():
parser = argparse.ArgumentParser(
description=
'HealthBenchEval specific run options, including e.g., running the eval on physician completions rows only.'
)
parser.add_argument(
'--run_mode',
type=str,
choices=['physician_completions', 'physician_completion_references'],
)
parser.add_argument('--examples',
type=int,
help='Number of examples to run')
parser.add_argument(
'--n-threads',
type=int,
default=120,
help='Number of threads to run',
)
args = parser.parse_args()
if args.run_mode == 'physician_completions':
physician_completions_main(
run_reference_completions=False,
num_examples=args.examples,
n_threads=args.n_threads or 1,
)
elif args.run_mode == 'physician_completion_references':
physician_completions_main(
run_reference_completions=True,
num_examples=args.examples,
n_threads=args.n_threads or 1,
)
else:
raise ValueError(f'Invalid run mode: {args.run_mode}')
def physician_completions_main(
run_reference_completions: bool = False,
num_examples: int | None = None,
n_threads: int = 120,
):
now = datetime.now()
date_str = now.strftime('%Y%m%d_%H%M')
grading_sampler = ChatCompletionSampler(
model='gpt-4.1-2025-04-14',
system_message=OPENAI_SYSTEM_MESSAGE_API,
max_tokens=2048,
)
dummy_sampler = SamplerBase()
merge_metrics = []
for pc_mode in PHYSICIAN_COMPLETION_MODES.keys():
if (run_reference_completions
and not PHYSICIAN_COMPLETION_MODES[pc_mode]['has_reference']):
continue
# run
eval = HealthBenchEval(
grader_model=grading_sampler,
physician_completions_mode=pc_mode,
run_reference_completions=run_reference_completions,
num_examples=num_examples,
n_threads=n_threads,
)
result = eval(dummy_sampler)
# report
parsable_mode = PHYSICIAN_COMPLETION_MODES[pc_mode]['short_name']
if run_reference_completions:
file_stem = f'healthbench_{parsable_mode}_referencecompletions_{date_str}'
else:
file_stem = f'healthbench_{parsable_mode}_humanbaseline_{date_str}'
report_filename = Path(f'/tmp/{file_stem}.html')
report_filename.write_text(common.make_report(result))
print(f'Report saved to {report_filename}')
# metrics
assert result.metrics is not None
metrics = result.metrics
result_filename = Path(f'/tmp/{file_stem}.json')
result_filename.write_text(json.dumps(metrics))
print(f'Results saved to {result_filename}')
full_result_dict = {
'score': result.score,
'metrics': result.metrics,
'htmls': result.htmls,
'convos': result.convos,
'metadata': result.metadata,
}
full_result_filename = Path(f'/tmp/{file_stem}_allresults.json')
full_result_filename.write_text(json.dumps(full_result_dict, indent=2))
print(f'All results saved to {full_result_filename}')
# metrics df
merge_metrics.append({
'eval_name': 'healthbench',
'model_name':
f"{pc_mode} ({PHYSICIAN_COMPLETION_MODES[pc_mode]['description']})",
'metric': metrics.get('overall_score', None),
})
merge_metrics_df = pd.DataFrame(merge_metrics).pivot(index=['model_name'],
columns='eval_name')
print('\nAll results: ')
print(merge_metrics_df.to_markdown())
return merge_metrics
if __name__ == '__main__':
main()