add HuSimpleQA

This commit is contained in:
wujiang 2025-02-10 21:20:39 +08:00 committed by jxd
parent 5741e38310
commit 1e1acf9236
4 changed files with 362 additions and 0 deletions

@@ -0,0 +1,35 @@
from mmengine.config import read_base

with read_base():
    from opencompass.configs.datasets.OpenHuEval.HuSimpleQA.HuSimpleQA import HuSimpleQA_datasets

    # from opencompass.configs.models.openai.gpt_4o_mini_20240718 import models as gpt_4o_mini_20240718_model
    # from opencompass.configs.models.openai.gpt_4o_2024_11_20 import models as gpt_4o_2024_11_20_model
    # from opencompass.configs.models.deepseek.deepseek_v3_api_aliyun import models as deepseek_v3_api_aliyun_model
    from opencompass.configs.models.qwen2_5.lmdeploy_qwen2_5_7b_instruct import models as lmdeploy_qwen2_5_7b_instruct_model
    from opencompass.configs.models.qwen2_5.lmdeploy_qwen2_5_72b_instruct import models as lmdeploy_qwen2_5_72b_instruct_model
    from opencompass.configs.models.hf_llama.lmdeploy_llama3_1_8b_instruct import models as lmdeploy_llama3_1_8b_instruct_model
    from opencompass.configs.models.hf_llama.lmdeploy_llama3_1_70b_instruct import models as lmdeploy_llama3_1_70b_instruct_model
    from opencompass.configs.models.hf_internlm.lmdeploy_internlm3_8b_instruct import models as lmdeploy_internlm3_8b_instruct_model
    from opencompass.configs.models.qwq.lmdeploy_qwq_32b_preview import models as lmdeploy_qwq_32b_preview_model
    # from opencompass.configs.models.deepseek.deepseek_r1_api_aliyun import models as deepseek_r1_api_aliyun_model
    # from opencompass.configs.models.openai.o1_mini_2024_09_12 import models as o1_mini_2024_09_12_model
    # from opencompass.configs.models.openai.o3_mini_2025_01_31 import models as o3_mini_2025_01_31_model

datasets = HuSimpleQA_datasets

# Merge every imported `models as *_model` list into a single flat list.
models = sum([v for k, v in locals().items() if k.endswith('_model')], [])

# Reasoning models (DeepSeek-R1 API) return their chain of thought; strip the
# <think> block from predictions before evaluation.
for model in models:
    if model['abbr'].startswith('deepseek_r1_api_'):
        model['return_reasoning_content'] = True
        model['pred_postprocessor'] = {
            'OpenHuEval_*': {
                'type': 'rm_<think>_before_eval'
            }
        }
del model

work_dir = './outputs/' + __file__.split('/')[-1].split('.')[0] + '/'  # do NOT modify this line, yapf: disable, pylint: disable
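
The `models = sum(...)` line above merges every imported `models as *_model` list into one flat list by scanning the module's globals for the `_model` suffix. A minimal, self-contained sketch of the same pattern, using two made-up model entries rather than the real OpenCompass configs:

# Standalone sketch of the suffix-based aggregation used in the config above.
# The two dummy lists stand in for the imported `models as *_model` variables.
lmdeploy_qwen2_5_7b_instruct_model = [dict(abbr='qwen2.5-7b-instruct-dummy')]
lmdeploy_llama3_1_8b_instruct_model = [dict(abbr='llama-3.1-8b-instruct-dummy')]

models = sum([v for k, v in locals().items() if k.endswith('_model')], [])
print([m['abbr'] for m in models])
# ['qwen2.5-7b-instruct-dummy', 'llama-3.1-8b-instruct-dummy']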

@@ -0,0 +1,51 @@
from mmengine.config import read_base

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.OpenHuEval.HuSimpleQA import HuSimpleQADataset, HuSimpleQAEvaluator

with read_base():
    from .HuSimpleQA_setting import INSTRUCTIONS, DATA_PATH, DATA_VERSION, JUDGE_PROMPT

# Build one dataset config per prompt language (English and Hungarian).
PROMPT_LANGUAGES = [
    'en',
    'hu',
]

HuSimpleQA_reader_cfg = dict(input_columns=['question', 'hu_specific_dim'],
                             output_column='reference')

HuSimpleQA_datasets = []
for lang in PROMPT_LANGUAGES:
    instruction = INSTRUCTIONS[lang]
    HuSimpleQA_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                begin='</E>',
                round=[
                    dict(role='HUMAN', prompt=instruction),
                ],
            ),
            ice_token='</E>',
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer),
    )

    HuSimpleQA_eval_cfg = dict(evaluator=dict(
        type=HuSimpleQAEvaluator,
        judge_prompt_template=JUDGE_PROMPT,
    ))

    HuSimpleQA_datasets.append(
        dict(
            abbr=f'OpenHuEval_HuSimpleQA_{DATA_VERSION}-prompt_{lang}',
            type=HuSimpleQADataset,
            filepath=DATA_PATH,
            reader_cfg=HuSimpleQA_reader_cfg,
            infer_cfg=HuSimpleQA_infer_cfg,
            eval_cfg=HuSimpleQA_eval_cfg,
        ))
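
The loop above emits one dataset entry per prompt language, both pointing at the same DATA_PATH and differing only in the instruction language. A quick sketch of the abbr values it produces, assuming DATA_VERSION = '250208' from HuSimpleQA_setting:

# Sketch: dataset abbreviations generated by the loop above.
DATA_VERSION = '250208'  # taken from HuSimpleQA_setting in this commit
for lang in ['en', 'hu']:
    print(f'OpenHuEval_HuSimpleQA_{DATA_VERSION}-prompt_{lang}')
# OpenHuEval_HuSimpleQA_250208-prompt_en
# OpenHuEval_HuSimpleQA_250208-prompt_hu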

@@ -0,0 +1,135 @@
INSTRUCTIONS = {
    'en':
    """Question: {question}
Please provide your best answer to this question in Hungarian and indicate your confidence in your answer using a score from 0 to 100. Please provide your response in the following JSON format:
{
    "answer": "Your answer here",
    "confidence_score": number
}
""",
    'hu':
    """Kérdés: {question}
Kérjük, magyar nyelven adja meg a legjobb választ erre a kérdésre, és 0-tól 100-ig terjedő pontszámmal jelezze, hogy bízik a válaszában. Kérjük, válaszát a következő JSON formátumban adja meg:
{
    "answer": "Az Ön válasza itt",
    "confidence_score": szám
}
"""
}
JUDGE_PROMPT = {
    'system_prompt':
    """
- Role: Hungarian General Knowledge Quiz Review Expert
- Background: The Hungarian General Knowledge Quiz covers many aspects of Hungarian history, culture, geography, politics, people, etc., so a professional review expert is needed to ensure the accuracy of the answers.
- Goals: Based on the given question and reference answer, accurately judge whether the answer entered by the user is correct, and output one of the evaluation results: "CORRECT", "INCORRECT", or "NOT_ATTEMPTED".
- Evaluation criteria:
  - "CORRECT":
    - The predicted answer fully contains the important information in the gold_target.
    - The predicted answer does not contain any information that contradicts the gold_target.
    - Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter.
    - Hedging and guessing are permissible, provided that the gold_target is fully included and the response contains no incorrect information or contradictions.
    - The answer must be consistent with the scope of the question. For example, if the question asks on which day someone was born, the answer must specify the exact date, such as January 3, 1997.
  - "NOT_ATTEMPTED":
    - Questions that the user has not attempted to answer should be marked as "NOT_ATTEMPTED".
    - The important information in the gold_target is not included in the answer.
    - No statement in the answer contradicts the gold_target.
- Workflow:
  1. Receive the question, the reference answer, and the user's answer.
  2. Compare the reference answer and the user's answer to determine whether they are consistent.
  3. Based on the judgment, output the corresponding evaluation result.
- Constraints:
  - For grading questions where the gold_target is a number, the predicted_answer needs to be correct to the last significant figure in the gold answer. For example, consider the question Hány látogató érkezett Magyarországra 2024-ben? with gold_target 440k.
    - predicted_answers 440k, 444k, and 435k are all CORRECT.
    - predicted_answers 400k and 413k are INCORRECT.
    - predicted_answers körülbelül 400k and több mint 300k are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold_target.
  - The gold_target may contain more information than the question. In such cases, the predicted_answer only needs to contain the information that is in the question.
    - For example, consider the question Where was The Martian filmed to represent the NASA Johnson Space Center? with the gold_target Budapesti Bálna (HU). Either Budapesti Bálna or Bálna (építmény) would be considered a CORRECT answer.
  - Do not punish predicted_answers if they omit information that would be clearly inferred from the question.
    - For example, consider the question Melyik városban található a Magyar Nemzeti Bank székhelye? with the gold_target Budapest, Magyarország. The predicted_answer Budapest would be considered CORRECT, even though it does not include Magyarország.
    - Consider the question Milyen díjat nyert a Napsugár című magyar film a 2018-as Velencei Filmfesztiválon? with the gold_target Legjobb Forgatókönyv Díja. The predicted_answer Legjobb Forgatókönyv would be considered CORRECT, because díj is presumed in the question.
    - For the question Mi Dominik Szoboszlai magassága méterben? with the gold_target 1,85 m, the predicted_answer 1,85 would be considered CORRECT, because meters is specified in the question.
    - For the question Mi Magyarország első női miniszterelnökének neve? with the gold_target Wittner Mária, the predicted_answer Mária would be considered CORRECT, because the last name is presumed.
  - Do not punish typos in people's names if it is clearly the same name.
    - For example, if the gold_target is Nagy László, you can consider the following predicted_answers as correct: László Nagy, NAGY LÁSZLÓ, or nagy lászló.

Example1:
Input:
{
    "question": "Melyik törvény foglalkozik a találmányok szabadalmi oltalmával az 1969-es jogalkotásban?",
    "gold_target": "1969. évi II. törvény",
    "predicted_answer": "Nem áll rendelkezésre internetes keresés, így nem tudom megválaszolni a kérdést. Azonban 1969-ben valóban elfogadták a szabadalmi védelmi törvényt."
}
Output:
{
    "evaluation": "NOT_ATTEMPTED"
}

Example2:
Input:
{
    "question": "Melyik városban született Tisza Lajos 1832. szeptember 12-én?",
    "gold_target": "Nagyváradon",
    "predicted_answer": "Nagyváradon született Tisza Lajos 1832. szeptember 12-én."
}
Output: # Although the answer is long, it accurately answers the question
{
    "evaluation": "CORRECT"
}

Example3:
Input:
{
    "question": "Melyik olimpián vett részt Bogár Pál kosárlabda-játékosként?",
    "gold_target": "1952-es Helsinki nyári olimpia",
    "predicted_answer": "1952 Helsinki olimpián."
}
Output: # The descriptions differ slightly, but they refer to the same Olympic Games, so the answer is considered correct
{
    "evaluation": "CORRECT"
}

Example4:
Input:
{
    "question": "Melyik labdarúgócsapat kötődik Budapest XIX. kerületéhez, amely 14-szeres magyar bajnok?",
    "gold_target": "Budapest Honvéd FC",
    "predicted_answer": "Ferencváros"
}
Output: # Although Ferencváros is a very famous football club in Hungary, it has no connection with the 19th district of Budapest and its number of championships does not match the description in the question
{
    "evaluation": "INCORRECT"
}

Example5:
Input:
{
    "question": "Milyen biztosítás bevezetését szabályozta egy 1952-es törvényerejű rendelet Magyarországon?",
    "gold_target": "kötelező tűz- és jégbiztosítás",
    "predicted_answer": "Kötelező tűzbiztosítás"
}
Output: # The predicted_answer does not cover the full gold_target
{
    "evaluation": "INCORRECT"
}
""",
    'user_prompt':
    """Please strictly follow the above examples and requirements and evaluate the following answer.
Input:
{{
    "question": {question},
    "gold_target": {answer},
    "predicted_answer": {pred_answer}
}}
Please respond strictly in JSON format. Do not include any additional text outside the JSON structure.
Output:
{{
    "evaluation": "CORRECT" / "INCORRECT" / "NOT_ATTEMPTED"
}}
"""
}
OpenHuEval_Path = '/mnt/hwfile/opendatalab/weixingjian/OpenHuEval'
DATA_VERSION = '250208'
DATA_PATH = f'{OpenHuEval_Path}/data/HuSimpleQA/HuSimpleQA_{DATA_VERSION}/HuSimpleQA.jsonl'
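
The doubled braces ({{ and }}) in user_prompt are needed because the evaluator fills the template with str.format(); a single brace would be treated as a placeholder. A minimal sketch of how the escaping plays out, with made-up sample values:

# str.format() turns '{{' / '}}' into literal braces and substitutes the named
# fields, which is what JUDGE_PROMPT['user_prompt'] relies on.
template = '{{\n    "question": {question},\n    "gold_target": {answer}\n}}'
print(template.format(question='"Melyik városban született Tisza Lajos?"',
                      answer='"Nagyváradon"'))
# {
#     "question": "Melyik városban született Tisza Lajos?",
#     "gold_target": "Nagyváradon"
# }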

@@ -0,0 +1,141 @@
import json
import os
import re

from datasets import Dataset

from opencompass.openicl.icl_evaluator import BaseEvaluator
from opencompass.utils.prompt import PromptList

from ..base import BaseDataset


class HuSimpleQADataset(BaseDataset):

    @staticmethod
    def load(filepath):
        assert os.path.isfile(filepath)
        assert filepath.endswith('.jsonl')
        with open(filepath, 'r', encoding='utf-8') as f:
            objs = [json.loads(line) for line in f if line.strip()]
        out_dict_list = []
        for obj in objs:
            # Keep the full raw record as `reference` so the evaluator can read
            # fields such as `qid` and `answer` during scoring.
            new_obj = dict(question=obj['question'],
                           hu_specific_dim=obj['hu_specific_dim'],
                           reference=obj)
            out_dict_list.append(new_obj)
        dataset = Dataset.from_list(out_dict_list)
        return dataset


class HuSimpleQAEvaluator(BaseEvaluator):

    def __init__(self,
                 judge_prompt_template,
                 openai_key='ENV',
                 openai_proxy_url='ENV',
                 **kwargs):
        super().__init__(**kwargs)
        self.judge_prompt_template = judge_prompt_template
        self.openai_key = openai_key
        self.openai_proxy_url = openai_proxy_url

    def score(self, predictions, references, origin_prompt) -> dict:
        if len(predictions) != len(references):
            return {'error': 'predictions and references have different lengths.'}
        details = {}
        total, correct, wrong, not_attempted, failed_to_parse = 0, 0, 0, 0, 0

        # Imported lazily so the module can be loaded without the OpenAI client;
        # the judge model is only needed when scoring actually runs.
        from opencompass.models import OpenAI
        model = OpenAI(path='gpt-4o-2024-08-06',
                       key=self.openai_key,
                       openai_proxy_url=self.openai_proxy_url,
                       max_seq_len=8192,
                       retry=2,
                       temperature=0,
                       verbose=True)

        confidence_scores = []
        for raw_pred, detail in zip(predictions, references):
            total += 1
            qid = detail['qid']
            details[qid] = {
                'question': detail['question'],
                'answer': detail['answer'],
                'raw_pred': raw_pred,
                'correctness': False,
                'failed_to_parse': False
            }

            # Parse the raw prediction (expected JSON, possibly wrapped in a
            # ```json fence) to extract the self-reported confidence score.
            try:
                raw_pred = re.sub(r'^```json\n|\n```$', '', raw_pred)
                raw_pred_json = json.loads(raw_pred)
                confidence_score = raw_pred_json.get('confidence_score', None)
            except json.JSONDecodeError:
                confidence_score = None
            details[qid]['confidence_score'] = confidence_score

            # Grade the prediction with GPT-4o acting as the judge.
            user_prompt = self.judge_prompt_template['user_prompt'].format(
                question=detail['question'],
                answer=detail['answer'],
                pred_answer=raw_pred)
            system_prompt = self.judge_prompt_template['system_prompt']
            details[qid]['judge_user_prompt'] = user_prompt
            messages = PromptList([{
                'role': 'SYSTEM',
                'prompt': system_prompt,
            }, {
                'role': 'HUMAN',
                'prompt': user_prompt,
            }])
            response = model._generate(input=messages,
                                       max_out_len=8192,
                                       temperature=0.1)
            details[qid]['judge_resp'] = response

            try:
                response = re.sub(r'^```json\n|\n```$', '', response)
                evaluation_result = json.loads(response)
                evaluation = evaluation_result.get('evaluation', '').lower()
                details[qid]['correctness'] = (evaluation == 'correct')
                details[qid]['failed_to_parse'] = False
                if evaluation == 'correct':
                    correct += 1
                elif evaluation == 'incorrect':
                    wrong += 1
                elif evaluation == 'not_attempted':
                    not_attempted += 1
                else:
                    failed_to_parse += 1
            except json.JSONDecodeError:
                details[qid]['failed_to_parse'] = True
                failed_to_parse += 1

            confidence_scores.append(
                (confidence_score, details[qid]['correctness']))

        accuracy = correct / total if total > 0 else 0
        results = {
            'accuracy': accuracy,
            'total': total,
            'correct': correct,
            'wrong': wrong,
            'not_attempted': not_attempted,
            'failed_to_parse': failed_to_parse,
            'details': details,
            'confidence_scores': confidence_scores
        }
        return results
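
For reference, the loader and evaluator above read four fields from each HuSimpleQA.jsonl record: 'question' and 'hu_specific_dim' in HuSimpleQADataset.load(), and 'qid', 'question', 'answer' from the reference object in HuSimpleQAEvaluator.score(). The record below is a fabricated example of that minimal shape, not a line from the actual dataset:

import json

# Hypothetical HuSimpleQA.jsonl record; field values are made up, and the real
# data file may carry additional fields beyond the ones read by the code above.
record = {
    'qid': 'husimpleqa_0001',
    'question': 'Melyik városban található a Magyar Nemzeti Bank székhelye?',
    'answer': 'Budapest',
    'hu_specific_dim': 'geography',
}
print(json.dumps(record, ensure_ascii=False))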