from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccwithDetailsEvaluator
from opencompass.datasets import HellaswagDatasetwithICE
from opencompass.utils.text_postprocessors import first_option_postprocess
from opencompass.evaluator import GenericLLMEvaluator
from opencompass.datasets import generic_llmjudge_postprocess
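# Reader config: 'ctx' holds the context to be continued, 'A'-'D' are the
# four candidate endings, and 'label' is the gold option consumed by the
# judge template below.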
hellaswag_reader_cfg = dict(
    input_columns=['ctx', 'A', 'B', 'C', 'D'],
    output_column='label',
    train_split='train',
    test_split='val',
)
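# Zero-shot generation prompt shown to the evaluated model: the context plus
# the four options, asking which option is right.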
align_prompt = """ Continue the following text without adding any additional information or formatting:
{ ctx }
A ) { A }
B ) { B }
C ) { C }
D ) { D }
What is the right option ? ' " " "
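# Prompt for the LLM judge: it compares the model's prediction against the
# gold label and must reply with a single letter, 'A' (correct) or
# 'B' (incorrect).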
GRADER_TEMPLATE = """
Please as a grading expert , judge whether the final answers given by the candidates below are consistent with the standard answers , that is , whether the candidates answered correctly .
Here are some evaluation criteria :
1. Please refer to the given standard answer . You don ' t need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate ' s answer is consistent with the standard answer according to the form of the question . Don ' t try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate ' s answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate ' s answer is correct , but be careful not to try to answer the original question .
3. Some answers may contain multiple items , such as multiple - choice questions , multiple - select questions , fill - in - the - blank questions , etc . As long as the answer is the same as the standard answer , it is enough . For multiple - select questions and multiple - blank fill - in - the - blank questions , the candidate needs to answer all the corresponding options or blanks correctly to be considered correct .
4. Some answers may be expressed in different ways , such as some answers may be a mathematical expression , some answers may be a textual description , as long as the meaning expressed is the same . And some formulas are expressed in different ways , but they are equivalent and correct .
Please judge whether the following answers are consistent with the standard answer based on the above criteria . Grade the predicted answer of this new question as one of :
A : CORRECT
B : INCORRECT
Just return the letters " A " or " B " , with no text around it .
Here is your task . Simply reply with either CORRECT , INCORRECT . Don ' t apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
2025-03-17 17:40:57 +08:00
< Original Question Begin > : { ctx } \n A ) { A } \n B ) { B } \n C ) { C } \n D ) { D } \n < Original Question End > \n \n
< Gold Target Begin > : \n { label } \n < Gold Target End > \n \n
2025-03-12 21:58:58 +08:00
< Predicted Answer Begin > : \n { prediction } \n < Predicted End > \n \n
Judging the correctness of candidates ' answers:
""" .strip()
hellaswag_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt=align_prompt),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)
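# Evaluation config: GenericLLMEvaluator sends GRADER_TEMPLATE to a judge
# model. judge_cfg is left empty here and is expected to be filled in at
# experiment level (see the sketch at the bottom of this file);
# generic_llmjudge_postprocess turns the judge's A/B verdicts into a score.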
hellaswag_eval_cfg = dict(
    evaluator=dict(
        type=GenericLLMEvaluator,
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                begin=[
                    dict(
                        role='SYSTEM',
                        fallback_role='HUMAN',
                        prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                    )
                ],
                round=[
                    dict(role='HUMAN', prompt=GRADER_TEMPLATE),
                ],
            ),
        ),
        dataset_cfg=dict(
            type=HellaswagDatasetwithICE,
            path='opencompass/hellaswag_ice',
            reader_cfg=hellaswag_reader_cfg,
        ),
        judge_cfg=dict(),
        dict_postprocessor=dict(type=generic_llmjudge_postprocess),
    ),
)
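# Final dataset entry combining the reader, inference, and evaluation configs.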
hellaswag_datasets = [
    dict(
        abbr='hellaswag',
        type=HellaswagDatasetwithICE,
        path='opencompass/hellaswag_ice',
        reader_cfg=hellaswag_reader_cfg,
        infer_cfg=hellaswag_infer_cfg,
        eval_cfg=hellaswag_eval_cfg,
    )
]
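
# A minimal sketch of how a judge model might be attached when this config is
# used. The wrapper choice, model name, and generation settings below are
# assumptions for illustration, not part of this config:
#
# from opencompass.models import OpenAI
#
# hellaswag_eval_cfg['evaluator']['judge_cfg'] = dict(
#     abbr='judge',     # hypothetical judge abbreviation
#     type=OpenAI,      # any OpenCompass model wrapper can serve as the judge
#     path='gpt-4o',    # assumed judge model name
#     max_out_len=1024,
#     batch_size=8,
# )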