diff --git a/examples/eval_OpenHuEval_HuProverbRea_2CQ.py b/examples/eval_OpenHuEval_HuProverbRea_2CQ.py
index e4be01bf..bc2e11cf 100644
--- a/examples/eval_OpenHuEval_HuProverbRea_2CQ.py
+++ b/examples/eval_OpenHuEval_HuProverbRea_2CQ.py
@@ -30,6 +30,12 @@ for model in models:
             'type': 'rm_<think>_before_eval'
         }
     }
+    if model['abbr'].startswith('QwQ'):
+        model['pred_postprocessor'] = {
+            'OpenHuEval_*': {
+                'type': 'extract_qwq_answer_before_eval_for_huproverbrea'
+            }
+        }
 del model
 
 work_dir = './outputs/' + __file__.split('/')[-1].split('.')[0] + '/'  # do NOT modify this line, yapf: disable, pylint: disable
diff --git a/examples/eval_OpenHuEval_HuProverbRea_OE.py b/examples/eval_OpenHuEval_HuProverbRea_OE.py
index bf6ed8ed..137dc428 100644
--- a/examples/eval_OpenHuEval_HuProverbRea_OE.py
+++ b/examples/eval_OpenHuEval_HuProverbRea_OE.py
@@ -33,7 +33,7 @@ for model in models:
     if model['abbr'].startswith('QwQ'):
         model['pred_postprocessor'] = {
             'OpenHuEval_*': {
-                'type': 'extract_qwq_answer_before_eval'
+                'type': 'extract_qwq_answer_before_eval_for_huproverbrea'
             }
         }
 del model
diff --git a/examples/eval_OpenHuEval_HuStandardFIB.py b/examples/eval_OpenHuEval_HuStandardFIB.py
index 2b0336e7..6f2a239d 100644
--- a/examples/eval_OpenHuEval_HuStandardFIB.py
+++ b/examples/eval_OpenHuEval_HuStandardFIB.py
@@ -15,7 +15,8 @@ with read_base():
     from opencompass.configs.models.hf_internlm.lmdeploy_internlm3_8b_instruct import models as lmdeploy_internlm3_8b_instruct_model
     from opencompass.configs.models.qwq.lmdeploy_qwq_32b_preview import models as lmdeploy_qwq_32b_preview_model
 
-    from opencompass.configs.models.deepseek.deepseek_r1_api_aliyun import models as deepseek_r1_api_aliyun_model
+    # from opencompass.configs.models.deepseek.deepseek_r1_api_aliyun import models as deepseek_r1_api_aliyun_model
+    from opencompass.configs.models.deepseek.deepseek_r1_api_siliconflow import models as deepseek_r1_api_siliconflow_model
     from opencompass.configs.models.openai.o1_mini_2024_09_12 import models as o1_mini_2024_09_12_model
     # from opencompass.configs.models.openai.o3_mini_2025_01_31 import models as o3_mini_2025_01_31_model
 
@@ -30,6 +31,12 @@ for model in models:
             'type': 'rm_<think>_before_eval'
         }
     }
+    if model['abbr'].startswith('QwQ'):
+        model['pred_postprocessor'] = {
+            'OpenHuEval_*': {
+                'type': 'extract_qwq_answer_before_eval_for_hustandardfib'
+            }
+        }
 del model
 
 work_dir = './outputs/' + __file__.split('/')[-1].split('.')[0] + '/'  # do NOT modify this line, yapf: disable, pylint: disable
diff --git a/opencompass/utils/text_postprocessors.py b/opencompass/utils/text_postprocessors.py
index b8aa8924..c49874f3 100644
--- a/opencompass/utils/text_postprocessors.py
+++ b/opencompass/utils/text_postprocessors.py
@@ -243,7 +243,8 @@ def remove_reasoning_part_before_evaluation(text: str):
     return text
 
 
-@TEXT_POSTPROCESSORS.register_module('extract_qwq_answer_before_eval')
+@TEXT_POSTPROCESSORS.register_module(
+    'extract_qwq_answer_before_eval_for_huproverbrea')
 def extract_answer_before_evaluation(text: str):
     """Overall, there are three situations in responses of QWQ:
@@ -344,4 +345,41 @@ def extract_answer_before_evaluation(text: str):
 
     answer = '\n\n'.join(text_split[max(ans_start_idx - 1, 0):])
 
-    return answer, has_answer
+    return answer
+
+
+@TEXT_POSTPROCESSORS.register_module(
+    'extract_qwq_answer_before_eval_for_hustandardfib')
+def extract_answer_before_evaluation_for_hustandardfib(text: str):
+    """The format of the answer from QwQ when inferring HuStandardFIB is \
+    different from that of other models due to the special prompt."""
+    max_sentence_len = 70
+    if len(re.findall(r'\n\n', text)) > 2:
+        split_mark = '\n\n'
+    else:
+        split_mark = '\n'
+    text_split = text.split(split_mark)
+    last_try_idx = max(len(text_split) - max_sentence_len, 0)
+    ans_start_idx = last_try_idx
+    has_answer = False
+    answer_flags = [
+        '#0#', '#0', 'summar', 'Final Answer', 'final answer', 'Final\nAnswer'
+    ]
+
+    for idx, s in enumerate(reversed(text_split)):
+        sen_idx = len(text_split) - 1 - idx
+        if sen_idx < last_try_idx:
+            break
+
+        for af in answer_flags:
+            if af in s:
+                has_answer = True
+                break
+
+        if has_answer:
+            ans_start_idx = sen_idx
+            break
+
+    answer = '\n\n'.join(text_split[max(ans_start_idx - 1, 0):])
+
+    return answer
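For reference, here is a minimal standalone sketch of what the new `extract_qwq_answer_before_eval_for_hustandardfib` postprocessor does. The helper name, sample response, and expected output below are illustrative assumptions for review purposes, not strings taken from OpenHuEval or from the patch itself:

```python
# Illustrative re-statement of the extraction logic added above, runnable on
# its own. The toy QwQ-style response is made up for demonstration.
import re


def extract_hustandardfib_answer(text: str) -> str:
    # Split on blank lines when the response is paragraph-structured,
    # otherwise fall back to single newlines.
    split_mark = '\n\n' if len(re.findall(r'\n\n', text)) > 2 else '\n'
    parts = text.split(split_mark)
    # Only scan the last 70 chunks, mirroring max_sentence_len in the patch.
    last_try_idx = max(len(parts) - 70, 0)
    ans_start_idx = last_try_idx
    answer_flags = ['#0#', '#0', 'summar', 'Final Answer', 'final answer',
                    'Final\nAnswer']
    # Walk backwards until a chunk contains one of the answer markers.
    for idx in range(len(parts) - 1, last_try_idx - 1, -1):
        if any(af in parts[idx] for af in answer_flags):
            ans_start_idx = idx
            break
    # Keep one chunk of leading context, as the patched function does.
    return '\n\n'.join(parts[max(ans_start_idx - 1, 0):])


resp = ('Let me think about the blanks.\n\n'
        'The first blank should be "alma".\n\n'
        'Checking the grammar once more.\n\n'
        'Final Answer: #0# alma')
print(extract_hustandardfib_answer(resp))
# Prints the last two chunks:
# Checking the grammar once more.
#
# Final Answer: #0# alma
```

Because the scan runs from the end of the response, the extractor returns the latest chunk containing an answer marker plus one preceding chunk of context, which is what strips QwQ's long reasoning prefix before evaluation.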