Mirror of https://github.com/open-compass/opencompass.git (synced 2025-05-30 16:03:24 +08:00)
[Feature] Support answer extraction of QwQ when evaluating HuStandardFIB (#36)
parent 9676d99787
commit c7e89aa3db
@@ -30,6 +30,12 @@ for model in models:
                 'type': 'rm_<think>_before_eval'
             }
         }
+    if model['abbr'].startswith('QwQ'):
+        model['pred_postprocessor'] = {
+            'OpenHuEval_*': {
+                'type': 'extract_qwq_answer_before_eval_for_huproverbrea'
+            }
+        }
 del model

 work_dir = './outputs/' + __file__.split('/')[-1].split('.')[0] + '/'  # do NOT modify this line, yapf: disable, pylint: disable
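For context: in these configs, `pred_postprocessor` maps a dataset-abbreviation pattern ('OpenHuEval_*') to a postprocessor name, and the 'type' string is looked up in OpenCompass's TEXT_POSTPROCESSORS registry before evaluation. Below is a minimal sketch of that pattern-to-function resolution, assuming fnmatch-style matching; the dict-based registry, the resolve_postprocessor helper, and the lambda body are hypothetical stand-ins for illustration, not OpenCompass internals.

from fnmatch import fnmatch

# Hypothetical stand-in for OpenCompass's TEXT_POSTPROCESSORS registry.
TEXT_POSTPROCESSORS = {
    'rm_<think>_before_eval': lambda t: t.split('</think>')[-1].strip(),
}


def resolve_postprocessor(dataset_abbr, pred_postprocessor):
    # Return the postprocessor whose key pattern matches the dataset abbr.
    for pattern, cfg in pred_postprocessor.items():
        if fnmatch(dataset_abbr, pattern):
            return TEXT_POSTPROCESSORS[cfg['type']]
    return lambda t: t  # no pattern matched: leave the prediction as-is


fn = resolve_postprocessor(
    'OpenHuEval_HuProverbRea',
    {'OpenHuEval_*': {'type': 'rm_<think>_before_eval'}})
print(fn('<think>long chain of thought</think>Final answer: B'))
# -> Final answer: B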
|
@@ -33,7 +33,7 @@ for model in models:
     if model['abbr'].startswith('QwQ'):
         model['pred_postprocessor'] = {
             'OpenHuEval_*': {
-                'type': 'extract_qwq_answer_before_eval'
+                'type': 'extract_qwq_answer_before_eval_for_huproverbrea'
             }
         }
 del model
|
@@ -15,7 +15,8 @@ with read_base():
     from opencompass.configs.models.hf_internlm.lmdeploy_internlm3_8b_instruct import models as lmdeploy_internlm3_8b_instruct_model

     from opencompass.configs.models.qwq.lmdeploy_qwq_32b_preview import models as lmdeploy_qwq_32b_preview_model
-    from opencompass.configs.models.deepseek.deepseek_r1_api_aliyun import models as deepseek_r1_api_aliyun_model
+    # from opencompass.configs.models.deepseek.deepseek_r1_api_aliyun import models as deepseek_r1_api_aliyun_model
+    from opencompass.configs.models.deepseek.deepseek_r1_api_siliconflow import models as deepseek_r1_api_siliconflow_model
     from opencompass.configs.models.openai.o1_mini_2024_09_12 import models as o1_mini_2024_09_12_model
     # from opencompass.configs.models.openai.o3_mini_2025_01_31 import models as o3_mini_2025_01_31_model

@@ -30,6 +31,12 @@ for model in models:
                 'type': 'rm_<think>_before_eval'
             }
         }
+    if model['abbr'].startswith('QwQ'):
+        model['pred_postprocessor'] = {
+            'OpenHuEval_*': {
+                'type': 'extract_qwq_answer_before_eval_for_hustandardfib'
+            }
+        }
 del model

 work_dir = './outputs/' + __file__.split('/')[-1].split('.')[0] + '/'  # do NOT modify this line, yapf: disable, pylint: disable
|
@@ -243,7 +243,8 @@ def remove_reasoning_part_before_evaluation(text: str):
     return text


-@TEXT_POSTPROCESSORS.register_module('extract_qwq_answer_before_eval')
+@TEXT_POSTPROCESSORS.register_module(
+    'extract_qwq_answer_before_eval_for_huproverbrea')
 def extract_answer_before_evaluation(text: str):
     """Overall, there are three situations in responses of QWQ:

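One detail worth noting before the next hunk: the HuStandardFIB variant added below reuses the Python name `extract_answer_before_evaluation`. This works because `register_module` stores the decorated function object under its string key at decoration time, so rebinding the module-level name later leaves the earlier registry entry intact. A simplified registry sketch, not OpenCompass's actual Registry implementation, shows why:

class Registry:
    # Simplified sketch -- not OpenCompass's real Registry class.
    def __init__(self):
        self._modules = {}

    def register_module(self, name):
        def decorator(fn):
            self._modules[name] = fn  # function *object* captured here
            return fn
        return decorator

    def get(self, name):
        return self._modules[name]


TEXT_POSTPROCESSORS = Registry()


@TEXT_POSTPROCESSORS.register_module('for_huproverbrea')
def extract_answer_before_evaluation(text):
    return 'huproverbrea:' + text


@TEXT_POSTPROCESSORS.register_module('for_hustandardfib')
def extract_answer_before_evaluation(text):  # noqa: F811 (intentional rebind)
    return 'hustandardfib:' + text


print(TEXT_POSTPROCESSORS.get('for_huproverbrea')('x'))   # huproverbrea:x
print(TEXT_POSTPROCESSORS.get('for_hustandardfib')('x'))  # hustandardfib:x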
@@ -344,4 +345,41 @@ def extract_answer_before_evaluation(text: str):

     answer = '\n\n'.join(text_split[max(ans_start_idx - 1, 0):])

-    return answer, has_answer
+    return answer
+
+
+@TEXT_POSTPROCESSORS.register_module(
+    'extract_qwq_answer_before_eval_for_hustandardfib')
+def extract_answer_before_evaluation(text: str):
+    """The format of the answer from QwQ when inferring HuStandardFIB \
+    differs from that of other models due to the special prompt."""
+    max_sentence_len = 70
+    if len(re.findall(r'\n\n', text)) > 2:
+        split_mark = '\n\n'
+    else:
+        split_mark = '\n'
+    text_split = text.split(split_mark)
+    last_try_idx = max(len(text_split) - max_sentence_len, 0)
+    ans_start_idx = last_try_idx
+    has_answer = False
+    answer_flags = [
+        '#0#', '#0', 'summar', 'Final Answer', 'final answer', 'Final\nAnswer'
+    ]
+
+    for idx, s in enumerate(reversed(text_split)):
+        sen_idx = len(text_split) - 1 - idx
+        if sen_idx < last_try_idx:
+            break
+
+        for af in answer_flags:
+            if af in s:
+                has_answer = True
+                break
+
+        if has_answer:
+            ans_start_idx = sen_idx
+            break
+
+    answer = '\n\n'.join(text_split[max(ans_start_idx - 1, 0):])
+
+    return answer
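To make the heuristic concrete, here is a self-contained, condensed replica of the new function run on an invented QwQ-style response. The sample text is made up for illustration; only the extraction logic comes from the hunk above. The function splits the response into sentences, scans the tail for an answer flag such as '#0#' or 'Final Answer', and keeps everything from one sentence before the flagged one onward.

import re

# Condensed replica of extract_qwq_answer_before_eval_for_hustandardfib
# from the hunk above; behavior is unchanged, only the two nested breaks
# are folded into a single any() check.
def extract_qwq_answer(text):
    max_sentence_len = 70
    split_mark = '\n\n' if len(re.findall(r'\n\n', text)) > 2 else '\n'
    text_split = text.split(split_mark)
    last_try_idx = max(len(text_split) - max_sentence_len, 0)
    ans_start_idx = last_try_idx
    answer_flags = [
        '#0#', '#0', 'summar', 'Final Answer', 'final answer', 'Final\nAnswer'
    ]
    # Walk backwards over (at most) the last max_sentence_len sentences.
    for idx, s in enumerate(reversed(text_split)):
        sen_idx = len(text_split) - 1 - idx
        if sen_idx < last_try_idx:
            break
        if any(af in s for af in answer_flags):
            ans_start_idx = sen_idx
            break
    # Keep one sentence of context before the flagged sentence.
    return '\n\n'.join(text_split[max(ans_start_idx - 1, 0):])


# Invented response: reasoning paragraphs followed by a flagged answer.
response = ('Let me think about this Hungarian fill-in-the-blank.\n\n'
            'The blank most likely takes a noun.\n\n'
            'Checking candidates against the sentence.\n\n'
            'So, to summarize:\n\n'
            '#0# kutya')
print(extract_qwq_answer(response))
# -> So, to summarize:
#
#    #0# kutya

Two properties of the design are worth noting: the rejoin always uses '\n\n' even when the split used single newlines, and if no flag is found the fallback keeps roughly the last max_sentence_len sentences rather than returning the whole response.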
|