Mirror of https://github.com/open-compass/opencompass.git (synced 2025-05-30 16:03:24 +08:00)
add LLM as judge setting for matbench
This commit is contained in:
parent c3779ebfc1
commit 148a97f7a9
@@ -1,4 +1,5 @@
from mmengine.config import read_base

with read_base():
    from .matbench_gen_f71840 import matbench_datasets  # noqa: F401, F403
    # from .matbench_gen_testing_updated import matbench_datasets  # noqa: F401, F403
    from .matbench_gen_llm_judge import matbench_datasets  # noqa: F401, F403
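For context, a minimal sketch of how these Matbench datasets could be pulled into an evaluation config; the file name and the 7B model import below are assumptions for illustration, not part of this commit:

# eval_matbench_llm_judge.py -- illustrative sketch only; any model config under
# opencompass/configs/models should work in place of the assumed Qwen2.5-7B import.
from mmengine.config import read_base

with read_base():
    from opencompass.configs.datasets.matbench.matbench_gen_llm_judge import (
        matbench_datasets as datasets,
    )
    from opencompass.configs.models.qwen2_5.lmdeploy_qwen2_5_7b_instruct import (
        models,
    )

A config like this is then launched in the usual way, e.g. python run.py eval_matbench_llm_judge.py.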
opencompass/configs/datasets/matbench/matbench_gen_llm_judge.py (new file, 152 lines)
@@ -0,0 +1,152 @@
from mmengine.config import read_base  # required for the read_base() block below
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.matbench.matbench import MatbenchDataset
from opencompass.datasets.matbench.post_process import generic_llmjudge_postprocess, numerical_llmjudge_postprocess
from opencompass.evaluator.generic_llm_evaluator import GenericLLMEvaluator

# with read_base():
#     from opencompass.configs.models.qwen3.qwen3_4B import (
#         models as judge_model,
#     )

with read_base():
    from opencompass.configs.models.qwen2_5.lmdeploy_qwen2_5_14b_instruct import (
        models as judge_model,
    )

JUDGE_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.

Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.

<Original Question Begin>: \n{problem}\n<Original Question End>\n\n
<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n

Judging the correctness of candidates' answers:
""".strip()

REGRESSION_JUDGE_TEMPLATE = """
Please function as a precise data extraction engine. Your sole task is to process the provided information and return a single specific numerical value as a float.

Follow these strict output rules:

1. Your output must be a single floating-point number.
2. Do not include any surrounding text, explanations, units, or additional characters.
3. If the provided answer is incomplete or does not yield a determined result, output 0.
4. Return only the numerical value without any reasoning or additional content.
Your specific task is to extract the float result from the following:

<Original Question Begin>: \n{problem}\n<Original Question End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted Answer End>\n\n

Please provide the final numerical answer as a float:

""".strip()

matbench_reader_cfg = dict(
    input_columns=['problem'], output_column='answer')


matbench_tasks = ['matbench_steels', 'matbench_expt_gap', 'matbench_expt_is_metal', 'matbench_glass']

matbench_datasets = []

for task in matbench_tasks:
    if task in ['matbench_glass', 'matbench_expt_is_metal']:
        matbench_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[dict(role='HUMAN', prompt=f"{{problem}}")])),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer))

        matbench_eval_cfg = dict(
            evaluator=dict(
                type=GenericLLMEvaluator,  # Use LLM as judge
                prompt_template=dict(
                    type=PromptTemplate,
                    template=dict(
                        begin=[
                            dict(
                                role='SYSTEM',
                                fallback_role='HUMAN',
                                prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                            )
                        ],
                        round=[
                            dict(role='HUMAN', prompt=JUDGE_TEMPLATE),
                        ],
                    ),
                ),
                dataset_cfg=dict(
                    type=MatbenchDataset,
                    path=f'opencompass/Matbench/{task}.json',
                    reader_cfg=matbench_reader_cfg,
                ),
                judge_cfg=judge_model[0],
                dict_postprocessor=dict(type=generic_llmjudge_postprocess),
            ),
        )

    elif task in ['matbench_expt_gap', 'matbench_steels']:
        matbench_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[dict(role='HUMAN', prompt=f"{{problem}}")])),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer))

        matbench_eval_cfg = dict(
            evaluator=dict(
                type=GenericLLMEvaluator,  # Use the LLM as evaluator
                prompt_template=dict(
                    type=PromptTemplate,
                    template=dict(
                        begin=[
                            dict(
                                role='SYSTEM',
                                fallback_role='HUMAN',
                                prompt="You are a helpful assistant who extracts the answer as float of models' outputs.",
                            )
                        ],
                        round=[
                            dict(role='HUMAN', prompt=REGRESSION_JUDGE_TEMPLATE),
                        ],
                    ),
                ),
                dataset_cfg=dict(
                    type=MatbenchDataset,
                    path=f'opencompass/Matbench/{task}.json',
                    reader_cfg=matbench_reader_cfg,
                ),
                judge_cfg=judge_model[0],
                dict_postprocessor=dict(type=numerical_llmjudge_postprocess),
            ),
        )

    matbench_datasets.append(
        dict(
            type=MatbenchDataset,
            path=f'opencompass/Matbench/{task}.json',
            abbr=task,
            reader_cfg=matbench_reader_cfg,
            infer_cfg=matbench_infer_cfg,
            eval_cfg=matbench_eval_cfg))
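Since judge_cfg=judge_model[0] simply takes the first entry of whichever model list was imported as judge_model, the judge can be swapped by changing that import; for example, re-enabling the commented-out Qwen3 config above (assuming that config file exists in the checkout):

# Sketch: use the Qwen3-4B config referenced in the commented block above as the judge.
with read_base():
    from opencompass.configs.models.qwen3.qwen3_4B import (
        models as judge_model,
    )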
@@ -0,0 +1,68 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets.matbench.matbench import MatbenchDataset, MatbenchEvaluator_regression, MatbenchEvaluator_classification, MatbenchEvaluator_classification_glass


matbench_reader_cfg = dict(
    input_columns=['problem'], output_column='answer')

# matbench_reader_cfg['test_range'] = '[0:8]'

matbench_tasks = ['matbench_steels', 'matbench_expt_gap', 'matbench_expt_is_metal', 'matbench_glass']

matbench_datasets = []

for task in matbench_tasks:
    if task in ['matbench_expt_is_metal']:
        matbench_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[dict(role='HUMAN', prompt=f"{{problem}}")])),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer))

        matbench_eval_cfg = dict(
            evaluator=dict(type=MatbenchEvaluator_classification),
            pred_role='BOT')

    if task in ['matbench_glass']:
        matbench_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[dict(role='HUMAN', prompt=f"{{problem}}")])),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer))

        matbench_eval_cfg = dict(
            evaluator=dict(type=MatbenchEvaluator_classification_glass),
            pred_role='BOT')

    elif task in ['matbench_expt_gap', 'matbench_steels']:
        matbench_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[dict(role='HUMAN', prompt=f"{{problem}}")])),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer))

        matbench_eval_cfg = dict(
            evaluator=dict(type=MatbenchEvaluator_regression),
            pred_role='BOT')

    matbench_datasets.append(
        dict(
            type=MatbenchDataset,
            path=f'opencompass/Matbench/{task}.json',
            abbr=task,
            reader_cfg=matbench_reader_cfg,
            infer_cfg=matbench_infer_cfg,
            eval_cfg=matbench_eval_cfg))
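For a quick standalone check of the rule-based evaluators this config wires up (defined in matbench.py further down this diff), a minimal sketch with hypothetical predictions:

# Smoke-test sketch; inputs are made up, import path is taken from this commit.
from opencompass.datasets.matbench.matbench import MatbenchEvaluator_classification

evaluator = MatbenchEvaluator_classification()
result = evaluator.score(
    predictions=['Answer: True', 'Answer: False'],  # raw model outputs
    references=[True, False],                       # boolean gold labels
)
print(result['accuracy'], result['f1_score'])  # expected 1.0 1.0 with the updated parser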
@@ -1,43 +1,33 @@
import json
import os

from datasets import Dataset
from sklearn.metrics import (accuracy_score, f1_score, precision_score,
                             recall_score)

from opencompass.datasets.matbench.post_process import (parse_float_answer,
                                                        parse_true_false_answer
                                                        )
from opencompass.openicl.icl_evaluator import BaseEvaluator
from opencompass.registry import ICL_EVALUATORS, LOAD_DATASET
from opencompass.utils import get_data_path
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

from opencompass.datasets.matbench.post_process import parse_float_answer, parse_true_false_answer, parse_has_hasnot_answer
from ..base import BaseDataset


@LOAD_DATASET.register_module()
class MatbenchDataset(BaseDataset):

    @staticmethod
    def load(path, task):
        path = get_data_path(path)
        path = os.path.join(path,
                            'matbench_base_fold_0_' + task + '_test.json')
    def load(path):
        dataset = []
        with open(path, 'r', encoding='utf-8') as file:
            data = json.load(file)
            for item in data:
                dataset.append({
                    'problem': item['problem'],
                    'problem': item["problem"],
                    'answer': item['answer'],
                })
        dataset = Dataset.from_list(dataset)
        return dataset
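The loader above expects each opencompass/Matbench/{task}.json file to be a JSON list of records with problem and answer fields; a hypothetical illustration (values invented, the actual data files are not part of this commit):

# Hypothetical records matching the fields read by MatbenchDataset.load().
example_records = [
    {'problem': 'Is Ag2Te a metal? Answer True or False.', 'answer': True},
    {'problem': 'Estimate the experimental band gap (eV) of GaN.', 'answer': 3.4},
]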
@ICL_EVALUATORS.register_module()
class MatbenchEvaluator_regression(BaseEvaluator):

    def score(self, predictions, references):
        mae_sum = 0
        count = 0
@@ -45,11 +35,11 @@ class MatbenchEvaluator_regression(BaseEvaluator):
        for pred, ref in zip(predictions, references):
            pred = parse_float_answer(pred)
            detail = {'pred': pred, 'answer': ref, 'error': None}
            count += 1
            try:
                error = abs(float(pred) - float(ref))
                mae_sum += error
                detail['error'] = error
                count += 1
            except Exception as e:
                detail['error'] = str(e)
            details.append(detail)
@@ -64,6 +54,7 @@ class MatbenchEvaluator_classification(BaseEvaluator):
    def score(self, predictions, references):
        details = []
        predictions_parsed = []

        for pred, ref in zip(predictions, references):
            pred = parse_true_false_answer(pred)
            detail = {'pred': pred, 'answer': ref, 'correct': False}
@@ -71,13 +62,12 @@ class MatbenchEvaluator_classification(BaseEvaluator):
                detail['correct'] = True
            details.append(detail)
            predictions_parsed.append(pred)
        accuracy = accuracy_score(references, predictions_parsed)
        precision = precision_score(references,
                                    predictions_parsed,
                                    average='binary')
        recall = recall_score(references, predictions_parsed, average='binary')
        f1 = f1_score(references, predictions_parsed, average='binary')

        accuracy = accuracy_score(references, predictions_parsed)
        precision = precision_score(references, predictions_parsed, average='binary')  # Use 'weighted' for multi-class
        recall = recall_score(references, predictions_parsed, average='binary')  # Use 'weighted' for multi-class
        f1 = f1_score(references, predictions_parsed, average='binary')  # Use 'weighted' for multi-class

        return {
            'accuracy': accuracy,
            'precision': precision,
@@ -85,3 +75,31 @@ class MatbenchEvaluator_classification(BaseEvaluator):
            'f1_score': f1,
            'details': details
        }

@ICL_EVALUATORS.register_module()
class MatbenchEvaluator_classification_glass(BaseEvaluator):

    def score(self, predictions, references):
        details = []
        predictions_parsed = []
        for pred, ref in zip(predictions, references):

            pred = parse_has_hasnot_answer(pred)
            detail = {'pred': pred, 'answer': ref, 'correct': False}
            if pred == ref:
                detail['correct'] = True
            details.append(detail)
            predictions_parsed.append(pred)

        accuracy = accuracy_score(references, predictions_parsed)
        precision = precision_score(references, predictions_parsed, average='binary')  # Use 'weighted' for multi-class
        recall = recall_score(references, predictions_parsed, average='binary')  # Use 'weighted' for multi-class
        f1 = f1_score(references, predictions_parsed, average='binary')  # Use 'weighted' for multi-class

        return {
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f1_score': f1,
            'details': details
        }
@@ -1,25 +1,265 @@
# flake8: noqa

import json
import re

from opencompass.utils import get_logger


def parse_float_answer(raw_string, option=''):
    number_pattern = re.compile(r'[-+]?\d+(\.\d+)?([eE][-+]?\d+)?')

    # Search for the first match
    match = number_pattern.search(raw_string)
    if match:
        # Extract the matched number and convert it to float
        return float(match.group())
def parse_think(respons):
    index = respons.find("</think>")
    if index != -1:
        return respons[index + len("</think>"):]
    else:
        return respons


def get_final_results(judged_answers,
                      references,
                      origial_responses,
                      metric_name='accuracy'):
    count = 0
    is_correct_count = 0
    is_incorrect_count = 0
    is_not_attempted_count = 0
    attempted_judge_count = 0
    details = []
    for i, j, k in zip(judged_answers, references, origial_responses):
        if i in ['A', 'B']:
            attempted_judge_count += 1
        grade_letter = i
        detail = {
            'pred': k,
            'ref': j,
            'origin_grade_response': i,
            'grade_letter': grade_letter,
            'correct': False,
        }
        count += 1
        if grade_letter == 'A':
            is_correct_count += 1
            detail['correct'] = True
        elif grade_letter == 'B':
            is_incorrect_count += 1
        else:
            is_not_attempted_count += 1
        details.append(detail)

    is_correct = is_correct_count / count
    is_incorrect = is_incorrect_count / count
    is_given_attempted = is_correct + is_incorrect
    accuracy_given_attempted = (is_correct / is_given_attempted
                                if is_given_attempted > 0 else 0)
    attempted_judge_ratio = attempted_judge_count / count

    f1 = (2 * accuracy_given_attempted * is_correct /
          (accuracy_given_attempted + is_correct) if
          (accuracy_given_attempted + is_correct) > 0 else 0)
    result = {
        metric_name: is_correct * 100,
        f'{metric_name}_given_attempted': accuracy_given_attempted * 100,
        'f1_score': f1,
        'attempted_ratio': attempted_judge_ratio * 100,
        'correct_count': is_correct_count,
        'incorrect_count': is_incorrect_count,
        'not_attempted_count': is_not_attempted_count,
        'details': details,
    }
    return result
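A small worked example (hypothetical judge outputs, not from a real run) of how the grade letters are aggregated:

# Three judged items, two graded 'A': count=3, is_correct=2/3, all grades attempted.
res = get_final_results(
    judged_answers=['A', 'B', 'A'],
    references=['1.23', 'True', '0.5'],
    origial_responses=['pred 1', 'pred 2', 'pred 3'],
)
print(round(res['accuracy'], 1), res['attempted_ratio'])  # 66.7 100.0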
def get_numerical_final_results(judged_answers,
                                references,
                                origial_responses,
                                metric_name='mae'):
    sum_abs_error = 0.0
    count = 0
    details = []

    for pred, ref, orig in zip(judged_answers, references, origial_responses):
        error = abs(pred - ref)
        print(pred, ref, error, type(pred), type(ref), type(error))

        sum_abs_error += error
        details.append({
            'pred': pred,
            'ref': ref,
            'origin_response': orig,
            'error': error
        })
        count += 1

    mae = sum_abs_error / count if count > 0 else 0

    result = {
        metric_name: mae,
        'details': details
    }

    return result


def _generic_llmjudge_postprocess(judgement: str):
    judgement = parse_think(judgement)
    match = re.search(r'(A|B)', judgement)
    grade_letter = (match.group(0) if match else 'unknown'
                    )  # Return 'unknown' if no match
    return grade_letter


def _numerical_postprocess(judgement: str):
    judgement = parse_think(judgement)
    match = re.search(r'[-+]?\d*\.\d+|\d+\.\d*|\d+', judgement)
    numerical_answer = (match.group(0) if match else 0
                        )  # Return 0 if no match
    return float(numerical_answer)


def numerical_llmjudge_postprocess(
    output: dict,
    output_path: str,
) -> dict:
    judged_answers = []
    origial_responses = []
    references = []
    for k, v in output.items():
        origial_responses.append(v['prediction'])
        processed_judge = _numerical_postprocess(v['prediction'])
        if processed_judge is not None:
            judged_answers.append(processed_judge)
            try:
                references.append(v['gold'])

            except KeyError:
                get_logger().warning(
                    f'No gold answer for {k}, use empty string as reference!')
                references.append('')
    results = get_numerical_final_results(judged_answers, references, origial_responses)
    # results['details'] = output
    return results


def generic_llmjudge_postprocess(
    output: dict,
    output_path: str,
) -> dict:
    judged_answers = []
    origial_responses = []
    references = []
    for k, v in output.items():
        origial_responses.append(v['prediction'])
        processed_judge = _generic_llmjudge_postprocess(v['prediction'])
        if processed_judge is not None:
            judged_answers.append(processed_judge)
            try:
                references.append(v['gold'])

            except KeyError:
                get_logger().warning(
                    f'No gold answer for {k}, use empty string as reference!')
                references.append('')
    results = get_final_results(judged_answers, references, origial_responses)
    # results['details'] = output
    return results


def contains_elements_and_matches(sentence, chem_elts):
    matching_elements = [element for element in chem_elts if element in sentence]
    return (bool(matching_elements), matching_elements)


def remove_formula(sentence):
    # First try to detect and remove complete chemical formulas.
    # Match common formula patterns, including parentheses, digits, and combinations of several elements.
    chemical_formula_pattern = r'\b[A-Z][a-z]?\d*(?:[A-Z][a-z]?\d*)*(?:\([A-Z][a-z]?\d*(?:[A-Z][a-z]?\d*)*\)\d*)*\b'
    sentence = re.sub(chemical_formula_pattern, '', sentence)

    # Detect element symbols and filter them out.
    chem_elts = ['H', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Po', 'At', 'Fr', 'Ra', 'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf', 'Es', 'Fm', 'Md', 'No', 'Lr', 'Rf', 'Db', 'Sg', 'Bh', 'Hs', 'Mt', 'Ds', 'Rg', 'Cn', 'Nh', 'Fl', 'Mc', 'Lv', 'Ts']
    contains_elements, matching_elements = contains_elements_and_matches(sentence, chem_elts)

    # Filter out chemical elements remaining in the answer.
    if contains_elements and matching_elements:
        for element in matching_elements:
            # Remove the element symbol and any compound word containing it.
            pattern = re.compile(rf'\b\w*{element}\w*\b')
            sentence = re.sub(pattern, '', sentence)
    return sentence


def verify_float(number):
    if number < 0:
        return abs(number)
    if number >= 0 and number < 20:
        return number
    else:
        # Out-of-range values are treated as parsing failures and mapped to 0
        return 0


def parse_float_answer(sentence):
    # Correctly apply remove_formula to the sentence
    sentence = remove_formula(sentence)

    # First, look for formatted answer:number (case-insensitive, no spaces)
    processed_string = sentence.lower().replace(" ", "")
    answer_matches = re.findall(r'answer:(-?\d+(?:\.\d+)?(?:[eE][-+]?\d+)?)', processed_string)
    if answer_matches:
        try:
            return verify_float(float(answer_matches[-1]))
        except ValueError:
            pass

    # Then find all scientific notation numbers, take the last one
    sci_matches = re.findall(r'-?\d+(?:\.\d+)?(?:[eE][-+]?\d+)?', sentence)
    if sci_matches:
        try:
            return verify_float(float(sci_matches[-1]))
        except ValueError:
            pass

    # Lastly, find all regular floats, take the last one
    float_matches = re.findall(r'-?\d+(?:\.\d+)?', sentence)
    if float_matches:
        try:
            return verify_float(float(float_matches[-1]))
        except ValueError:
            pass

    # If no valid number found, return 0.0
    return 0.0
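Usage sketch for the parser above, with hypothetical model outputs; the expected values follow from the answer:/scientific-notation/plain-float fallbacks and verify_float:

print(parse_float_answer('Answer: 1.2e-3'))                                     # 0.0012
print(parse_float_answer('The predicted band gap is approximately 2.35 eV.'))   # 2.35
print(parse_float_answer('No numeric value given.'))                            # 0.0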
def parse_true_false_answer(raw_string, option=''):
    if 'yes' in raw_string.lower():
        # print("yes this is called")
    sentence = remove_formula(raw_string)
    answer_striped = raw_string.lower().replace(" ", "")
    if 'answer:true' in answer_striped:
        return True
    elif 'no' in raw_string.lower():
    elif 'answer:false' in answer_striped:
        return False
    elif "not" in answer_striped:
        return False
    elif "no" in answer_striped:
        return False
    elif "yes" in answer_striped:
        return True
    elif "itis" in answer_striped:
        return True
    else:
        return True


def parse_has_hasnot_answer(raw_string, option=''):
    sentence = remove_formula(raw_string)
    answer_striped = raw_string.lower().replace(" ", "")
    if 'answer:true' in answer_striped:
        return True
    elif 'answer:false' in answer_striped:
        return False
    elif "doesnot" in answer_striped:
        return False
    elif "not" in answer_striped:
        return False
    elif "no" in answer_striped:
        return False
    elif "yes" in answer_striped:
        return True
    elif "itis" in answer_striped:
        return True
    elif "has" in answer_striped:
        return True
    else:
        return True
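And a usage sketch (hypothetical strings) for the glass-task parser above:

print(parse_has_hasnot_answer('It does not form a glass.'))           # False, via the "doesnot" branch
print(parse_has_hasnot_answer('Yes, it has glass-forming ability.'))  # True, via the "yes" branch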