[Feature] Add custom model postprocess function (#1519)

Co-authored-by: liushz <liuhongwei@pjlab.rog.cn>
liushz 2024-09-18 14:40:51 +08:00 committed by GitHub
parent c9a7026f59
commit 2e9db77d57
9 changed files with 653 additions and 9 deletions

View File

@@ -0,0 +1,52 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import GSM8KDataset, gsm8k_dataset_postprocess
from opencompass.datasets import MATHEvaluator, math_postprocess_v2
from opencompass.utils.model_postprocessors import navie_model_postprocess
from opencompass.utils.postprocessors.naive import MATH_NAVIE_PROMPT_TEMPLATE
gsm8k_reader_cfg = dict(input_columns=['question'], output_column='answer')
gsm8k_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt='{question}\nPlease reason step by step, and put your final answer within \\boxed{}.'),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=512),
)
# # You can write your own postprocess prompt like:
# GSM8K_NAVIE_PROMPT_TEMPLATE = """
# There is a detailed explanation of the final answer you should extract:
# 1. ...
# 2. ...
# ...
# """
gsm8k_eval_cfg = dict(
evaluator=dict(type=MATHEvaluator, version='v2'),
pred_postprocessor=dict(type=math_postprocess_v2),
dataset_postprocessor=dict(type=gsm8k_dataset_postprocess),
model_postprocessor=dict(
type=navie_model_postprocess,
custom_instruction=MATH_NAVIE_PROMPT_TEMPLATE,
model_name='',
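        # Leave model_name empty to use the first model served by the endpoint;
        # a comma-separated api_url is split and one URL is chosen at random.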
api_url='http://0.0.0.0:23333/v1,http://0.0.0.0:23334/v1')
)
gsm8k_datasets = [
dict(
abbr='gsm8k',
type=GSM8KDataset,
path='opencompass/gsm8k',
reader_cfg=gsm8k_reader_cfg,
infer_cfg=gsm8k_infer_cfg,
eval_cfg=gsm8k_eval_cfg,
)
]

View File

@@ -0,0 +1,141 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import FixKRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccwithDetailsEvaluator
from opencompass.datasets import MMLUDataset
from opencompass.utils.text_postprocessors import first_option_postprocess
from opencompass.utils.model_postprocessors import navie_model_postprocess
from opencompass.utils.postprocessors.naive import OPTION_NAVIE_PROMPT_TEMPLATE
# None of the MMLU datasets on HuggingFace are parsed correctly, so we use our own dataset reader
# Please download the dataset from https://people.eecs.berkeley.edu/~hendrycks/data.tar
mmlu_reader_cfg = dict(
input_columns=['input', 'A', 'B', 'C', 'D'],
output_column='target',
train_split='dev')
mmlu_all_sets = [
'college_biology',
'college_chemistry',
'college_computer_science',
'college_mathematics',
'college_physics',
'electrical_engineering',
'astronomy',
'anatomy',
'abstract_algebra',
'machine_learning',
'clinical_knowledge',
'global_facts',
'management',
'nutrition',
'marketing',
'professional_accounting',
'high_school_geography',
'international_law',
'moral_scenarios',
'computer_security',
'high_school_microeconomics',
'professional_law',
'medical_genetics',
'professional_psychology',
'jurisprudence',
'world_religions',
'philosophy',
'virology',
'high_school_chemistry',
'public_relations',
'high_school_macroeconomics',
'human_sexuality',
'elementary_mathematics',
'high_school_physics',
'high_school_computer_science',
'high_school_european_history',
'business_ethics',
'moral_disputes',
'high_school_statistics',
'miscellaneous',
'formal_logic',
'high_school_government_and_politics',
'prehistory',
'security_studies',
'high_school_biology',
'logical_fallacies',
'high_school_world_history',
'professional_medicine',
'high_school_mathematics',
'college_medicine',
'high_school_us_history',
'sociology',
'econometrics',
'high_school_psychology',
'human_aging',
'us_foreign_policy',
'conceptual_physics',
]
mmlu_datasets = []
for _name in mmlu_all_sets:
_hint = f'There is a single choice question about {_name.replace("_", " ")}. Answer the question by replying A, B, C or D.'
mmlu_infer_cfg = dict(
ice_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role='HUMAN',
prompt=
f'{_hint}\nQuestion: {{input}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nAnswer: '
),
dict(role='BOT', prompt='{target}\n')
]),
),
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin='</E>',
round=[
dict(
role='HUMAN',
prompt=f'{_hint}\nQuestion: {{input}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nAnswer: '
),
],
),
ice_token='</E>',
),
retriever=dict(type=FixKRetriever, fix_id_list=[0, 1, 2, 3, 4]),
inferencer=dict(type=GenInferencer),
)
# # You can write your own postprocess prompt like:
# MMLU_NAVIE_PROMPT_TEMPLATE = """
# There is a detailed explanation of the final answer you should extract:
# 1. ...
# 2. ...
# ...
# """
mmlu_eval_cfg = dict(
evaluator=dict(type=AccwithDetailsEvaluator),
pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'),
model_postprocessor=dict(
type=navie_model_postprocess,
custom_instruction=OPTION_NAVIE_PROMPT_TEMPLATE,
model_name='',
api_url='http://0.0.0.0:23333/v1,http://0.0.0.0:23334/v1')
)
mmlu_datasets.append(
dict(
abbr=f'lukaemon_mmlu_{_name}',
type=MMLUDataset,
path='opencompass/mmlu',
name=_name,
reader_cfg=mmlu_reader_cfg,
infer_cfg=mmlu_infer_cfg,
eval_cfg=mmlu_eval_cfg,
))
del _name, _hint

View File

@@ -0,0 +1,52 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import GSM8KDataset, gsm8k_dataset_postprocess
from opencompass.datasets import MATHEvaluator, math_postprocess_v2
from opencompass.utils.model_postprocessors import navie_model_postprocess
from opencompass.utils.postprocessors.naive import MATH_NAVIE_PROMPT_TEMPLATE
gsm8k_reader_cfg = dict(input_columns=['question'], output_column='answer')
gsm8k_infer_cfg = dict(
prompt_template=dict(
type=PromptTemplate,
template=dict(
round=[
dict(role='HUMAN', prompt='{question}\nPlease reason step by step, and put your final answer within \\boxed{}.'),
],
),
),
retriever=dict(type=ZeroRetriever),
inferencer=dict(type=GenInferencer, max_out_len=512),
)
# # You can write your own postprocess prompt like:
# GSM8K_NAVIE_PROMPT_TEMPLATE = """
# There is a detailed explanation of the final answer you should extract:
# 1. ...
# 2. ...
# ...
# """
gsm8k_eval_cfg = dict(
evaluator=dict(type=MATHEvaluator, version='v2'),
pred_postprocessor=dict(type=math_postprocess_v2),
dataset_postprocessor=dict(type=gsm8k_dataset_postprocess),
model_postprocessor=dict(
type=navie_model_postprocess,
custom_instruction=MATH_NAVIE_PROMPT_TEMPLATE,
model_name='',
api_url='http://0.0.0.0:23333/v1,http://0.0.0.0:23334/v1')
)
gsm8k_datasets = [
dict(
abbr='gsm8k',
type=GSM8KDataset,
path='opencompass/gsm8k',
reader_cfg=gsm8k_reader_cfg,
infer_cfg=gsm8k_infer_cfg,
eval_cfg=gsm8k_eval_cfg,
)
]

View File

@@ -0,0 +1,141 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import FixKRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccwithDetailsEvaluator
from opencompass.datasets import MMLUDataset
from opencompass.utils.text_postprocessors import first_option_postprocess
from opencompass.utils.model_postprocessors import navie_model_postprocess
from opencompass.utils.postprocessors.naive import OPTION_NAVIE_PROMPT_TEMPLATE
# None of the MMLU datasets on HuggingFace are parsed correctly, so we use our own dataset reader
# Please download the dataset from https://people.eecs.berkeley.edu/~hendrycks/data.tar
mmlu_reader_cfg = dict(
input_columns=['input', 'A', 'B', 'C', 'D'],
output_column='target',
train_split='dev')
mmlu_all_sets = [
'college_biology',
'college_chemistry',
'college_computer_science',
'college_mathematics',
'college_physics',
'electrical_engineering',
'astronomy',
'anatomy',
'abstract_algebra',
'machine_learning',
'clinical_knowledge',
'global_facts',
'management',
'nutrition',
'marketing',
'professional_accounting',
'high_school_geography',
'international_law',
'moral_scenarios',
'computer_security',
'high_school_microeconomics',
'professional_law',
'medical_genetics',
'professional_psychology',
'jurisprudence',
'world_religions',
'philosophy',
'virology',
'high_school_chemistry',
'public_relations',
'high_school_macroeconomics',
'human_sexuality',
'elementary_mathematics',
'high_school_physics',
'high_school_computer_science',
'high_school_european_history',
'business_ethics',
'moral_disputes',
'high_school_statistics',
'miscellaneous',
'formal_logic',
'high_school_government_and_politics',
'prehistory',
'security_studies',
'high_school_biology',
'logical_fallacies',
'high_school_world_history',
'professional_medicine',
'high_school_mathematics',
'college_medicine',
'high_school_us_history',
'sociology',
'econometrics',
'high_school_psychology',
'human_aging',
'us_foreign_policy',
'conceptual_physics',
]
mmlu_datasets = []
for _name in mmlu_all_sets:
_hint = f'There is a single choice question about {_name.replace("_", " ")}. Answer the question by replying A, B, C or D.'
mmlu_infer_cfg = dict(
ice_template=dict(
type=PromptTemplate,
template=dict(round=[
dict(
role='HUMAN',
prompt=
f'{_hint}\nQuestion: {{input}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nAnswer: '
),
dict(role='BOT', prompt='{target}\n')
]),
),
prompt_template=dict(
type=PromptTemplate,
template=dict(
begin='</E>',
round=[
dict(
role='HUMAN',
prompt=f'{_hint}\nQuestion: {{input}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nAnswer: '
),
],
),
ice_token='</E>',
),
retriever=dict(type=FixKRetriever, fix_id_list=[0, 1, 2, 3, 4]),
inferencer=dict(type=GenInferencer),
)
# # You can write your own postprocess prompt like:
# MMLU_NAVIE_PROMPT_TEMPLATE = """
# There is a detailed explanation of the final answer you should extract:
# 1. ...
# 2. ...
# ...
# """
mmlu_eval_cfg = dict(
evaluator=dict(type=AccwithDetailsEvaluator),
pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'),
model_postprocessor=dict(
type=navie_model_postprocess,
custom_instruction=OPTION_NAVIE_PROMPT_TEMPLATE,
model_name='',
api_url='http://0.0.0.0:23333/v1,http://0.0.0.0:23334/v1')
)
mmlu_datasets.append(
dict(
abbr=f'lukaemon_mmlu_{_name}',
type=MMLUDataset,
path='opencompass/mmlu',
name=_name,
reader_cfg=mmlu_reader_cfg,
infer_cfg=mmlu_infer_cfg,
eval_cfg=mmlu_eval_cfg,
))
del _name, _hint

View File

@@ -6,12 +6,66 @@ from tqdm import tqdm
from opencompass.registry import TEXT_POSTPROCESSORS
from .postprocessors.naive import NaiveExtractor, format_input_naive
from .postprocessors.xfinder.extractor import Extractor
from .postprocessors.xfinder.xfinder_utils import (DataProcessor,
convert_to_xfinder_format)
-def gen_output(ori_data, extractor):
+def gen_output_naive(ori_data, extractor):
extracted_answers = []
for item in tqdm(ori_data):
user_input = extractor.prepare_input(item)
extracted_answer = extractor.gen_output(user_input)
item['extracted_answer'] = extracted_answer
extracted_answers.append(extracted_answer)
return extracted_answers
@TEXT_POSTPROCESSORS.register_module('naive')
def navie_model_postprocess(preds: list, model_name: str,
custom_instruction: str, api_url: Union[str, list],
**kwargs) -> list:
"""Postprocess the text extracted by custom model.
Args:
preds (list): The question, reference answer and model prediction.
model_name (str): The name of the model.
custom_instruction (str): Custom instruction for the dataset.
url (Union[str, list]): The api url of the model.
Returns:
list: The postprocessed answers.
"""
def _eval_pred(texts, extractor, num_processes=8):
ori_data = texts
extracted_answers = []
batched_ori_data = []
# Split data into batches
num_processes = min(num_processes, len(ori_data))
batch_size = len(ori_data) // num_processes
for i in range(0, len(ori_data), batch_size):
batched_ori_data.append(ori_data[i:i + batch_size])
with Pool(num_processes) as p:
results = p.map(partial(gen_output_naive, extractor=extractor),
batched_ori_data)
for result in results:
extracted_answers.extend(result)
return extracted_answers
format_data = format_input_naive(preds)
assert api_url is not None, 'Please provide the api url.'
extractor = NaiveExtractor(
model_name=model_name,
custom_instruction=custom_instruction,
url=api_url.split(',') if ',' in api_url else api_url)
calc_acc_func = partial(_eval_pred, extractor=extractor)
extracted_answers = calc_acc_func(format_data)
return extracted_answers
def gen_output_xfinder(ori_data, extractor):
ext_cor_pairs = []
extracted_data = []
extracted_answers = []
@@ -30,9 +84,8 @@ def gen_output(ori_data, extractor):
@TEXT_POSTPROCESSORS.register_module('xfinder')
-def xfinder_postprocess(preds: list, question_type: str,
-                        xfinder_model_name: str,
-                        xfiner_api_url: Union[str, list], **kwargs) -> list:
+def xfinder_postprocess(preds: list, question_type: str, model_name: str,
+                        api_url: Union[str, list], **kwargs) -> list:
"""Postprocess the text extracted by xFinder model.
Args:
preds (list): The question, reference answer and model prediction.
@@ -56,7 +109,7 @@ def xfinder_postprocess(preds: list, question_type: str,
for i in range(0, len(ori_data), batch_size):
batched_ori_data.append(ori_data[i:i + batch_size])
with Pool(num_processes) as p:
-        results = p.map(partial(gen_output, extractor=extractor),
+        results = p.map(partial(gen_output_xfinder, extractor=extractor),
batched_ori_data)
for result in results:
extracted_answers += result[0]
@@ -65,11 +118,11 @@
return extracted_answers
format_data = convert_to_xfinder_format(question_type, preds)
-    assert xfiner_api_url is not None, 'Please provide the api url.'
+    assert api_url is not None, 'Please provide the api url.'
data_processor = DataProcessor()
-    extractor = Extractor(model_name=xfinder_model_name,
-                          url=xfiner_api_url.split(',')
-                          if ',' in xfiner_api_url else xfiner_api_url)
+    extractor = Extractor(
+        model_name=model_name,
+        url=api_url.split(',') if ',' in api_url else api_url)
calc_acc_func = partial(_eval_pred,
data_processor=data_processor,
extractor=extractor)

View File

@@ -0,0 +1,11 @@
OPTION_NAVIE_PROMPT_TEMPLATE = """
There is a detailed explanation of the final answer you should extract:
1. You should extract the final answer option like 'A', 'B', 'C', 'D' ... from the given output sentences.
2. The question is a single choice question, so the final answer option should be one of the options, not a combination of options.
""" # noqa
MATH_NAVIE_PROMPT_TEMPLATE = """
There is a detailed explanation of the final answer you should extract:
1. The question type is a math question, so the final answer should be a number, set, vector, matrix, interval, expression, function, equation, or inequality and any combination of them.
2. If the final answer includes additional symbols, such as units, you should exclude them and only extract the pure final answer.
""" # noqa

View File

@@ -0,0 +1,71 @@
## Short Usage Introduction for the Naive Model Postprocessor with a Custom Model
<!-- Now OC can use -->
### Step 1: Deploy an API server using vLLM or LMDeploy
```bash
lmdeploy serve api_server meta-llama/Meta-Llama-3-8B-Instruct --model-name llama3-8b-instruct --server-port 23333 --backend turbomind --tp 1
```
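Before adding the postprocessor to a config, it can help to confirm the server is reachable. Below is a minimal sketch using the same OpenAI-compatible client the extractor relies on; the port and model name follow the Step 1 command, so adjust them to your deployment:
```python
from openai import OpenAI

# Point the client at the locally deployed OpenAI-compatible endpoint.
client = OpenAI(api_key='EMPTY', base_url='http://0.0.0.0:23333/v1')

# List the served models; the extractor falls back to the first entry
# when `model_name` is left empty in the config.
print([m.id for m in client.models.list().data])

# Send a trivial request to verify generation works end to end.
reply = client.chat.completions.create(
    model='llama3-8b-instruct',  # assumes the --model-name from Step 1
    messages=[{'role': 'user', 'content': 'Say OK.'}],
    temperature=0,
    max_tokens=8,
)
print(reply.choices[0].message.content)
```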
### Step 2: Add Naive Model Postprocessor to the configuration file
Taking GSM8K as an example, add the following lines to the configuration file and replace `api_url` with the correct address of your API server.
```python
...
from opencompass.utils.model_postprocessors import navie_model_postprocess
from opencompass.utils.postprocessors.naive import MATH_NAVIE_PROMPT_TEMPLATE
...
gsm8k_eval_cfg = dict(
evaluator=dict(type=MATHEvaluator, version='v2'),
pred_postprocessor=dict(type=math_postprocess_v2),
dataset_postprocessor=dict(type=gsm8k_dataset_postprocess),
# Add the following line to use the naive model postprocessor
model_postprocessor=dict(
type=navie_model_postprocess,
custom_instruction=MATH_NAVIE_PROMPT_TEMPLATE,
model_name='llama3-8b-instruct',
api_url='http://0.0.0.0:23333/v1,http://0.0.0.0:23334/v1')
)
...
```
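The postprocessor can also be invoked directly, which is handy for debugging a prompt template before a full run. A minimal sketch, assuming the Step 1 server is running; the toy prediction record mirrors the keys `format_input_naive` expects (`origin_prompt`, `prediction`, `reference`):
```python
from opencompass.utils.model_postprocessors import navie_model_postprocess
from opencompass.utils.postprocessors.naive import MATH_NAVIE_PROMPT_TEMPLATE

# A single toy prediction record in the format the postprocessor consumes.
preds = [{
    'origin_prompt': [{'role': 'HUMAN',
                       'prompt': 'Janet has 3 apples and buys 2 more. '
                                 'How many apples does she have?'}],
    'prediction': 'She starts with 3 and buys 2, so 3 + 2 = 5. '
                  'The answer is \\boxed{5}.',
    'reference': '5',
}]

extracted = navie_model_postprocess(
    preds,
    model_name='llama3-8b-instruct',  # '' falls back to the first served model
    custom_instruction=MATH_NAVIE_PROMPT_TEMPLATE,
    api_url='http://0.0.0.0:23333/v1',
)
print(extracted)  # expected: ['5']
```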
The extraction prompt can also be customized via the `custom_instruction` parameter. Two default templates are currently provided: `MATH_NAVIE_PROMPT_TEMPLATE` for extracting math answers (e.g., GSM8K and MATH) and `OPTION_NAVIE_PROMPT_TEMPLATE` for extracting options (e.g., MMLU). You can also write your own prompt template, like:
```python
OPTION_NAVIE_PROMPT_TEMPLATE = """
There is a detailed explanation of the final answer you should extract:
1. You should extract the final answer option like 'A', 'B', 'C', 'D' ... from the given output sentences.
2. The question is a single choice question, so the final answer option should be one of the options, not a combination of options.
"""
```
Your prompt should start with `There is a detailed explanation of the final answer you should extract:` and be followed by your customized instructions.
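For instance, a hypothetical GSM8K-specific template following that convention could look like this (illustrative only; the PR ships just the two defaults above):
```python
# Hypothetical dataset-specific template; only MATH_NAVIE_PROMPT_TEMPLATE and
# OPTION_NAVIE_PROMPT_TEMPLATE are shipped with this PR.
GSM8K_NAVIE_PROMPT_TEMPLATE = """
There is a detailed explanation of the final answer you should extract:
1. The question is a grade-school math word problem, so the final answer should be a single number.
2. If the output states the answer with units or extra words, extract only the bare number.
"""
```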
### Step 3: Run the Evaluation as Usual
Now you can run the evaluation as usual with the modified configuration file. The evaluation uses the custom model to post-process predictions, and the final score is reported as `model_postprocess_accuracy`, for example:
```Markdown
dataset version metric mode llama-3-8b-instruct-turbomind
------------------------------------------------- --------- -------------------------- ------ -------------------------------
gsm8k a58960 accuracy gen 73.46
gsm8k a58960 model_postprocess_accuracy gen 78.77
```
## Experiment Results
We tested the model postprocess method with different post-processing models (Qwen2-72B-Chat and Llama3-8b-Instruct) on GSM8K and MMLU, post-processing the predictions of `Meta-Llama-3-8B-Instruct` with the settings above. The results are as follows:
```Markdown
| Dataset | Type   | Config ID | Regex Postprocess Score | Model Postprocess Score (Llama3-8b-Instruct) | Model Postprocess Score (Qwen2-72B-Chat) |
| ------- | ------ | --------- | ----------------------- | -------------------------------------------- | ---------------------------------------- |
| gsm8k   | math   | a58960    | 73.46                   | 79.08                                        | 78.77                                    |
| mmlu    | option | 4d595a    | 67.89                   | 65.26                                        | 67.94                                    |
```
The `model_postprocess_accuracy` metric in the evaluation output is the final score after the naive model postprocessor is applied.

View File

@@ -0,0 +1,2 @@
from .extractor import * # noqa
from .PROMPT_TEMPLATE import * # noqa

View File

@@ -0,0 +1,121 @@
# Naive model extractor for OpenCompass, modified from xFinder: https://github.com/IAAR-Shanghai/xFinder # noqa
import json
import time
from logging import getLogger
from openai import OpenAI
Meta_Instruction = """I will provide you with a question, output sentences along with an answer range. The output sentences are the response of the question provided. The answer range could either describe the type of answer expected or list all possible valid answers. Using the information provided, you must accurately and precisely determine and extract the intended key answer from the output sentences. Please don't have your subjective thoughts about the question.
First, you need to determine whether the content of the output sentences is relevant to the given question. If the entire output sentences are unrelated to the question (meaning the output sentences are not addressing the question), then output [No valid answer].
Otherwise, ignore the parts of the output sentences that have no relevance to the question and then extract the key answer that matches the answer range.
Below are some special cases you need to be aware of:
(1) If the output sentences present multiple different answers, carefully determine if the later provided answer is a correction or modification of a previous one. If so, extract this corrected or modified answer as the final response. Conversely, if the output sentences fluctuate between multiple answers without a clear final answer, you should output [No valid answer].
(2) If the answer range is a list and the key answer in the output sentences is not explicitly listed among the candidate options in the answer range, also output [No valid answer].
(3) You should only return the precise answer you extract, without processing the answer. Please return only the answer and do not add any additional content.
""" # noqa
def format_input_naive(data):
format_data = []
for item in data:
template = {}
question = item['origin_prompt'][-1]['prompt']
llm_output = item['prediction']
correct_answer = item['reference'] if item['reference'] else item[
'gold']
template['correct_answer'] = correct_answer
template['question'] = question
template['llm_output'] = llm_output
format_data.append(template)
return format_data
class NaiveExtractor:
def __init__(
self,
model_name,
model_path=None,
url=None,
temperature=0,
max_tokens=3000,
api_key='EMPTY',
SYSTEM='You are a helpful assistant tasked with extracting the precise key answer from given output sentences. You must only provide the extracted key answer without including any additional text.', # noqa
custom_instruction=''):
self.model_name = model_name
self.SYSTEM = SYSTEM
self.model_path = model_path
self.url = url
self.api_key = api_key
self.temperature = temperature
self.max_tokens = max_tokens
self.custom_instruction = custom_instruction
self.logger = getLogger(__name__)
def prepare_input(self, item):
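        # Build the extraction query: the shared meta instruction, the
        # dataset-specific instruction, then the question and the raw model
        # output, ending with the 'Key extracted answer:' cue.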
user_input = Meta_Instruction + self.custom_instruction + \
"Question: \"\"\"" + item['question'] + "\"\"\"\n\n" + \
"Output sentences: \"\"\"" + item['llm_output'] + "\"\"\"\n\n" + \
'Key extracted answer: '
return user_input
def gen_output(self, query):
return self.openai_infer(query)
def openai_infer(self, query: str, retry=9) -> str:
"""Perform inference on the OpenAI model.
Args:
query (str): The input query.
Returns:
str: The extracted answer (xFinder's output).
"""
if isinstance(self.url, list):
            # Randomly pick one API URL for simple load balancing
import random
self.url = random.choice(self.url)
self.client = OpenAI(
api_key=self.api_key,
base_url=self.url,
)
self.retry = retry
t = time.time()
retry = self.retry
response = ''
while retry > 0:
try:
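                # If no model name was configured, fall back to the first
                # model exposed by the endpoint.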
chat_response = self.client.chat.completions.create(
model=self.client.models.list().data[0].id
if self.model_name == '' else self.model_name,
messages=[
{
'role': 'system',
'content': self.SYSTEM
},
{
'role': 'user',
'content': query
},
],
temperature=self.temperature,
max_tokens=self.max_tokens,
)
js_response = json.loads(chat_response.model_dump_json())
response = js_response['choices'][0]['message']['content']
break
except Exception as e:
self.logger.info(f'Error: {e}')
self.logger.info(f'{self.url} is down. Retrying...')
self.logger.info(f'Time elapsed: {time.time() - t} seconds')
time.sleep(6)
retry -= 1
if retry == 0:
response = 'Error: Failed to get response.'
self.logger.info(f'{response} after {self.retry} tries.')
raise ValueError('The api is down')
return response.strip()