Mirror of https://github.com/open-compass/opencompass.git, synced 2025-05-30 16:03:24 +08:00

Commit: [Sync] Merge branch 'dev' into zfz/update-keyset-demo (#876)

parent 32b5948f4e
commit d34ba11106
@ -7,6 +7,7 @@ exclude: |
          opencompass/datasets/lawbench/utils|
          opencompass/datasets/lawbench/evaluation_functions/|
          opencompass/datasets/medbench/|
+          opencompass/datasets/teval/|
          opencompass/datasets/NPHardEval/|
          docs/zh_cn/advanced_guides/compassbench_intro.md
      )
108
configs/datasets/MathBench/mathbench_2024_gen_de9ff9.py
Normal file
@ -0,0 +1,108 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator
from opencompass.datasets import MathBenchDataset, mathbench_postprocess
from opencompass.utils.text_postprocessors import first_option_postprocess

single_choice_prompts = {
    "single_choice_cn_with_reasoning": "以下是一道关于数学的单项选择题,请你一步一步推理,并在最后用“所以答案为选项X”给出答案,其中“X”为选项A,B,C,D中你认为正确的选项。下面是你要回答的问题\n{question}\n让我们一步一步思考:\n",
    "single_choice_cn": "以下是一道关于数学的单项选择题,请你直接回答正确答案的选项序号。\n下面是你要回答的题目:\n{question}\n答案选项:",
    "single_choice_en_with_reasoning": "Here is a multiple-choice question about mathematics. Please reason through it step by step, and at the end, provide your answer option with 'Therefore, the correct answer is option X', where 'X' is the option among A, B, C, D that you think is correct. Here is the question you need to answer:\n{question}\nLet's think step by step:",
    "single_choice_en": "Here is a multiple-choice question about mathematics. Please provide the correct answer option directly.\nHere is the question you need to answer:\n{question}\nAnswer option:",
}

cloze_prompts = {
    "cloze_cn": [
        dict(role='HUMAN', prompt='Q: 林中有15棵树。林务工人员今天将在林中种植树木。完成后,将有21棵树。林务工人员今天种植了多少棵树?'),
        dict(role='BOT', prompt='A: 我们从15棵树开始。后来有21棵树。差值必定是他们种植的树木数量。所以,他们必须种植了21 - 15 = 6棵树。答案是 6\n'),
        dict(role='HUMAN', prompt='Q: 如果停车场有3辆车,又有2辆车进来,停车场里有多少辆车?'),
        dict(role='BOT', prompt='A: 停车场已经有3辆车。又进来了2辆车。现在有3 + 2 = 5辆车。答案是 5\n'),
        dict(role='HUMAN', prompt='Q: 黎恩有32块巧克力,她的妹妹有42块。如果他们吃了35块,他们总共剩下多少块?'),
        dict(role='BOT', prompt='A: 黎恩有32块巧克力,黎恩的妹妹有42块。这意味着原本有32 + 42 = 74块巧克力。被吃掉了35块。所以他们总共还剩下74 - 35 = 39块巧克力。答案是 39\n'),
        dict(role='HUMAN', prompt='Q: 杰森有20个棒棒糖。他给丹妮一些棒棒糖。现在杰森只剩下12个棒棒糖。杰森给丹妮多少个棒棒糖?'),
        dict(role='BOT', prompt='A: 杰森有20个棒棒糖。因为他现在只剩下12个,所以他必须把剩下的都给了丹妮。他给丹妮的棒棒糖数量必定是20 - 12 = 8个。答案是 8\n'),
        dict(role='HUMAN', prompt='Q: 莎莎有五个玩具。在圣诞节,她从她的爸爸和妈妈那里各得到了两个玩具。现在她有多少个玩具?'),
        dict(role='BOT', prompt='A: 她有5个玩具。她从妈妈那里得到了2个,所以之后她有5 + 2 = 7个玩具。然后她从爸爸那里得到了2个,所以总共她有7 + 2 = 9个玩具。答案是 9\n'),
        dict(role='HUMAN', prompt='Q: 服务器房间里有九台电脑。从周一到周四每天增加五台电脑。现在服务器房里有多少台电脑?'),
        dict(role='BOT', prompt='A: 从周一到周四有4天。每天增加5台电脑。这意味着总共增加了4 * 5 = 20台电脑。一开始有9台电脑,所以现在有9 + 20 = 29台电脑。答案是 29\n'),
        dict(role='HUMAN', prompt='Q: 迈克尔有58个高尔夫球。星期二,他丢失了23个高尔夫球。星期三,他又丢失了2个。星期三结束时他还剩下多少个高尔夫球?'),
        dict(role='BOT', prompt='A: 迈克尔一开始有58个球。星期二他丢失了23个,所以之后他还剩下58 - 23 = 35个球。星期三他又丢失了2个,所以现在他还剩下35 - 2 = 33个球。答案是 33\n'),
        dict(role='HUMAN', prompt='Q: 奥利维亚有23美元。她用每个3美元的价格买了五个百吉饼。她还剩下多少钱?'),
        dict(role='BOT', prompt='A: 她以每个3美元的价格买了5个百吉饼。这意味着她在百吉饼上花费了5 * 3 = 15美元。她一开始有23美元,所以现在她还剩下23 - 15 = 8美元。答案是 8\n'),
        dict(role='HUMAN', prompt='Q: {question}'),
        dict(role='BOT', prompt='A: {answer}'),
    ],
    "cloze_en": [
        dict(role='HUMAN', prompt='Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?'),
        dict(role='BOT', prompt='A: We start with 15 trees. Later we have 21 trees. The difference must be the number of trees they planted. So, they must have planted 21 - 15 = 6 trees. The answer is 6.\n'),
        dict(role='HUMAN', prompt='Q: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?'),
        dict(role='BOT', prompt='A: There are 3 cars in the parking lot already. 2 more arrive. Now there are 3 + 2 = 5 cars. The answer is 5.\n'),
        dict(role='HUMAN', prompt='Q: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?'),
        dict(role='BOT', prompt="A: Leah had 32 chocolates and Leah's sister had 42. That means there were originally 32 + 42 = 74 chocolates. 35 have been eaten. So in total they still have 74 - 35 = 39 chocolates. The answer is 39.\n"),
        dict(role='HUMAN', prompt='Q: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?'),
        dict(role='BOT', prompt='A: Jason had 20 lollipops. Since he only has 12 now, he must have given the rest to Denny. The number of lollipops he has given to Denny must have been 20 - 12 = 8 lollipops. The answer is 8.\n'),
        dict(role='HUMAN', prompt='Q: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?'),
        dict(role='BOT', prompt='A: He has 5 toys. He got 2 from mom, so after that he has 5 + 2 = 7 toys. Then he got 2 more from dad, so in total he has 7 + 2 = 9 toys. The answer is 9.\n'),
        dict(role='HUMAN', prompt='Q: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?'),
        dict(role='BOT', prompt='A: There are 4 days from monday to thursday. 5 computers were added each day. That means in total 4 * 5 = 20 computers were added. There were 9 computers in the beginning, so now there are 9 + 20 = 29 computers. The answer is 29.\n'),
        dict(role='HUMAN', prompt='Q: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?'),
        dict(role='BOT', prompt='A: Michael initially had 58 balls. He lost 23 on Tuesday, so after that he has 58 - 23 = 35 balls. On Wednesday he lost 2 more so now he has 35 - 2 = 33 balls. The answer is 33.\n'),
        dict(role='HUMAN', prompt='Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?'),
        dict(role='BOT', prompt='A: She bought 5 bagels for $3 each. This means she spent 5 * $3 = $15 on the bagels. She had $23 in the beginning, so now she has $23 - $15 = $8. The answer is 8.\n'),
        dict(role='HUMAN', prompt='Q: {question}'),
        dict(role='BOT', prompt='A: {answer}\n'),
    ]}

mathbench_sets = {
    'college': ['single_choice_cn', 'single_choice_en'],
    'high': ['single_choice_cn', 'single_choice_en'],
    'middle': ['single_choice_cn', 'single_choice_en'],
    'primary': ['cloze_cn', 'cloze_en'],
    'calculate': ['cloze_en'],
}

# Generate a reasoning path or not; only used for single-choice subsets
with_reasoning = True

# Use circular evaluation or not
with_circular_eval = True

mathbench_datasets = []

for _split in list(mathbench_sets.keys()):
    for _name in mathbench_sets[_split]:
        mathbench_infer_cfg = dict(
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    round=[
                        dict(
                            role="HUMAN",
                            prompt=single_choice_prompts[_name + "_with_reasoning"] if with_reasoning else single_choice_prompts[_name],
                        ),
                        dict(role="BOT", prompt="{answer}")] if 'choice' in _name else cloze_prompts[_name],
                ),
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer, max_out_len=512),
        )

        mathbench_eval_cfg = dict(
            evaluator=dict(type=CircularEvaluator if 'choice' in _name and with_circular_eval else AccEvaluator),
            pred_postprocessor=dict(type=first_option_postprocess, options='ABCD') if 'single_choice' in _name else dict(type=mathbench_postprocess, name=_name))

        mathbench_datasets.append(
            dict(
                abbr="mathbench-" + _split + '-' + _name,
                type=MathBenchDataset,
                path=f"./data/mathbench/{_split}",
                name=_name,
                with_circular=with_circular_eval,
                reader_cfg=dict(
                    input_columns=["question"],
                    output_column="answer",
                ),
                infer_cfg=mathbench_infer_cfg,
                eval_cfg=mathbench_eval_cfg,
            ))
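The `with_circular_eval` flag above pairs the multiple-choice subsets with `CircularEvaluator`. As a rough sketch of the idea only (not the evaluator's actual implementation): circular evaluation re-asks each question under rotated option orders and only credits answers that survive every rotation, which suppresses option-position bias.

```python
# Illustrative sketch of circular evaluation; `ask` is a hypothetical
# stand-in for one model call that returns the text of the chosen option.
def rotations(options):
    """Yield the cyclic rotations of an option list: ABCD, BCDA, CDAB, DABC."""
    for k in range(len(options)):
        yield options[k:] + options[:k]

def circular_correct(ask, question, options, answer):
    """Credit the model only if every rotated presentation is answered correctly."""
    return all(ask(question, opts) == answer for opts in rotations(options))
```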
@ -1,4 +1,4 @@
from mmengine.config import read_base

with read_base():
-    from .mathbench_gen_7b734b import mathbench_datasets  # noqa: F401, F403
+    from .mathbench_2024_gen_de9ff9 import mathbench_datasets  # noqa: F401, F403
@ -0,0 +1,55 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLOnlyInferencer
from opencompass.openicl.icl_evaluator import AveragePPLEvaluator
from opencompass.datasets import JsonlDataset

ceval_datasets = []

# Average perplexity over the raw question text.
ceval_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template="{text}"),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLOnlyInferencer),
)

ceval_eval_cfg = dict(evaluator=dict(type=AveragePPLEvaluator))

ceval_reader_cfg = dict(
    input_columns=['text'],
    output_column=None,
)

ceval_datasets.append(
    dict(
        abbr='ceval-val-ppl',
        type=JsonlDataset,
        path='/mnt/petrelfs/zhoufengzhe/repos/cscripts/mock-datas/ceval_val_content.jsonl',
        reader_cfg=ceval_reader_cfg,
        infer_cfg=ceval_infer_cfg,
        eval_cfg=ceval_eval_cfg,
    )
)

# Average perplexity over the rephrased text from the same file.
ceval_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template="{rephrase}"),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLOnlyInferencer),
)

ceval_eval_cfg = dict(evaluator=dict(type=AveragePPLEvaluator))

ceval_reader_cfg = dict(
    input_columns=['rephrase'],
    output_column=None,
)

ceval_datasets.append(
    dict(
        abbr='ceval-ref-ppl',
        type=JsonlDataset,
        path='/mnt/petrelfs/zhoufengzhe/repos/cscripts/mock-datas/ceval_val_content.jsonl',
        reader_cfg=ceval_reader_cfg,
        infer_cfg=ceval_infer_cfg,
        eval_cfg=ceval_eval_cfg,
    )
)
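Both entries above score the same JSONL file and differ only in which field the average perplexity is computed over, so the two resulting numbers can be compared side by side. The field names come from `input_columns`; the concrete row shape sketched below is an assumption:

```python
# Assumed shape of one row in ceval_val_content.jsonl (illustrative only):
row = {
    "text": "original C-Eval validation question ...",
    "rephrase": "a rephrased version of the same question ...",
}
```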
@ -0,0 +1,57 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLOnlyInferencer
from opencompass.openicl.icl_evaluator import AveragePPLEvaluator
from opencompass.datasets import SanitizedMBPPDataset, JsonlDataset

mbpp_datasets = []

# Average perplexity over the prompt text plus reference code, per split.
mbpp_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template="{text}\n{code}"),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLOnlyInferencer),
)

mbpp_eval_cfg = dict(evaluator=dict(type=AveragePPLEvaluator))

for split in ['train', 'test']:
    mbpp_reader_cfg = dict(
        input_columns=['text', 'code'],
        output_column=None,
        train_split=split,
        test_split=split,
    )
    mbpp_datasets.append(
        dict(
            abbr=f'mbpp-{split}-ppl',
            type=SanitizedMBPPDataset,
            path='./data/mbpp/sanitized-mbpp.jsonl',
            reader_cfg=mbpp_reader_cfg,
            infer_cfg=mbpp_infer_cfg,
            eval_cfg=mbpp_eval_cfg,
        )
    )

# Average perplexity over the mock reference text.
mbpp_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template="{text}"),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLOnlyInferencer),
)

mbpp_eval_cfg = dict(evaluator=dict(type=AveragePPLEvaluator))

mbpp_reader_cfg = dict(
    input_columns=['text'],
    output_column=None,
)

mbpp_datasets.append(
    dict(
        abbr='mbpp-ref-ppl',
        type=JsonlDataset,
        path='/mnt/petrelfs/zhoufengzhe/repos/cscripts/mock-datas/mock_mbpp_20240113.jsonl',
        reader_cfg=mbpp_reader_cfg,
        infer_cfg=mbpp_infer_cfg,
        eval_cfg=mbpp_eval_cfg,
    )
)
@ -0,0 +1,55 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import PPLOnlyInferencer
from opencompass.openicl.icl_evaluator import AveragePPLEvaluator
from opencompass.datasets import JsonlDataset

mmlu_datasets = []

# Average perplexity over the raw test text.
mmlu_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template="{text}"),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLOnlyInferencer),
)

mmlu_eval_cfg = dict(evaluator=dict(type=AveragePPLEvaluator))

mmlu_reader_cfg = dict(
    input_columns=['text'],
    output_column=None,
)

mmlu_datasets.append(
    dict(
        abbr='mmlu-test-ppl',
        type=JsonlDataset,
        path='/mnt/petrelfs/zhoufengzhe/repos/cscripts/mock-datas/mmlu_test_content.jsonl',
        reader_cfg=mmlu_reader_cfg,
        infer_cfg=mmlu_infer_cfg,
        eval_cfg=mmlu_eval_cfg,
    )
)

# Average perplexity over the rephrased text from the same file.
mmlu_infer_cfg = dict(
    prompt_template=dict(type=PromptTemplate, template="{rephrase}"),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=PPLOnlyInferencer),
)

mmlu_eval_cfg = dict(evaluator=dict(type=AveragePPLEvaluator))

mmlu_reader_cfg = dict(
    input_columns=['rephrase'],
    output_column=None,
)

mmlu_datasets.append(
    dict(
        abbr='mmlu-ref-ppl',
        type=JsonlDataset,
        path='/mnt/petrelfs/zhoufengzhe/repos/cscripts/mock-datas/mmlu_test_content.jsonl',
        reader_cfg=mmlu_reader_cfg,
        infer_cfg=mmlu_infer_cfg,
        eval_cfg=mmlu_eval_cfg,
    )
)
@ -1,7 +1,7 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
-from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess
+from opencompass.datasets import HumanevalDataset, HumanEvaluator, humaneval_postprocess_v2

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')
@ -22,7 +22,7 @@ humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
-    pred_postprocessor=dict(type=humaneval_postprocess),
+    pred_postprocessor=dict(type=humaneval_postprocess_v2),
)

humaneval_datasets = [
@ -3,7 +3,7 @@ from opencompass.openicl.icl_retriever import FixKRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.datasets import MMLUDataset
-from opencompass.utils.text_postprocessors import first_capital_postprocess
+from opencompass.utils.text_postprocessors import first_option_postprocess

# None of the MMLU datasets on HuggingFace is parsed correctly, so we use our own dataset reader
# Please download the dataset from https://people.eecs.berkeley.edu/~hendrycks/data.tar
@ -108,7 +108,7 @@ for _name in mmlu_all_sets:

    mmlu_eval_cfg = dict(
        evaluator=dict(type=AccEvaluator),
-        pred_postprocessor=dict(type=first_capital_postprocess))
+        pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'))

    mmlu_datasets.append(
        dict(
@ -1,7 +1,8 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.datasets.custom import OptionSimAccEvaluator
from opencompass.openicl.icl_evaluator import AccEvaluator
from opencompass.utils.text_postprocessors import first_option_postprocess
from opencompass.datasets import siqaDataset_V3

siqa_reader_cfg = dict(
@ -26,8 +27,8 @@ siqa_infer_cfg = dict(
)

siqa_eval_cfg = dict(
-    evaluator=dict(type=AccEvaluator),
-    pred_postprocessor=dict(type=first_option_postprocess, options='ABC')
+    evaluator=dict(type=OptionSimAccEvaluator, options='ABC'),
+    pred_role="BOT",
)

siqa_datasets = [
22
configs/datasets/teval/README.md
Normal file
@ -0,0 +1,22 @@
# T-Eval

T-Eval decomposes tool utilization into multiple sub-processes, including instruction following, planning, reasoning, retrieval, understanding, and review, and evaluates the tool-utilization capability step by step. By disentangling the evaluation into sub-domains aligned with model capabilities, it offers insight into both the holistic and the isolated competencies of LLMs.

[Paper](https://arxiv.org/abs/2312.14033)

[Project Page](https://open-compass.github.io/T-Eval/)

[LeaderBoard](https://open-compass.github.io/T-Eval/leaderboard.html)

[HuggingFace](https://huggingface.co/datasets/lovesnowbest/T-Eval)

## Citation

```
@article{chen2023t,
  title={T-Eval: Evaluating the Tool Utilization Capability Step by Step},
  author={Chen, Zehui and Du, Weihua and Zhang, Wenwei and Liu, Kuikun and Liu, Jiangning and Zheng, Miao and Zhuo, Jingming and Zhang, Songyang and Lin, Dahua and Chen, Kai and others},
  journal={arXiv preprint arXiv:2312.14033},
  year={2023}
}
```
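A minimal way to pull these datasets into an OpenCompass evaluation config, mirroring the generated config files below (the `datasets =` assignment follows the usual OpenCompass convention):

```python
from mmengine.config import read_base

with read_base():
    # English split; use .teval_zh_gen_1ac254 for the Chinese split
    from .teval_en_gen_1ac254 import teval_datasets

datasets = teval_datasets
```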
4
configs/datasets/teval/teval_en_gen.py
Normal file
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .teval_en_gen_1ac254 import teval_datasets
52
configs/datasets/teval/teval_en_gen_1ac254.py
Normal file
@ -0,0 +1,52 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import ChatInferencer
from opencompass.openicl.icl_evaluator import TEvalEvaluator
from opencompass.datasets import teval_postprocess, TEvalDataset

teval_subject_mapping = {
    "instruct": ["instruct_v1"],
    "plan": ["plan_json_v1", "plan_str_v1"],
    "review": ["review_str_v1"],
    "reason_retrieve_understand": ["reason_retrieve_understand_json_v1"],
    "reason": ["reason_str_v1"],
    "retrieve": ["retrieve_str_v1"],
    "understand": ["understand_str_v1"],
}

teval_reader_cfg = dict(input_columns=["prompt"], output_column="ground_truth")

teval_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role="HUMAN", prompt="{prompt}"),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=ChatInferencer),
)

teval_all_sets = list(teval_subject_mapping.keys())

teval_datasets = []
for _name in teval_all_sets:
    teval_eval_cfg = dict(
        evaluator=dict(type=TEvalEvaluator, subset=_name),
        pred_postprocessor=dict(type=teval_postprocess),
        num_gpus=1,
    )
    for subset in teval_subject_mapping[_name]:
        teval_datasets.append(
            dict(
                abbr="teval-" + subset,
                type=TEvalDataset,
                path="./data/teval/EN",
                name=subset,
                reader_cfg=teval_reader_cfg,
                infer_cfg=teval_infer_cfg,
                eval_cfg=teval_eval_cfg,
            )
        )
4
configs/datasets/teval/teval_zh_gen.py
Normal file
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .teval_zh_gen_1ac254 import teval_datasets
52
configs/datasets/teval/teval_zh_gen_1ac254.py
Normal file
@ -0,0 +1,52 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import ChatInferencer
from opencompass.openicl.icl_evaluator import TEvalEvaluator
from opencompass.datasets import teval_postprocess, TEvalDataset

teval_subject_mapping = {
    "instruct_zh": ["instruct_v1_zh"],
    "plan_zh": ["plan_json_v1_zh", "plan_str_v1_zh"],
    "review_zh": ["review_str_v1_zh"],
    "reason_retrieve_understand_zh": ["reason_retrieve_understand_json_v1_zh"],
    "reason_zh": ["reason_str_v1_zh"],
    "retrieve_zh": ["retrieve_str_v1_zh"],
    "understand_zh": ["understand_str_v1_zh"],
}

teval_reader_cfg = dict(input_columns=["prompt"], output_column="ground_truth")

teval_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role="HUMAN", prompt="{prompt}"),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=ChatInferencer),
)

teval_all_sets = list(teval_subject_mapping.keys())

teval_datasets = []
for _name in teval_all_sets:
    teval_eval_cfg = dict(
        evaluator=dict(type=TEvalEvaluator, subset=_name),
        pred_postprocessor=dict(type=teval_postprocess),
        num_gpus=1,
    )
    for subset in teval_subject_mapping[_name]:
        teval_datasets.append(
            dict(
                abbr="teval-" + subset,
                type=TEvalDataset,
                path="./data/teval/ZH",
                name=subset,
                reader_cfg=teval_reader_cfg,
                infer_cfg=teval_infer_cfg,
                eval_cfg=teval_eval_cfg,
            )
        )
15
configs/eval_internlm_math_chat.py
Normal file
@ -0,0 +1,15 @@
from mmengine.config import read_base

with read_base():
    # choose a list of datasets
    from .datasets.gsm8k.gsm8k_gen import gsm8k_datasets
    from .datasets.math.math_gen_736506 import math_datasets

    from .models.hf_internlm.hf_internlm2_chat_math_7b import models as internlm_math_chat_7b_models
    from .models.hf_internlm.hf_internlm2_chat_math_20b import models as internlm_math_chat_20b_models

# Eval MATH and GSM8K for both InternLM2-Math-Chat 7B and 20B.
datasets = [*math_datasets, *gsm8k_datasets]
models = [*internlm_math_chat_7b_models, *internlm_math_chat_20b_models]
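For reference, configs like this one are typically launched through OpenCompass's top-level entry point, e.g. `python run.py configs/eval_internlm_math_chat.py` (standard OpenCompass usage; the exact invocation is not part of this diff).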
70
configs/eval_teval.py
Normal file
@ -0,0 +1,70 @@
from copy import deepcopy
from mmengine.config import read_base

with read_base():
    from .datasets.teval.teval_en_gen_1ac254 import teval_datasets as teval_en_datasets
    from .datasets.teval.teval_zh_gen_1ac254 import teval_datasets as teval_zh_datasets

    from .models.qwen.hf_qwen_7b_chat import models as hf_qwen_7b_chat_model
    from .models.hf_internlm.hf_internlm2_chat_7b import models as hf_internlm2_chat_7b_model
    from .models.hf_llama.hf_llama2_7b_chat import models as hf_llama2_7b_chat_model

    from .summarizers.teval import summarizer

# System-role definitions patched into meta templates that lack one.
meta_template_system_patches = {
    'internlm2-chat-7b-hf': dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
    'internlm2-chat-20b-hf': dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
}

_origin_models = sum([v for k, v in locals().items() if k.endswith("_model")], [])
models = []
for m in _origin_models:
    m = deepcopy(m)
    if 'meta_template' in m and 'round' in m['meta_template']:
        round = m['meta_template']['round']
        if all(r['role'].upper() != 'SYSTEM' for r in round):  # no system round
            if m['abbr'] in meta_template_system_patches:
                system_round = meta_template_system_patches[m['abbr']]
            else:
                # fall back to cloning the HUMAN round as the SYSTEM round
                system_round = [r for r in round if r['role'].upper() == 'HUMAN'][0]
            system_round = deepcopy(system_round)
            system_round['role'] = 'SYSTEM'
            m['meta_template']['round'].append(system_round)
    else:
        raise ValueError(f'no meta_template.round in {m.get("abbr", None)}')

    print(f'model {m["abbr"]} is using the following meta_template: {m["meta_template"]}')
    models.append(m)

datasets = teval_en_datasets + teval_zh_datasets
work_dir = './outputs/teval'


'''
dataset                                       version    metric          mode       qwen-7b-chat-hf    internlm2-chat-7b-hf    llama-2-7b-chat-hf
--------------------------------------------  ---------  --------------  -------  -----------------  ----------------------  --------------------
teval                                         -          naive_average   unknown              57.69                   78.18                 36.63
teval-instruct_v1                             10482d     string_metric   unknown              28.83                   98.08                 50.27
teval-instruct_v1                             10482d     json_metric     unknown              94.32                   97.08                  0.15
teval-plan_str_v1                             10482d     f1_score        unknown              66.24                   84.12                 45.72
teval-plan_json_v1                            10482d     f1_score        unknown              63.62                   77.71                 19.95
teval-reason_str_v1                           10482d     thought         unknown              54.14                   63.58                 44.92
teval-reason_retrieve_understand_json_v1      10482d     thought         unknown              33.77                   54.72                 21.49
teval-retrieve_str_v1                         10482d     name            unknown              73.89                   85.28                 60.6
teval-reason_retrieve_understand_json_v1      10482d     name            unknown              31.15                   68.97                 15.34
teval-understand_str_v1                       10482d     args            unknown              77.76                   93.03                 65.61
teval-reason_retrieve_understand_json_v1      10482d     args            unknown              44.16                   72.23                 26.84
teval-review_str_v1                           10482d     review_quality  unknown              62.22                   71.66                 44.35
teval_zh                                      -          naive_average   unknown              61.31                   75.01                 32.33
teval-instruct_v1_zh                          10482d     string_metric   unknown              88.69                   98.19                 23.64
teval-instruct_v1_zh                          10482d     json_metric     unknown              75.77                   96.62                  0.89
teval-plan_str_v1_zh                          10482d     f1_score        unknown              62.43                   70.69                 47.82
teval-plan_json_v1_zh                         10482d     f1_score        unknown              61.46                   68.95                 15.87
teval-reason_str_v1_zh                        10482d     thought         unknown              59.43                   68.14                 46.96
teval-reason_retrieve_understand_json_v1_zh   10482d     thought         unknown              39.19                   60.37                 23.91
teval-retrieve_str_v1_zh                      10482d     name            unknown              69.41                   84.22                 54.44
teval-reason_retrieve_understand_json_v1_zh   10482d     name            unknown              32.87                   70.46                 14.16
teval-understand_str_v1_zh                    10482d     args            unknown              84.39                   88.62                 77.29
teval-reason_retrieve_understand_json_v1_zh   10482d     args            unknown              48.71                   72.71                 28.83
teval-review_str_v1_zh                        10482d     review_quality  unknown              56.67                   60.57                 27.1
'''
26
configs/models/hf_internlm/hf_internlm2_1_8b.py
Normal file
@ -0,0 +1,26 @@
from opencompass.models import HuggingFaceCausalLM


models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-1.8b-hf',
        path="internlm/internlm2-1_8b",
        tokenizer_path='internlm/internlm2-1_8b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        min_out_len=1,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
@ -18,6 +18,7 @@ models = [
            trust_remote_code=True,
        ),
        max_out_len=100,
+        min_out_len=1,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=2, num_procs=1),
@ -18,7 +18,7 @@ models = [
            trust_remote_code=True,
        ),
        max_out_len=100,
-        min_out_len=3,
+        min_out_len=1,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
26
configs/models/hf_internlm/hf_internlm2_base_20b.py
Normal file
@ -0,0 +1,26 @@
from opencompass.models import HuggingFaceCausalLM


models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-base-20b-hf',
        path="internlm/internlm2-base-20b",
        tokenizer_path='internlm/internlm2-base-20b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        min_out_len=1,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]
26
configs/models/hf_internlm/hf_internlm2_base_7b.py
Normal file
@ -0,0 +1,26 @@
from opencompass.models import HuggingFaceCausalLM


models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-base-7b-hf',
        path="internlm/internlm2-base-7b",
        tokenizer_path='internlm/internlm2-base-7b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        min_out_len=1,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
35
configs/models/hf_internlm/hf_internlm2_chat_1_8b_sft.py
Normal file
@ -0,0 +1,35 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-1.8b-sft-hf',
        path="internlm/internlm2-chat-1_8b-sft",
        tokenizer_path='internlm/internlm2-chat-1_8b-sft',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|im_end|>',
    )
]
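As a rough illustration of how a meta template like the one above turns a chat into a single prompt string (this is not OpenCompass's internal rendering code; the serialization is inferred from the `begin`/`end` markers):

```python
# Illustrative only: serializing chat rounds under the meta template above.
ROUND = {
    'HUMAN': ('<|im_start|>user\n', '<|im_end|>\n'),
    'BOT': ('<|im_start|>assistant\n', '<|im_end|>\n'),
}

def render(messages):
    """messages: list of (role, text) pairs ending with the BOT turn.

    Completed turns are wrapped in their begin/end markers; the final BOT
    turn (generate=True) emits only its begin marker, so the model continues
    from there and end_str='<|im_end|>' truncates the completion.
    """
    parts = []
    for role, text in messages[:-1]:
        begin, end = ROUND[role]
        parts.append(begin + text + end)
    parts.append(ROUND[messages[-1][0]][0])  # open the turn to be generated
    return ''.join(parts)

prompt = render([('HUMAN', '你好'), ('BOT', '')])
```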
@ -4,7 +4,6 @@ from opencompass.models import HuggingFaceCausalLM
_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
-        dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=92542
35
configs/models/hf_internlm/hf_internlm2_chat_20b_sft.py
Normal file
@ -0,0 +1,35 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-20b-sft-hf',
        path="internlm/internlm2-chat-20b-sft",
        tokenizer_path='internlm/internlm2-chat-20b-sft',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=2, num_procs=1),
        end_str='<|im_end|>',
    )
]
@ -0,0 +1,36 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-20b-hf',
        path="internlm/internlm2-chat-20b",
        tokenizer_path='internlm/internlm2-chat-20b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=2, num_procs=1),
        end_str='<|im_end|>',
    )
]
@ -4,7 +4,6 @@ from opencompass.models import HuggingFaceCausalLM
_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
-        dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=92542
35
configs/models/hf_internlm/hf_internlm2_chat_7b_sft.py
Normal file
@ -0,0 +1,35 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-7b-sft-hf',
        path="internlm/internlm2-chat-7b-sft",
        tokenizer_path='internlm/internlm2-chat-7b-sft',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|im_end|>',
    )
]
@ -0,0 +1,36 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-7b-hf',
        path="internlm/internlm2-chat-7b",
        tokenizer_path='internlm/internlm2-chat-7b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|im_end|>',
    )
]
35
configs/models/hf_internlm/hf_internlm2_chat_math_20b.py
Normal file
@ -0,0 +1,35 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='[UNUSED_TOKEN_146]user\n', end='[UNUSED_TOKEN_145]\n'),
        dict(role='BOT', begin='[UNUSED_TOKEN_146]assistant\n', end='[UNUSED_TOKEN_145]\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-math-20b-hf',
        path="internlm/internlm2-math-20b",
        tokenizer_path='internlm/internlm2-math-20b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=2, num_procs=1),
        end_str='[UNUSED_TOKEN_145]',
    )
]
@ -0,0 +1,36 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='[UNUSED_TOKEN_146]user\n', end='[UNUSED_TOKEN_145]\n'),
        dict(role='SYSTEM', begin='[UNUSED_TOKEN_146]system\n', end='[UNUSED_TOKEN_145]\n'),
        dict(role='BOT', begin='[UNUSED_TOKEN_146]assistant\n', end='[UNUSED_TOKEN_145]\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-math-20b-hf',
        path="internlm/internlm2-math-20b",
        tokenizer_path='internlm/internlm2-math-20b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=2, num_procs=1),
        end_str='[UNUSED_TOKEN_145]',
    )
]
35
configs/models/hf_internlm/hf_internlm2_chat_math_7b.py
Normal file
@ -0,0 +1,35 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='[UNUSED_TOKEN_146]user\n', end='[UNUSED_TOKEN_145]\n'),
        dict(role='BOT', begin='[UNUSED_TOKEN_146]assistant\n', end='[UNUSED_TOKEN_145]\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-math-7b-hf',
        path="internlm/internlm2-math-7b",
        tokenizer_path='internlm/internlm2-math-7b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='[UNUSED_TOKEN_145]',
    )
]
@ -0,0 +1,36 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='[UNUSED_TOKEN_146]user\n', end='[UNUSED_TOKEN_145]\n'),
        dict(role='SYSTEM', begin='[UNUSED_TOKEN_146]system\n', end='[UNUSED_TOKEN_145]\n'),
        dict(role='BOT', begin='[UNUSED_TOKEN_146]assistant\n', end='[UNUSED_TOKEN_145]\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-math-7b-hf',
        path="internlm/internlm2-math-7b",
        tokenizer_path='internlm/internlm2-math-7b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='[UNUSED_TOKEN_145]',
    )
]
@ -19,16 +19,17 @@ models = [
            torch_dtype='auto',
        ),
        tokenizer_kwargs=dict(
-            padding_side='right',
+            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
+        batch_padding=False,
-        max_out_len=1024,
+        max_out_len=100,
        max_seq_len=4096,
        batch_size=8,
-        run_cfg=dict(num_gpus=1, num_procs=1),
+        run_cfg=dict(num_gpus=2, num_procs=1),
        end_str='</s>',
    )
]
@ -19,16 +19,16 @@ models = [
            torch_dtype='auto',
        ),
        tokenizer_kwargs=dict(
-            padding_side='right',
+            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        batch_padding=False,
-        max_out_len=1024,
-        max_seq_len=8192,
+        max_out_len=100,
+        max_seq_len=2048,
        batch_size=8,
-        run_cfg=dict(num_gpus=1, num_procs=1),
+        run_cfg=dict(num_gpus=2, num_procs=1),
        end_str='</s>',
    )
]
33
configs/models/others/hf_openchat_35_0106.py
Normal file
@ -0,0 +1,33 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='GPT4 Correct User: ', end='<|end_of_turn|>'),
        dict(role="BOT", begin="GPT4 Correct Assistant: ", end='<|end_of_turn|>', generate=True),
    ],
)

models = [
    dict(
        abbr='openchat-3.5-0106-hf',
        type=HuggingFaceCausalLM,
        path='openchat/openchat-3.5-0106',
        tokenizer_path='openchat/openchat-3.5-0106',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        meta_template=_meta_template,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|end_of_turn|>',
    )
]
33
configs/models/others/hf_openchat_35_1210.py
Normal file
@ -0,0 +1,33 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='GPT4 Correct User: ', end='<|end_of_turn|>'),
        dict(role="BOT", begin="GPT4 Correct Assistant: ", end='<|end_of_turn|>', generate=True),
    ],
)

models = [
    dict(
        abbr='openchat-3.5-1210-hf',
        type=HuggingFaceCausalLM,
        path='openchat/openchat-3.5-1210',
        tokenizer_path='openchat/openchat-3.5-1210',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        meta_template=_meta_template,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|end_of_turn|>',
    )
]
24
configs/models/others/hf_orionstar_14b_base.py
Normal file
@ -0,0 +1,24 @@
from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        abbr='orionstar-14b-base-hf',
        type=HuggingFaceCausalLM,
        path='OrionStarAI/Orion-14B-Base',
        tokenizer_path='OrionStarAI/Orion-14B-Base',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        min_out_len=1,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]
@ -11,7 +11,7 @@ _meta_template = dict(

models = [
    dict(
-        abbr='telechat-7b-hf',
+        abbr='telechat-7b-hf--rerun',
        type=HuggingFaceCausalLM,
        path='Tele-AI/telechat-7B',
        tokenizer_path='Tele-AI/telechat-7B',
@ -17,7 +17,7 @@ models = [
            trust_remote_code=True,
        ),
        max_out_len=100,
-        min_out_len=3,
+        min_out_len=1,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=4, num_procs=1),
25
configs/models/qwen/hf_qwen2_beta_0_5b.py
Normal file
@ -0,0 +1,25 @@
from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-0.5b-hf',
        path="Qwen/Qwen2-beta-0_5B",
        tokenizer_path='Qwen/Qwen2-beta-0_5B',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
34
configs/models/qwen/hf_qwen2_beta_0_5b_chat.py
Normal file
@ -0,0 +1,34 @@
from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=151645,
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-0.5b-chat-hf',
        path="Qwen/Qwen2-beta-0_5B-Chat",
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|im_end|>',
    )
]
25
configs/models/qwen/hf_qwen2_beta_14b.py
Normal file
@ -0,0 +1,25 @@
from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-14b-hf',
        path="Qwen/Qwen2-beta-14B",
        tokenizer_path='Qwen/Qwen2-beta-14B',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
34
configs/models/qwen/hf_qwen2_beta_14b_chat.py
Normal file
@ -0,0 +1,34 @@
from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=151645,
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-14b-chat-hf',
        path="Qwen/Qwen2-beta-14B-Chat",
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|im_end|>',
    )
]
25
configs/models/qwen/hf_qwen2_beta_1_8b.py
Normal file
@ -0,0 +1,25 @@
from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-1.8b-hf',
        path="Qwen/Qwen2-beta-1_8B",
        tokenizer_path='Qwen/Qwen2-beta-1_8B',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
34
configs/models/qwen/hf_qwen2_beta_1_8b_chat.py
Normal file
@ -0,0 +1,34 @@
from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=151645,
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-1.8b-chat-hf',
        path="Qwen/Qwen2-beta-1_8B-Chat",
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|im_end|>',
    )
]
25
configs/models/qwen/hf_qwen2_beta_4b.py
Normal file
@ -0,0 +1,25 @@
from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-4b-hf',
        path="Qwen/Qwen2-beta-4B",
        tokenizer_path='Qwen/Qwen2-beta-4B',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
34
configs/models/qwen/hf_qwen2_beta_4b_chat.py
Normal file
@ -0,0 +1,34 @@
from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=151645,
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-4b-chat-hf',
        path="Qwen/Qwen2-beta-4B-Chat",
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|im_end|>',
    )
]
25
configs/models/qwen/hf_qwen2_beta_72b.py
Normal file
@ -0,0 +1,25 @@
from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-72b-hf',
        path="Qwen/Qwen2-beta-72B",
        tokenizer_path='Qwen/Qwen2-beta-72B',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=4, num_procs=1),
    )
]
34
configs/models/qwen/hf_qwen2_beta_72b_chat.py
Normal file
@ -0,0 +1,34 @@
from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=151645,
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-72b-chat-hf',
        path="Qwen/Qwen2-beta-72B-Chat",
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=4, num_procs=1),
        end_str='<|im_end|>',
    )
]
25
configs/models/qwen/hf_qwen2_beta_7b.py
Normal file
@ -0,0 +1,25 @@
from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-7b-hf',
        path="Qwen/Qwen2-beta-7B",
        tokenizer_path='Qwen/Qwen2-beta-7B',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
34
configs/models/qwen/hf_qwen2_beta_7b_chat.py
Normal file
@ -0,0 +1,34 @@
from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=151645,
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-7b-chat-hf',
        path="Qwen/Qwen2-beta-7B-Chat",
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|im_end|>',
    )
]
@ -17,6 +17,7 @@ models = [
            use_fast=False,
        ),
        pad_token_id=151643,
        min_out_len=1,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,

@ -17,6 +17,7 @@ models = [
            use_fast=False,
        ),
        pad_token_id=151643,
        min_out_len=1,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
@ -5,6 +5,7 @@ _meta_template = dict(
        dict(role="HUMAN", begin='\n<|im_start|>user\n', end='<|im_end|>'),
        dict(role="BOT", begin="\n<|im_start|>assistant\n", end='<|im_end|>', generate=True),
    ],
    eos_token_id=151645,
)

models = [
@ -21,12 +22,14 @@ models = [
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,),
            use_fast=False,
        ),
        pad_token_id=151643,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|im_end|>',
    )
]
@ -17,6 +17,7 @@ models = [
            use_fast=False,
        ),
        pad_token_id=151643,
        min_out_len=1,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,

@ -17,6 +17,7 @@ models = [
            use_fast=False,
        ),
        pad_token_id=151643,
        min_out_len=1,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
16
configs/models/qwen/vllm_qwen2_beta_72b.py
Normal file
@ -0,0 +1,16 @@
from opencompass.models import VLLM


models = [
    dict(
        type=VLLM,
        abbr='qwen2-beta-72b-vllm',
        path="Qwen/Qwen2-beta-72B",
        model_kwargs=dict(tensor_parallel_size=4),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=32,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=4, num_procs=1),
    )
]
26
configs/models/qwen/vllm_qwen2_beta_72b_chat.py
Normal file
@ -0,0 +1,26 @@
from opencompass.models import VLLM


_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=151645,
)

models = [
    dict(
        type=VLLM,
        abbr='qwen2-beta-72b-chat-vllm',
        path="Qwen/Qwen2-beta-72B-Chat",
        model_kwargs=dict(tensor_parallel_size=4),
        meta_template=_meta_template,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=32,
        generation_kwargs=dict(temperature=0),
        end_str='<|im_end|>',
        run_cfg=dict(num_gpus=4, num_procs=1),
    )
]
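The HuggingFaceCausalLM and VLLM entries above are interchangeable model definitions. As a hedged sketch (the relative import paths are assumptions for a file living under configs/), they could be composed into one evaluation config via mmengine's read_base, the same mechanism the summarizer configs in this commit use:

# Sketch only: module paths are assumed, not taken from this commit.
from mmengine.config import read_base

with read_base():
    from .models.qwen.hf_qwen2_beta_7b_chat import models as hf_qwen2_7b_chat
    from .models.qwen.vllm_qwen2_beta_72b_chat import models as vllm_qwen2_72b_chat

models = hf_qwen2_7b_chat + vllm_qwen2_72b_chat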
@ -1,6 +1,5 @@
from opencompass.models import HuggingFace


models = [
    dict(
        type=HuggingFace,
@ -18,30 +18,30 @@ agent_summary_groups = [

summarizer = dict(
    dataset_abbrs=[
        'agent',
        'math_acc_1_and_fill_in_blank-native',
        'math_perf_4_and_fill_in_blank-native',
        # '######## MathBench-Agent Accuracy ########', # category
        'math_acc_1_and_fill_in_blank-agent',
        'math_perf_4_and_fill_in_blank-agent',
        # '######## CIBench Template ########', # category
        'cibench_template:executable',
        'cibench_template:numeric_correct',
        'cibench_template:text_score',
        'cibench_template:vis_sim',
        # '######## CIBench Template Chinese ########', # category
        'cibench_template_cn:executable',
        'cibench_template_cn:numeric_correct',
        'cibench_template_cn:text_score',
        'cibench_template_cn:vis_sim',
        # '######## CIBench Template w/o NLTK ########', # category, no text score because it is only for nltk
        'cibench_template_wo_nltk:executable',
        'cibench_template_wo_nltk:numeric_correct',
        'cibench_template_wo_nltk:vis_sim',
        # '######## CIBench Template Chinese w/o NLTK ########', # category
        'cibench_template_cn_wo_nltk:executable',
        'cibench_template_cn_wo_nltk:numeric_correct',
        'cibench_template_cn_wo_nltk:vis_sim',
        # 'agent',
        # 'math_acc_1_and_fill_in_blank-native',
        # 'math_perf_4_and_fill_in_blank-native',
        # # '######## MathBench-Agent Accuracy ########', # category
        # 'math_acc_1_and_fill_in_blank-agent',
        # 'math_perf_4_and_fill_in_blank-agent',
        # # '######## CIBench Template ########', # category
        # 'cibench_template:executable',
        # 'cibench_template:numeric_correct',
        # 'cibench_template:text_score',
        # 'cibench_template:vis_sim',
        # # '######## CIBench Template Chinese ########', # category
        # 'cibench_template_cn:executable',
        # 'cibench_template_cn:numeric_correct',
        # 'cibench_template_cn:text_score',
        # 'cibench_template_cn:vis_sim',
        # # '######## CIBench Template w/o NLTK ########', # category, no text score because it is only for nltk
        # 'cibench_template_wo_nltk:executable',
        # 'cibench_template_wo_nltk:numeric_correct',
        # 'cibench_template_wo_nltk:vis_sim',
        # # '######## CIBench Template Chinese w/o NLTK ########', # category
        # 'cibench_template_cn_wo_nltk:executable',
        # 'cibench_template_cn_wo_nltk:numeric_correct',
        # 'cibench_template_cn_wo_nltk:vis_sim',
        # '######## T-Eval ########', # category
        ['plugin_eval-p10', 'naive_average'],
        ['plugin_eval-p10-instruct_v1', 'format_metric'],
@ -68,6 +68,38 @@ summarizer = dict(
        ['plugin_eval-p10-understand_str_v1_zh', 'args'],
        ['plugin_eval-p10-reason_retrieve_understand_json_v1_zh', 'args'],
        ['plugin_eval-p10-review_str_v1_zh', 'review_quality'],

        # '######## MUS-T-Eval ########', # category
        ['plugin_eval-mus-p10', 'naive_average'],
        ['plugin_eval-mus-p10-instruct_v1', 'format_metric'],
        ['plugin_eval-mus-p10-instruct_v1', 'args_em_metric'],
        ['plugin_eval-mus-p10-plan_str_v1', 'f1_score'],
        ['plugin_eval-mus-p10-plan_json_v1', 'f1_score'],
        ['plugin_eval-mus-p10-reason_str_v1', 'thought'],
        ['plugin_eval-mus-p10-reason_retrieve_understand_json_v1', 'thought'],
        ['plugin_eval-mus-p10-retrieve_str_v1', 'name'],
        ['plugin_eval-mus-p10-reason_retrieve_understand_json_v1', 'name'],
        ['plugin_eval-mus-p10-understand_str_v1', 'args'],
        ['plugin_eval-mus-p10-reason_retrieve_understand_json_v1', 'args'],
        ['plugin_eval-mus-p10-review_str_v1', 'review_quality'],

        ['plugin_eval-mus-p10_zh', 'naive_average'],
        ['plugin_eval-mus-p10-instruct_v1_zh', 'format_metric'],
        ['plugin_eval-mus-p10-instruct_v1_zh', 'args_em_metric'],
        ['plugin_eval-mus-p10-plan_str_v1_zh', 'f1_score'],
        ['plugin_eval-mus-p10-plan_json_v1_zh', 'f1_score'],
        ['plugin_eval-mus-p10-reason_str_v1_zh', 'thought'],
        ['plugin_eval-mus-p10-reason_retrieve_understand_json_v1_zh', 'thought'],
        ['plugin_eval-mus-p10-retrieve_str_v1_zh', 'name'],
        ['plugin_eval-mus-p10-reason_retrieve_understand_json_v1_zh', 'name'],
        ['plugin_eval-mus-p10-understand_str_v1_zh', 'args'],
        ['plugin_eval-mus-p10-reason_retrieve_understand_json_v1_zh', 'args'],
        ['plugin_eval-mus-p10-review_str_v1_zh', 'review_quality'],

        # ['plugin_eval-p10', 'naive_average'],
        # ['plugin_eval-mus-p10', 'naive_average'],
        # ['plugin_eval-p10_zh', 'naive_average'],
        # ['plugin_eval-mus-p10_zh', 'naive_average'],
    ],
    summary_groups=sum(
        [v for k, v in locals().items() if k.endswith("_summary_groups")], [])
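The trailing sum() call is the collection idiom used by every summarizer in this commit: each module-level variable whose name ends in _summary_groups is flattened into one list. A self-contained sketch with toy data:

# locals() is evaluated once, in the enclosing module scope, so groups are
# concatenated in definition order; sum(..., []) flattens the list of lists.
agent_summary_groups = [{'name': 'agent', 'subsets': ['cibench', 'mathbench']}]
teval_summary_groups = [{'name': 'teval', 'subsets': ['plan', 'reason']}]

summary_groups = sum(
    [v for k, v in locals().items() if k.endswith('_summary_groups')], [])
print([g['name'] for g in summary_groups])  # ['agent', 'teval']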
@ -1,7 +1,6 @@
# This summarizer is used for `./datasets/compassbench_v1_knowledge/compassbench_v1_knowledge_gen`
compassbench_v1_knowledge_names = [
    'compassbench_v1_knowledge-common_knowledge-single_choice_cn_circular',
    'compassbench_v1_knowledge-engineering-single_choice_cn_circular',
    'compassbench_v1_knowledge-humanity-single_choice_cn_circular',
    'compassbench_v1_knowledge-natural_science-single_choice_cn_circular',
    'compassbench_v1_knowledge-social_science-single_choice_cn_circular',
@ -19,7 +18,6 @@ summarizer = dict(
        'knowledge_acc_1_and_cloze',
        ['knowledge_cn', 'acc_1'],
        ['compassbench_v1_knowledge-common_knowledge-single_choice_cn_circular', 'acc_1'],
        ['compassbench_v1_knowledge-engineering-single_choice_cn_circular', 'acc_1'],
        ['compassbench_v1_knowledge-humanity-single_choice_cn_circular', 'acc_1'],
        ['compassbench_v1_knowledge-natural_science-single_choice_cn_circular', 'acc_1'],
        ['compassbench_v1_knowledge-social_science-single_choice_cn_circular', 'acc_1'],
@ -28,7 +26,6 @@ summarizer = dict(
        'knowledge_perf_4_and_cloze',
        ['knowledge_cn', 'perf_4'],
        ['compassbench_v1_knowledge-common_knowledge-single_choice_cn_circular', 'perf_4'],
        ['compassbench_v1_knowledge-engineering-single_choice_cn_circular', 'perf_4'],
        ['compassbench_v1_knowledge-humanity-single_choice_cn_circular', 'perf_4'],
        ['compassbench_v1_knowledge-natural_science-single_choice_cn_circular', 'perf_4'],
        ['compassbench_v1_knowledge-social_science-single_choice_cn_circular', 'perf_4'],

@ -37,8 +37,8 @@ summarizer = dict(
        'language_acc_1_and_non_mcq',
        'language_en_acc_1_and_non_mcq',
        'language_zh_acc_1_and_non_mcq',
        ['information_retrieval_en', 'score'],
        ['information_retrieval_zh', 'score'],
        # ['information_retrieval_en', 'score'],
        # ['information_retrieval_zh', 'score'],
        ['intention_recognition_en_circular', 'acc_origin'],
        ['intention_recognition_zh_circular', 'acc_origin'],
        ['sentiment_analysis_en_circular', 'acc_origin'],
@ -54,8 +54,8 @@ summarizer = dict(
        'language_perf_4_and_non_mcq',
        'language_en_perf_4_and_non_mcq',
        'language_zh_perf_4_and_non_mcq',
        ['information_retrieval_en', 'score'],
        ['information_retrieval_zh', 'score'],
        # ['information_retrieval_en', 'score'],
        # ['information_retrieval_zh', 'score'],
        ['intention_recognition_en_circular', 'perf_circular'],
        ['intention_recognition_zh_circular', 'perf_circular'],
        ['sentiment_analysis_en_circular', 'perf_circular'],
@ -1,42 +1,65 @@
compassbench_v1_reason_groups = [
    {'name': 'reasonbench_cn_logic_circular', 'subsets': ['reasonbench_cn_abductive_alphanlg_translated_circular', 'reasonbench_cn_deductive_bbh3obj_translated_circular', 'reasonbench_cn_deductive_logiqa_zh_circular', 'reasonbench_cn_inductive_deer_translated_circular', 'reasonbench_cn_inductive_selfgenerated_circular']},
    {'name': 'reasonbench_en_logic_circular', 'subsets': ['reasonbench_en_abductive_alphanlg_circular', 'reasonbench_en_deductive_bbh7obj_circular', 'reasonbench_en_deductive_logiqa_zh_translated_circular', 'reasonbench_en_deductive_ocnli_translated_circular', 'reasonbench_en_inductive_deer_circular', 'reasonbench_en_inductive_selfgenerated_circular']},
    {'name': 'reasonbench', 'subsets': ['reasonbench_cn_commonsense_circular', 'reasonbench_cn_logic_circular', 'reasonbench_en_commonsense_circular', 'reasonbench_en_logic_circular']},
    {'name': 'reasonbench_cn_abductive_circular', 'subsets': ['reasonbench_cn_abductive_alphanlg_translated_circular']},
    {'name': 'reasonbench_en_abductive_circular', 'subsets': ['reasonbench_en_abductive_alphanlg_circular']},
    {'name': 'reasonbench_cn_deductive_circular', 'subsets': ['reasonbench_cn_deductive_bbh3obj_translated_circular', 'reasonbench_cn_deductive_logiqa_zh_circular']},
    {'name': 'reasonbench_cn_inductive_circular', 'subsets': ['reasonbench_cn_inductive_deer_translated_circular', 'reasonbench_cn_inductive_selfgenerated_circular']},
    {'name': 'reasonbench_en_inductive_circular', 'subsets': ['reasonbench_en_inductive_deer_circular', 'reasonbench_en_inductive_selfgenerated_circular']},

    {'name': 'reasonbench_cn_circular', 'subsets': ['reasonbench_cn_commonsense_circular', 'reasonbench_cn_abductive_circular', 'reasonbench_cn_deductive_circular', 'reasonbench_cn_inductive_circular']},
    {'name': 'reasonbench_en_circular', 'subsets': ['reasonbench_en_commonsense_circular', 'reasonbench_en_abductive_circular', 'reasonbench_en_deductive_logiqa_zh_translated_circular', 'reasonbench_en_inductive_circular']},
    {'name': 'reasonbench', 'subsets': ['reasonbench_cn_circular', 'reasonbench_en_circular']},
]

summarizer = dict(
    dataset_abbrs=[
        ['reasonbench', 'acc_origin'],
        ['reasonbench_cn_circular', 'acc_origin'],
        ['reasonbench_en_circular', 'acc_origin'],

        ['reasonbench_cn_commonsense_circular', 'acc_origin'],
        ['reasonbench_cn_abductive_circular', 'acc_origin'],
        ['reasonbench_cn_deductive_circular', 'acc_origin'],
        ['reasonbench_cn_inductive_circular', 'acc_origin'],
        ['reasonbench_en_commonsense_circular', 'acc_origin'],
        ['reasonbench_cn_logic_circular', 'acc_origin'],
        ['reasonbench_en_logic_circular', 'acc_origin'],
        ['reasonbench_en_abductive_circular', 'acc_origin'],
        ['reasonbench_en_deductive_logiqa_zh_translated_circular', 'acc_origin'],
        ['reasonbench_en_inductive_circular', 'acc_origin'],

        ['reasonbench_cn_commonsense_circular', 'acc_origin'],
        ['reasonbench_cn_abductive_alphanlg_translated_circular', 'acc_origin'],
        ['reasonbench_cn_deductive_bbh3obj_translated_circular', 'acc_origin'],
        ['reasonbench_cn_deductive_logiqa_zh_circular', 'acc_origin'],
        ['reasonbench_cn_inductive_deer_translated_circular', 'acc_origin'],
        ['reasonbench_cn_inductive_selfgenerated_circular', 'acc_origin'],
        ['reasonbench_en_commonsense_circular', 'acc_origin'],
        ['reasonbench_en_abductive_alphanlg_circular', 'acc_origin'],
        ['reasonbench_en_deductive_bbh7obj_circular', 'acc_origin'],
        ['reasonbench_en_deductive_logiqa_zh_translated_circular', 'acc_origin'],
        ['reasonbench_en_deductive_ocnli_translated_circular', 'acc_origin'],
        ['reasonbench_en_inductive_deer_circular', 'acc_origin'],
        ['reasonbench_en_inductive_selfgenerated_circular', 'acc_origin'],


        ['reasonbench', 'perf_circular'],
        ['reasonbench_cn_circular', 'perf_circular'],
        ['reasonbench_en_circular', 'perf_circular'],

        ['reasonbench_cn_commonsense_circular', 'perf_circular'],
        ['reasonbench_cn_abductive_circular', 'perf_circular'],
        ['reasonbench_cn_deductive_circular', 'perf_circular'],
        ['reasonbench_cn_inductive_circular', 'perf_circular'],
        ['reasonbench_en_commonsense_circular', 'perf_circular'],
        ['reasonbench_cn_logic_circular', 'perf_circular'],
        ['reasonbench_en_logic_circular', 'perf_circular'],
        ['reasonbench_en_abductive_circular', 'perf_circular'],
        ['reasonbench_en_deductive_logiqa_zh_translated_circular', 'perf_circular'],
        ['reasonbench_en_inductive_circular', 'perf_circular'],

        ['reasonbench_cn_commonsense_circular', 'perf_circular'],
        ['reasonbench_cn_abductive_alphanlg_translated_circular', 'perf_circular'],
        ['reasonbench_cn_deductive_bbh3obj_translated_circular', 'perf_circular'],
        ['reasonbench_cn_deductive_logiqa_zh_circular', 'perf_circular'],
        ['reasonbench_cn_inductive_deer_translated_circular', 'perf_circular'],
        ['reasonbench_cn_inductive_selfgenerated_circular', 'perf_circular'],
        ['reasonbench_en_commonsense_circular', 'perf_circular'],
        ['reasonbench_en_abductive_alphanlg_circular', 'perf_circular'],
        ['reasonbench_en_deductive_bbh7obj_circular', 'perf_circular'],
        ['reasonbench_en_deductive_logiqa_zh_translated_circular', 'perf_circular'],
        ['reasonbench_en_deductive_ocnli_translated_circular', 'perf_circular'],
        ['reasonbench_en_inductive_deer_circular', 'perf_circular'],
        ['reasonbench_en_inductive_selfgenerated_circular', 'perf_circular'],
    ],
@ -17,6 +17,28 @@ _base_summary_groups = [
            ['plugin_eval-instruct_v1', 'json_args_em_metric'],
        ]
    },
    {
        'name': 'plugin_eval-instruct_v1',
        'metric': 'string_metric',
        'subsets': [
            ['plugin_eval-instruct_v1', 'string_format_metric'],
            ['plugin_eval-instruct_v1', 'string_args_em_metric'],
        ]
    },
    {
        'name': 'plugin_eval-instruct_v1',
        'metric': 'json_metric',
        'subsets': [
            ['plugin_eval-instruct_v1', 'json_format_metric'],
            ['plugin_eval-instruct_v1', 'json_args_em_metric'],
        ]
    },
    {
        'name': 'copy_plugin_eval-review_str_v1',
        'subsets': [
            ['plugin_eval-review_str_v1', 'review_quality'],
        ],
    },
    {
        'name': 'plugin_eval',
        'subsets': [
@ -31,6 +53,7 @@ _base_summary_groups = [
            ['plugin_eval-understand_str_v1', 'args'],
            ['plugin_eval-reason_retrieve_understand_json_v1', 'args'],
            ['plugin_eval-review_str_v1', 'review_quality'],
            ['copy_plugin_eval-review_str_v1', 'naive_average'],  # a hack for review * 2
        ]
    },
]
@ -62,3 +85,17 @@ for group in _base_summary_groups:
    group['name'] = group['name'].replace('plugin_eval', 'plugin_eval-p10') + '_zh'
    group['subsets'] = [[subset[0].replace('plugin_eval', 'plugin_eval-p10') + '_zh', subset[1]] for subset in group['subsets']]
    plugineval_summary_groups.append(group)

# base -mus-p10-
for group in _base_summary_groups:
    group = deepcopy(group)
    group['name'] = group['name'].replace('plugin_eval', 'plugin_eval-mus-p10')
    group['subsets'] = [[subset[0].replace('plugin_eval', 'plugin_eval-mus-p10'), subset[1]] for subset in group['subsets']]
    plugineval_summary_groups.append(group)

# base -mus-p10- _zh
for group in _base_summary_groups:
    group = deepcopy(group)
    group['name'] = group['name'].replace('plugin_eval', 'plugin_eval-mus-p10') + '_zh'
    group['subsets'] = [[subset[0].replace('plugin_eval', 'plugin_eval-mus-p10') + '_zh', subset[1]] for subset in group['subsets']]
    plugineval_summary_groups.append(group)
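The loops above stamp out renamed copies of the base groups. A compact sketch (toy group data) of the deepcopy-and-rename pattern they all share:

# Each pass deep-copies the base groups, rewrites the 'plugin_eval' prefix,
# and optionally appends a '_zh' suffix to both the group name and subsets.
from copy import deepcopy

_base = [{'name': 'plugin_eval', 'subsets': [['plugin_eval-plan_str_v1', 'f1_score']]}]
derived = []
for prefix, suffix in [('plugin_eval-p10', ''), ('plugin_eval-mus-p10', '_zh')]:
    for group in _base:
        group = deepcopy(group)
        group['name'] = group['name'].replace('plugin_eval', prefix) + suffix
        group['subsets'] = [[s[0].replace('plugin_eval', prefix) + suffix, s[1]]
                            for s in group['subsets']]
        derived.append(group)
print([g['name'] for g in derived])  # ['plugin_eval-p10', 'plugin_eval-mus-p10_zh']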
74
configs/summarizers/groups/teval.py
Normal file
@ -0,0 +1,74 @@
from copy import deepcopy

_base_summary_groups = [
    {
        'name': 'teval-instruct_v1',
        'metric': 'format_metric',
        'subsets': [
            ['teval-instruct_v1', 'string_format_metric'],
            ['teval-instruct_v1', 'json_format_metric'],
        ]
    },
    {
        'name': 'teval-instruct_v1',
        'metric': 'args_em_metric',
        'subsets': [
            ['teval-instruct_v1', 'string_args_em_metric'],
            ['teval-instruct_v1', 'json_args_em_metric'],
        ]
    },
    {
        'name': 'teval-instruct_v1',
        'metric': 'string_metric',
        'subsets': [
            ['teval-instruct_v1', 'string_format_metric'],
            ['teval-instruct_v1', 'string_args_em_metric'],
        ]
    },
    {
        'name': 'teval-instruct_v1',
        'metric': 'json_metric',
        'subsets': [
            ['teval-instruct_v1', 'json_format_metric'],
            ['teval-instruct_v1', 'json_args_em_metric'],
        ]
    },
    {
        'name': 'copy_teval-review_str_v1',
        'subsets': [
            ['teval-review_str_v1', 'review_quality'],
        ],
    },
    {
        'name': 'teval',
        'subsets': [
            ['teval-instruct_v1', 'format_metric'],
            ['teval-instruct_v1', 'args_em_metric'],
            ['teval-plan_str_v1', 'f1_score'],
            ['teval-plan_json_v1', 'f1_score'],
            ['teval-reason_str_v1', 'thought'],
            ['teval-reason_retrieve_understand_json_v1', 'thought'],
            ['teval-retrieve_str_v1', 'name'],
            ['teval-reason_retrieve_understand_json_v1', 'name'],
            ['teval-understand_str_v1', 'args'],
            ['teval-reason_retrieve_understand_json_v1', 'args'],
            ['teval-review_str_v1', 'review_quality'],
            ['copy_teval-review_str_v1', 'naive_average'],  # a hack for review * 2
        ]
    },
]

teval_summary_groups = []

# base
for group in _base_summary_groups:
    group = deepcopy(group)
    teval_summary_groups.append(group)

# base _zh
for group in _base_summary_groups:
    group = deepcopy(group)
    group['name'] = group['name'] + '_zh'
    group['subsets'] = [[subset[0] + '_zh', subset[1]] for subset in group['subsets']]
    teval_summary_groups.append(group)
@ -12,8 +12,22 @@ with read_base():
    from .groups.xiezhi import xiezhi_summary_groups


other_summary_groups = []
other_summary_groups.append({'name': 'Exam', 'subsets': ["ceval", 'agieval', 'mmlu', 'cmmlu', "GaokaoBench", 'ARC-c', 'ARC-e']})
other_summary_groups.append({'name': 'Language', 'subsets': ['WiC', 'chid-dev', 'afqmc-dev', 'WSC', 'tydiqa-goldp', 'flores_100']})
other_summary_groups.append({'name': 'Knowledge', 'subsets': ['BoolQ', 'commonsense_qa', 'triviaqa', 'nq']})
other_summary_groups.append({'name': 'Understanding', 'subsets': ['C3', 'race-middle', 'race-high', 'openbookqa_fact', 'csl_dev', 'lcsts', 'Xsum', 'eprstmt-dev', 'lambada']})
other_summary_groups.append({'name': 'Reasoning', 'subsets': ['cmnli', 'ocnli', 'AX_b', 'AX_g', 'RTE', 'COPA', 'ReCoRD', 'hellaswag', 'piqa', 'siqa', 'math', 'gsm8k', 'drop', 'openai_humaneval', 'mbpp', "bbh"]})
other_summary_groups.append({'name': 'Overall', 'subsets': ['Exam', 'Language', 'Knowledge', 'Understanding', 'Reasoning']})

summarizer = dict(
    dataset_abbrs=[
        'Overall',
        'Exam',
        'Language',
        'Knowledge',
        'Understanding',
        'Reasoning',
        '--------- 考试 Exam ---------',  # category
        # 'Mixed',  # subcategory
        "ceval",
23
configs/summarizers/mathbench_v1.py
Normal file
@ -0,0 +1,23 @@
summarizer = dict(
    dataset_abbrs=[
        '######## MathBench Accuracy ########',  # category
        ['mathbench-college-single_choice_cn', 'acc_1'],
        ['mathbench-college-single_choice_en', 'acc_1'],
        ['mathbench-high-single_choice_cn', 'acc_1'],
        ['mathbench-high-single_choice_en', 'acc_1'],
        ['mathbench-middle-single_choice_cn', 'acc_1'],
        ['mathbench-middle-single_choice_en', 'acc_1'],
        ['mathbench-primary-cloze_cn', 'accuracy'],
        ['mathbench-primary-cloze_en', 'accuracy'],
        ['mathbench-calculate-cloze_en', 'accuracy'],
        '######## MathBench CircularEval ########',  # category
        ['mathbench-college-single_choice_cn', 'perf_4'],
        ['mathbench-college-single_choice_en', 'perf_4'],
        ['mathbench-high-single_choice_cn', 'perf_4'],
        ['mathbench-high-single_choice_en', 'perf_4'],
        ['mathbench-middle-single_choice_cn', 'perf_4'],
        ['mathbench-middle-single_choice_en', 'perf_4'],
    ],
    summary_groups=sum(
        [v for k, v in locals().items() if k.endswith("_summary_groups")], [])
)
36
configs/summarizers/plugineval.py
Normal file
@ -0,0 +1,36 @@
from mmengine.config import read_base

with read_base():
    from .groups.plugineval import plugineval_summary_groups

summarizer = dict(
    dataset_abbrs=[
        ['plugin_eval', 'naive_average'],
        ['plugin_eval-instruct_v1', 'string_metric'],  # instruction following, string format
        ['plugin_eval-instruct_v1', 'json_metric'],  # instruction following, json format
        ['plugin_eval-plan_str_v1', 'f1_score'],  # planning, string format
        ['plugin_eval-plan_json_v1', 'f1_score'],  # planning, json format
        ['plugin_eval-reason_str_v1', 'thought'],  # reasoning, string format
        ['plugin_eval-reason_retrieve_understand_json_v1', 'thought'],  # reasoning, json format
        ['plugin_eval-retrieve_str_v1', 'name'],  # retrieval, string format
        ['plugin_eval-reason_retrieve_understand_json_v1', 'name'],  # retrieval, json format
        ['plugin_eval-understand_str_v1', 'args'],  # understanding, string format
        ['plugin_eval-reason_retrieve_understand_json_v1', 'args'],  # understanding, json format
        ['plugin_eval-review_str_v1', 'review_quality'],  # review, string format

        ['plugin_eval_zh', 'naive_average'],
        ['plugin_eval-instruct_v1_zh', 'string_metric'],
        ['plugin_eval-instruct_v1_zh', 'json_metric'],
        ['plugin_eval-plan_str_v1_zh', 'f1_score'],
        ['plugin_eval-plan_json_v1_zh', 'f1_score'],
        ['plugin_eval-reason_str_v1_zh', 'thought'],
        ['plugin_eval-reason_retrieve_understand_json_v1_zh', 'thought'],
        ['plugin_eval-retrieve_str_v1_zh', 'name'],
        ['plugin_eval-reason_retrieve_understand_json_v1_zh', 'name'],
        ['plugin_eval-understand_str_v1_zh', 'args'],
        ['plugin_eval-reason_retrieve_understand_json_v1_zh', 'args'],
        ['plugin_eval-review_str_v1_zh', 'review_quality'],
    ],
    summary_groups=sum(
        [v for k, v in locals().items() if k.endswith("_summary_groups")], [])
)
36
configs/summarizers/teval.py
Normal file
@ -0,0 +1,36 @@
from mmengine.config import read_base

with read_base():
    from .groups.teval import teval_summary_groups

summarizer = dict(
    dataset_abbrs=[
        ['teval', 'naive_average'],
        ['teval-instruct_v1', 'string_metric'],  # instruction following, string format
        ['teval-instruct_v1', 'json_metric'],  # instruction following, json format
        ['teval-plan_str_v1', 'f1_score'],  # planning, string format
        ['teval-plan_json_v1', 'f1_score'],  # planning, json format
        ['teval-reason_str_v1', 'thought'],  # reasoning, string format
        ['teval-reason_retrieve_understand_json_v1', 'thought'],  # reasoning, json format
        ['teval-retrieve_str_v1', 'name'],  # retrieval, string format
        ['teval-reason_retrieve_understand_json_v1', 'name'],  # retrieval, json format
        ['teval-understand_str_v1', 'args'],  # understanding, string format
        ['teval-reason_retrieve_understand_json_v1', 'args'],  # understanding, json format
        ['teval-review_str_v1', 'review_quality'],  # review, string format

        ['teval_zh', 'naive_average'],
        ['teval-instruct_v1_zh', 'string_metric'],
        ['teval-instruct_v1_zh', 'json_metric'],
        ['teval-plan_str_v1_zh', 'f1_score'],
        ['teval-plan_json_v1_zh', 'f1_score'],
        ['teval-reason_str_v1_zh', 'thought'],
        ['teval-reason_retrieve_understand_json_v1_zh', 'thought'],
        ['teval-retrieve_str_v1_zh', 'name'],
        ['teval-reason_retrieve_understand_json_v1_zh', 'name'],
        ['teval-understand_str_v1_zh', 'args'],
        ['teval-reason_retrieve_understand_json_v1_zh', 'args'],
        ['teval-review_str_v1_zh', 'review_quality'],
    ],
    summary_groups=sum(
        [v for k, v in locals().items() if k.endswith("_summary_groups")], [])
)
@ -91,6 +91,7 @@ from .summedits import * # noqa: F401, F403
from .summscreen import *  # noqa: F401, F403
from .svamp import *  # noqa: F401, F403
from .tabmwp import *  # noqa: F401, F403
from .teval import *  # noqa: F401, F403
from .TheoremQA import *  # noqa: F401, F403
from .tnews import *  # noqa: F401, F403
from .triviaqa import *  # noqa: F401, F403
@ -33,7 +33,7 @@ def gsm8k_dataset_postprocess(text: str) -> str:

@TEXT_POSTPROCESSORS.register_module('gsm8k')
def gsm8k_postprocess(text: str) -> str:
    text = text.split('\n\n')[0]
    text = text.split('Question:')[0]
    text = text.split(' ')[::-1]
    flag = False
    ret = ''
@ -263,9 +263,12 @@ class MBPPEvaluator(BaseEvaluator):
    def _process_answer(self, text):
        try:
            # for chatGLM related text
            text = eval(text)
            eval_text = eval(text)
        except Exception:
            pass
        else:
            if isinstance(eval_text, str):
                text = eval_text
        # deal with code block
        if '```' in text:
            blocks = re.findall(r'```(.*?)```', text, re.DOTALL)
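The guarded eval() above handles models that return a quoted string literal instead of bare code. A tiny standalone illustration (hypothetical helper and inputs) of the try/except/else flow:

def unwrap_literal(text):
    # Mirrors the pattern above: only adopt eval()'s result when it is a str.
    try:
        eval_text = eval(text)
    except Exception:
        return text
    return eval_text if isinstance(eval_text, str) else text

print(unwrap_literal("'hello'"))        # hello  (string literal unwrapped)
print(unwrap_literal('def f(): pass'))  # def f(): pass  (SyntaxError, kept as-is)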
58
opencompass/datasets/teval/__init__.py
Normal file
@ -0,0 +1,58 @@
import json
import os.path as osp
from typing import Dict, Optional

import mmengine
from datasets import Dataset, DatasetDict

from opencompass.registry import TEXT_POSTPROCESSORS

from ..base import BaseDataset


class TEvalDataset(BaseDataset):

    def __init__(self, reader_cfg: Optional[Dict] = {}, **kwargs):
        super().__init__(reader_cfg=reader_cfg, **kwargs)

    def load(self, path: str, name: str):

        dataset = DatasetDict()
        data = mmengine.load(osp.join(path, f'{name}.json'))
        raw_data = []
        for i in data.keys():
            origin_prompt = data[i]['origin_prompt']
            if isinstance(origin_prompt, str):
                origin_prompt = json.loads(origin_prompt)
            # Aligning the default roles of opencompass
            prompt = origin_prompt + [
                dict(role='assistant',
                     content=str(data[i].get('ground_truth')))
            ]
            raw_data.append({
                'prompt': prompt,
                'ground_truth': json.dumps(data[i])
            })
        dataset['test'] = Dataset.from_list(raw_data)
        dataset['train'] = Dataset.from_list(raw_data)
        return dataset


@TEXT_POSTPROCESSORS.register_module('teval')
def teval_postprocess(text: str) -> str:
    if isinstance(text, str):
        text = text.split('<eoa>')[0]
        text = text.split('<TOKENS_UNUSED_1>')[0]
        text = text.split('<|im_end|>')[0]
        text = text.split('\nuser')[0]
        text = text.split('\nUSER')[0]
        text = text.split('[INST]')[0]
        text = text.strip()
        if text.startswith('```json'):
            text = text[len('```json'):]
        text = text.strip('`').strip()
        if text[:2] == '{{' and text[-2:] == '}}':
            text = text[1:-1]
    return str(text)
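A quick usage sketch of teval_postprocess (made-up model output): chat-template stop tokens and markdown fences are stripped so the JSON payload can be parsed downstream.

raw = '```json\n{"thought": "look up the weather", "name": "get_weather"}\n```<|im_end|>'
print(teval_postprocess(raw))
# -> {"thought": "look up the weather", "name": "get_weather"}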
5
opencompass/datasets/teval/evaluators/__init__.py
Normal file
@ -0,0 +1,5 @@
from .instruct_evaluator import InstructEvaluator
from .planning_evaluator import PlanningEvaluator
from .review_evaluator import ReviewEvaluator
from .reason_retrieve_understand_evaluator import ReasonRetrieveUnderstandEvaluator
__all__ = ['InstructEvaluator', 'PlanningEvaluator', 'ReviewEvaluator', 'ReasonRetrieveUnderstandEvaluator']
152
opencompass/datasets/teval/evaluators/instruct_evaluator.py
Normal file
@ -0,0 +1,152 @@
from collections import defaultdict
from mmengine import load

from ..utils.template import parse_string
from ..utils.format_load import format_load
from ..schema import ResponseDataSample
import ast
import numpy as np

class InstructEvaluator:
    """Instruction Following Evaluation

    Args:
        dataset_path(str): File path of evaluation dataset.
    """

    def __init__(
        self,
        dataset_path: str,
        **kwargs,
    ) -> None:
        self.dataset_path = dataset_path

    def _load_dataset(self):
        self.dataset = []
        dataset = load(self.dataset_path)

        for key in dataset.keys():
            datum = dataset[key]
            data_sample = self._process_response(datum)

            self.dataset.append(
                dict(
                    origin_prompt=datum["origin_prompt"],
                    response_data_sample=data_sample))
        self.num_samples = len(self.dataset)

    def _process_response(
        self,
        datum: dict,
    ) -> ResponseDataSample:
        """Process the response to needed format.

        Args:
            datum(dict): inputs.

        Returns:
            dict: Processed response data sample.
        """

        # Dict with keyword-only arguments.
        template = datum['template']
        # Generated response.
        pred_data = datum['prediction']
        # Response of ground truth.
        gt_data = datum['ground_truth']
        meta_data = datum['meta_data']

        return ResponseDataSample(
            template=template, pred=pred_data, gt=gt_data, meta_data=meta_data)

    def _evaluate(self, data_sample: dict) -> dict:
        metrics_result = dict()
        response_format = data_sample.meta_data['response_format']
        if response_format == 'json':
            pred_data = self.json_format_parse(data_sample)
        else:
            pred_data = self.string_format_parse(data_sample)

        if pred_data is None:
            # directly set to 0 for all metrics
            metrics_result[f'{response_format}_format_metric'] = 0
            metrics_result[f'{response_format}_args_em_metric'] = 0
            return metrics_result

        # Exact matching
        metrics_result[f'{response_format}_format_metric'] = 1
        metrics_result[f'{response_format}_args_em_metric'] = self.compute_args_em_metric(
            gt_action=data_sample.gt['action'], pred_action=pred_data['action'],
            gt_args=data_sample.gt['args'], pred_args=pred_data['args']
        )
        return metrics_result

    def compute_args_em_metric(self, gt_action, pred_action, gt_args, pred_args):
        cnt = 0.
        if gt_action == pred_action:
            cnt += 1.
        num_args = len(gt_args) + 1  # 1 means action name match
        for gt_key in gt_args:
            pred_val = pred_args.get(gt_key, "")
            if pred_val == gt_args[gt_key]:
                cnt += 1.
        return cnt / num_args

    def string_format_parse(self, data_sample):
        pred_data = data_sample.pred
        template = data_sample.template
        thought_start = template['thought_start']
        thought_end = template['thought_end']
        action_start = template['action_start']
        action_end = template['action_end']
        args_start = template['args_start']
        args_end = template['args_end']

        parse_template = thought_start + "{thought}" + thought_end \
            + action_start + "{action}" + action_end \
            + args_start + "{args}" + args_end
        res = parse_string(parse_template, pred_data, allow_newline=True)
        try:
            if res is not None:
                args = ast.literal_eval(res['args'].strip())
                res['args'] = args if isinstance(args, dict) else {}
                res['action'] = res['action'].strip()
            return res
        except Exception:
            # args could not be parsed as a dict; keep thought/action only
            return dict(thought=res['thought'], action=res['action'].strip(), args=dict())

    def json_format_parse(self, data_sample):
        try:
            pred_data = format_load(data_sample.pred)
            template = data_sample.template
            new_data = dict()
            new_data['thought'] = pred_data[template['thought']]
            new_data['action'] = pred_data[template['action']]
            args = pred_data[template['args']]
            new_data['args'] = args if isinstance(args, dict) else {}
        except Exception:
            return None

        return new_data

    def evaluate(self):
        self._load_dataset()
        results_list = []
        for data_sample in self.dataset:
            metrics_result = self._evaluate(data_sample['response_data_sample'])
            results_list.append(metrics_result)
        return self._post_process(results_list)

    def _post_process(self, results_list):
        # list of dict to dict of list
        results_dict = defaultdict(list)
        for sub in results_list:
            for key in sub:
                results_dict[key].append(sub[key])
        metric_list = ['json_format_metric', 'json_args_em_metric',
                       'string_format_metric', 'string_args_em_metric']
        for metric in metric_list:
            results_dict[metric] = np.round(np.mean(results_dict[metric]), decimals=4)
        return results_dict
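A worked example (hypothetical values) of compute_args_em_metric above: a matching action name contributes one point, each exactly-matched argument one more, and the sum is normalized by len(gt_args) + 1.

gt_action, pred_action = 'get_weather', 'get_weather'
gt_args = {'city': 'Beijing', 'unit': 'celsius'}
pred_args = {'city': 'Beijing', 'unit': 'fahrenheit'}

cnt = 1.0   # action names match
cnt += 1.0  # 'city' matches; 'unit' does not
print(cnt / (len(gt_args) + 1))  # 2/3 ~= 0.667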
394
opencompass/datasets/teval/evaluators/planning_evaluator.py
Normal file
@ -0,0 +1,394 @@
from numpy import mean
from mmengine import load
from ..utils.format_load import format_load
import itertools
import networkx as nx
import numpy as np
import copy
import re
from tqdm import tqdm

from ..schema import ResponseDataSample
from sentence_transformers import SentenceTransformer, util


class PlanningEvaluator:
    """Planning Evaluation
    Args:
        dataset_path(str): File path of evaluation dataset
        name_weight(float): the weight of action_name in bert_score match, default = 0.9
        args_weight(float): the weight of action_args in bert_score match, default = 0.1
        match_threshold(float): the threshold of matching
        match_strategy(str): matching method, can choose 'bertscore' or 'permutation'
        bert_score_model(str): the bert_score model for sentence similarity, default = "all-mpnet-base-v2".
            Refer to https://www.sbert.net/docs/pretrained_models.html for more models.
    """
    def __init__(
        self,
        dataset_path: str,
        name_weight = 0.75,
        args_weight = 0.25,
        match_threshold = 0.7,
        match_strategy: str = 'bertscore',  # ["bertscore", "permutation"]
        bert_score_model: str = "all-mpnet-base-v2",  # ['thenlper/gte-large-zh', 'all-mpnet-base-v2']
        default_prompt_type: str = 'json',  # ["json", "ReWOO"]
        **kwargs,
    ) -> None:
        self.bert_score_model = bert_score_model
        print(bert_score_model)
        self.dataset_path = dataset_path
        self.name_weight = name_weight
        self.args_weight = args_weight
        self.match_threshold = match_threshold
        self.default_prompt_type = default_prompt_type  # ["json", "ReWOO"]
        assert match_strategy in ["bertscore", "permutation"], f"match strategy must in [\"bertscore\", \"permutation\"], but get {match_strategy}"
        self.match_strategy = match_strategy
        self.valid_data_count = None
        self.sentence_model = SentenceTransformer(self.bert_score_model)

    def _load_dataset(self):
        self.dataset = []
        dataset = load(self.dataset_path)
        total_error = 0
        total_count = 0
        for key in dataset.keys():
            datum = dataset[key]
            data_sample, error = self._process_response(datum)
            total_error += error
            total_count += 1
            self.dataset.append(
                dict(response_data_sample=data_sample))

        self.num_samples = len(self.dataset)
        print("total_data_count:", total_count, "valid_data_count:", total_count - total_error)
        self.valid_data_count = total_count - total_error

    def format_load(self, data):
        r'''
        Ensure the evaluator can work correctly under any data input.
        '''
        try:
            json_format = format_load(data, start_character='[', end_character=']')
        except Exception:
            return []
        if type(json_format) != list:
            return []
        for i in range(len(json_format)):
            try:
                json_format[i] = {
                    'name': str(json_format[i]['name']),
                    'id': int(json_format[i]['id']),
                    'args': str(json_format[i]['args'])
                }
            except Exception:
                return []
        return json_format

    def _process_response(
        self,
        datum,
    ) -> ResponseDataSample:
        """Process the response to needed format.
        Args:
            datum(dict): inputs.
        Returns:
            dict: Processed response data sample.
        """

        # Generated response, which can be a string or list
        pred_data = datum['prediction']
        # Response of ground truth, which can be a string or list
        gt_data = datum['ground_truth']
        # prompt_type: The type of planning prompt, supporting "json" and "ReWOO"
        if "meta" in datum:
            prompt_type = datum["meta"].get("prompt_type", self.default_prompt_type)
        else:
            prompt_type = self.default_prompt_type

        error = 0
        pred = dict()
        gt = dict()
        gt['planning'] = self.format_load(gt_data)
        if prompt_type == 'json':
            pred['planning'] = self.format_load(pred_data)
            if pred['planning'] == [] or gt['planning'] == []:
                error = 1

        elif prompt_type == 'ReWOO':
            """
            This type is deprecated.
            The planning prediction data should be in this format:
                Plan 1: <str> description about the first action
                Dependency 1: <list[number]> the first action depends on which previous actions
                Action 1: #E1 = api_name1(args1)
                ...
            It is parsed only if "number of plan lines == number of dependency lines == number of action lines".
            The parsed data's format is:
                [
                    dict(
                        id = i,
                        name = curr_name,
                        args = args_str
                    )
                    ...
                ]

            The golden answer is a json in the same format.
            """
            thoughts = re.findall(r'(Plan [0-9]+: .+)', pred_data)
            dependencies = re.findall(r'(Dependency [0-9]+: .+)', pred_data)
            action_units = re.findall(r'Action [0-9]+: (.+)', pred_data)

            if not (len(thoughts) == len(dependencies) and len(thoughts) == len(action_units)):
                pred['planning'] = []
                gt['planning'] = []
                return ResponseDataSample(template='', pred=pred, gt=gt), 1

            plan_action = []
            for i in range(len(action_units)):
                dependency_list = re.findall(r'Dependency [0-9]+: (.+)', dependencies[i])
                if action_units[i][0] == '#':
                    # The action has a return #E
                    args_str_list = re.findall(r'#E[0-9]+ = .+\((.+)\)', action_units[i])
                    name_list = re.findall(r'#E[0-9]+ = (.+)\(', action_units[i])
                else:
                    # The action does not have a return
                    args_str_list = re.findall(r'.+\((.+)\)', action_units[i])
                    name_list = re.findall(r'(.+)\(', action_units[i])
                if (len(name_list) > 0):
                    curr_name = name_list[0]
                else:
                    curr_name = ""
                if (len(args_str_list) > 0):
                    args_str = "{" + args_str_list[0] + "}"
                else:
                    args_str = "{}"
                if (len(dependency_list) > 0):
                    dependency_str = dependency_list[0]
                else:
                    dependency_str = ""
                dependency = re.findall('([0-9]+)', dependency_str)
                dependency = list(set([int(x) - 1 for x in dependency]))
                plan_action.append(
                    dict(
                        id = i,
                        name = curr_name,
                        prev = dependency,
                        args = args_str
                    ))
            pred['planning'] = plan_action
            # Turn dict into args str
            for i in range(len(gt['planning'])):
                args_str = ""
                if type(gt['planning'][i]['args']) == str:
                    args_dict = eval(gt['planning'][i]['args'])
                else:
                    assert type(gt['planning'][i]['args']) == dict
                    args_dict = gt['planning'][i]['args']
                for it in args_dict:
                    if args_str == "": args_str += f"{it}=\"{args_dict[it]}\""
                    else: args_str += f", {it}=\"{args_dict[it]}\""
                gt['planning'][i]['args'] = '{' + args_str + '}'

        elif prompt_type == 'str':
            pred_data_format = pred_data.replace('. ', '\n').split('\n')
            pred_actions = []
            for pred_step in pred_data_format:
                first_occur_time = 1e9
                pred_action = ""
                for api_name in datum['meta']['API_list']:
                    occur_time = pred_step.find(api_name)
                    if occur_time != -1 and occur_time < first_occur_time:
                        first_occur_time = occur_time
                        pred_action = api_name
                if pred_action != "":
                    pred_actions.append({
                        'id': len(pred_actions),
                        'name': pred_action,
                        'args': pred_step
                    })
            pred['planning'] = pred_actions
            if len(pred['planning']) == 0:
                error = 1
        else:
            raise NotImplementedError(f"Currently, we only support json and ReWOO format, but get {prompt_type}")

        return ResponseDataSample(template='', pred=pred, gt=gt), error

    def _evaluate(self, data_sample) -> dict:
        if self.match_strategy == 'bertscore':
            metrics_result = self.bertscore_match(
                data_sample.pred['planning'], data_sample.gt['planning'])
        elif self.match_strategy == 'permutation':
            metrics_result = self.permutation_match(
                data_sample.pred['planning'], data_sample.gt['planning'])
        else:
            raise NotImplementedError
        if len(data_sample.pred['planning']) == 0 or len(data_sample.gt['planning']) == 0:
            metrics_result['parse_rate'] = 0
        else:
            metrics_result['parse_rate'] = 1
        return metrics_result

    def evaluate(self):
        self._load_dataset()
        results_list = []
        for data_sample in tqdm(self.dataset):
            metrics_result = self._evaluate(
                data_sample['response_data_sample'])
            results_list.append(metrics_result)
        return self._post_process(results_list)

    def permutation_match(self, pred_plan, gt_plan) -> dict:
        '''
        The function calculates all the permutation matches' scores and selects the max f1_score;
        since permutation is time-consuming, we truncate the length of plans to 9.
        '''
        if pred_plan[-1]['name'] != 'FinishAction':
            pred_plan.append(
                {'id': len(pred_plan), 'prev': [], 'name': 'FinishAction', 'args': r'\{\}'}
            )

        if gt_plan[-1]['name'] != 'FinishAction':
            gt_plan.append(
                {'id': len(gt_plan), 'prev': [], 'name': 'FinishAction', 'args': r'\{\}'}
            )

        # truncate plans to 9 since it is too long for permutation.
        if len(pred_plan) > 9: pred_plan = pred_plan[:9]
        if len(gt_plan) > 9: gt_plan = gt_plan[:9]

        pred_plan = sorted(pred_plan, key=lambda x: x['id'])
        gt_plan = sorted(gt_plan, key=lambda x: x['id'])
        len_pred = len(pred_plan)
        len_gt = len(gt_plan)
        map_id_max = max(len_pred, len_gt)
        numbers = [i for i in range(map_id_max)]
        perms = itertools.permutations(numbers, len_pred)
        gt_prev_count, pred_prev_count = 0, 0
        for i in range(len_gt):
            gt_plan[i]['prev'].append(i)
            gt_prev_count += len(gt_plan[i]['prev'])
        for i in range(len_pred):
            pred_plan[i]['prev'].append(i)
            pred_prev_count += len(pred_plan[i]['prev'])
        if gt_prev_count == 0 or pred_prev_count == 0:
            return {
                'precision': 0,
                'recall': 0,
                'f1_score': 0
            }
        max_recall, max_precision, max_f1 = 0, 0, 0
        for perm in perms:
            correct_count = 0
            for i in range(len_pred):
                if perm[i] >= len_gt:
                    continue
                for j in pred_plan[i]['prev']:
                    if perm[j] in gt_plan[perm[i]]['prev']:
                        correct_count += 1
            now_recall, now_precision = correct_count / gt_prev_count, correct_count / pred_prev_count
            if now_recall + now_precision == 0:
                continue
            now_f1 = 2 * now_recall * now_precision / (now_recall + now_precision)
            if now_f1 > max_f1:
                max_f1, max_recall, max_precision = now_f1, now_recall, now_precision
        return {
            'precision': max_precision,
            'recall': max_recall,
            'f1_score': max_f1
        }

    def bertscore_match(self, pred_plan, gt_plan) -> dict:
        """
        Calculate the similarity between predicted plan and golden answer.
        A plan can be regarded as a sequence of actions, and each action has a name and args.
        Firstly, use bertscore to calculate pointwise similarity by:
            similarity(u, v) = bertscore(u.name, v.name) * name_weight + bertscore(u.args, v.args) * args_weight;
        Secondly, use Hungarian matching to match the points;
        Finally, use LIS to calculate the number of matched nodes.
        """
        if len(pred_plan) == 0 or len(gt_plan) == 0:
            return {
                'precision': 0,
                'recall': 0,
                'f1_score': 0
            }

        pred_plan = copy.deepcopy(sorted(pred_plan, key=lambda x: x['id']))
        gt_plan = copy.deepcopy(sorted(gt_plan, key=lambda x: x['id']))

        # Remove the end action (currently hard-coded)
        if pred_plan[-1]['name'] == 'FinishAction':
            pred_plan = pred_plan[:-1]
        if gt_plan[-1]['name'] == 'FinishAction':
            gt_plan = gt_plan[:-1]
        # The total counts of nodes and edges.
        len_pred = len(pred_plan)
        len_gt = len(gt_plan)

        bert_score_matrix = np.zeros((len_pred, len_gt))
        name_pred, args_pred = [], []
        name_gt, args_gt = [], []
        for i in range(len_pred):
            name_pred.append(pred_plan[i]['name'])
            args_pred.append(str(pred_plan[i]['args']))
        for i in range(len_gt):
            name_gt.append(gt_plan[i]['name'])
            args_gt.append(str(gt_plan[i]['args']))
        name_pred_emb = self.sentence_model.encode(name_pred, convert_to_tensor=True)
        name_gt_emb = self.sentence_model.encode(name_gt, convert_to_tensor=True)
        args_pred_emb = self.sentence_model.encode(args_pred, convert_to_tensor=True)
        args_gt_emb = self.sentence_model.encode(args_gt, convert_to_tensor=True)
        name_cosine_scores = np.maximum(util.cos_sim(name_pred_emb, name_gt_emb).cpu().numpy(), 0)
        args_cosine_scores = np.maximum(util.cos_sim(args_pred_emb, args_gt_emb).cpu().numpy(), 0)
        for i in range(len_pred):
            for j in range(len_gt):
                bert_score_matrix[i][j] = \
                    name_cosine_scores[i][j] * self.name_weight \
                    + args_cosine_scores[i][j] * self.args_weight
        G = nx.Graph()
        for i in range(len_pred):
            for j in range(len_gt):
                if bert_score_matrix[i][j] > self.match_threshold:
                    G.add_edge(i, str(j), weight=bert_score_matrix[i][j])
        max_weight_matching = nx.max_weight_matching(G)

        pred_to_gt_mapping = dict()
        for key in max_weight_matching:
            if type(key[0]) == int:
                pred_to_gt_mapping[int(key[0])] = int(key[1])
            else:
                pred_to_gt_mapping[int(key[1])] = int(key[0])

        # If a prediction node does not match any golden answer node, we mark the node as -1.
        for i in range(len_pred):
            if i not in pred_to_gt_mapping:
                pred_to_gt_mapping[i] = -1
        # Calculate how many nodes are matched by Longest Increasing Subsequence (LIS)
        dp = np.ones(len_pred)
        for i in range(len_pred):
            for j in range(i):
                if pred_to_gt_mapping[i] == -1 or pred_to_gt_mapping[j] == -1:
                    continue
                if pred_to_gt_mapping[i] > pred_to_gt_mapping[j]:
                    dp[i] = max(dp[i], dp[j] + 1)
        correct_count = int(max(dp))

        recall, precision = correct_count / len(gt_plan), correct_count / len(pred_plan)
        f1_score = 2 * recall * precision / (recall + precision)
        result = {
            'precision': precision,
            'recall': recall,
            'f1_score': f1_score
        }
        return result

    def _post_process(self, results_list):
        # list of dict to dict of list
        results = dict()
        planning_metric_keys = ["precision", "recall", "f1_score", 'parse_rate']
        for key in planning_metric_keys:
            results[key] = mean([result[key] for result in results_list])
        return results
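The LIS step in bertscore_match deserves a standalone sketch: after the maximum-weight matching maps each predicted step to a gold step (-1 for unmatched), the longest increasing subsequence counts how many matched steps also preserve the gold ordering. A toy re-implementation (mirroring the dp above, including its floor of 1 for any non-empty plan):

def lis_matched(mapping):
    dp = [1] * len(mapping)
    for i in range(len(mapping)):
        for j in range(i):
            if mapping[i] != -1 and mapping[j] != -1 and mapping[i] > mapping[j]:
                dp[i] = max(dp[i], dp[j] + 1)
    return max(dp) if dp else 0

print(lis_matched([0, 2, 1, 3]))  # 3: the steps mapped to 0, 2, 3 keep gold order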
@ -0,0 +1,455 @@
import json
from numpy import mean
from mmengine import load
import numpy as np
import re
from tqdm import tqdm

from ..schema import ResponseDataSample
from ..utils.format_load import format_load
from sentence_transformers import SentenceTransformer, util


def input_postprocess(text: str) -> str:
    if isinstance(text, str):
        text = text.split('<|')[0]
        text = text.split('<eoa>\n')[0]
        text = text.split('<TOKENS_UNUSED_1>\n')[0]
        text = text.split('<|im_end|>')[0]
        if len(text) > 1 and text[:2] == '{{' and text[-2:] == '}}':
            text = text[1:-1]
        while len(text) > 0 and text[-1] == '\n':
            text = text[:-1]
    return str(text)

class ReasonRetrieveUnderstandEvaluator:
    """Reason / Retrieve / Understand Evaluation
    Args:
        dataset_path(str): File path of evaluation dataset
        bert_score_model(str): the bert_score model for sentence similarity, default = "all-mpnet-base-v2".
            Refer to https://www.sbert.net/docs/pretrained_models.html for more models.
    """
    def __init__(
        self,
        dataset_path: str,
        bert_score_model: str = "all-mpnet-base-v2",  # ['thenlper/gte-large-zh', 'all-mpnet-base-v2']
        default_prompt_type: str = 'json',
        eval_type: str = 'reason',
        **kwargs,
    ) -> None:
        self.bert_score_model = bert_score_model
        print(bert_score_model)
        self.dataset_path = dataset_path
        # self.bertscore = evaluate.load('bertscore')
        self.default_prompt_type = default_prompt_type  # ["json", "str"]
        self.eval_type = eval_type
        self.valid_data_count = None
        self.sentence_model = SentenceTransformer(self.bert_score_model)

    def _load_dataset(self):
        self.dataset = []
        dataset = load(self.dataset_path)
        total_error = 0
        total_count = 0
        for key in dataset.keys():
            datum = dataset[key]
            data_sample, error = self._process_response(datum)
            total_error += error
            total_count += 1
            self.dataset.append(
                dict(response_data_sample=data_sample))

        self.num_samples = len(self.dataset)
        # print("total_data_count:", total_count, "valid_data_count:", total_count - total_error)
        self.valid_data_count = total_count - total_error

    def format_load(self, data):
        r'''
        Ensure the evaluator can work correctly under any data input.
        '''
        try:
            json_format = format_load(data, start_character='{', end_character='}')
        except Exception:
            return {}
        if type(json_format) != dict:
            return {}
        prepared_json_format = dict()
        try:
            prepared_json_format['thought'] = str(json_format['thought'])
        except Exception:
            prepared_json_format['thought'] = ''
        try:
            prepared_json_format['name'] = str(json_format['name'])
        except Exception:
            prepared_json_format['name'] = ''

        if self.default_prompt_type == 'json':
            try:
                if isinstance(json_format['args'], dict):
                    prepared_json_format['args'] = json_format['args']
                else:
                    prepared_json_format['args'] = dict()
            except Exception:
                prepared_json_format['args'] = dict()
        else:
            try:
                prepared_json_format['args'] = str(json_format['args'])
            except Exception:
                prepared_json_format['args'] = ""

        return prepared_json_format

    def _process_response(
        self,
        datum,
    ) -> ResponseDataSample:
        """Process the response to needed format.
        Args:
            datum(dict): inputs.
        Returns:
            dict: Processed response data sample.
        """

        # Generated response, which can be a string or list
        pred_data = datum['prediction']
        # Response of ground truth, which can be a string or list
        gt_data = datum['ground_truth']
        # prompt_type: The type of prompt, supporting "json" and "str"
        if "meta_data" in datum:
            prompt_type = datum["meta_data"].get("response_format", self.default_prompt_type)
        else:
            prompt_type = self.default_prompt_type

        error = 0
        gt = self.format_load(gt_data)
        # pred_data = input_postprocess(pred_data)

        if prompt_type == 'json':
            pred = self.format_load(pred_data)
            if pred == {} or gt == {}:
                error = 1
        elif prompt_type == 'str':
            # choose the first line
            pred = dict()
            if self.eval_type == 'reason':
                pred['thought'] = pred_data
            if self.eval_type == 'retrieve':
                pred['name'] = pred_data
            if self.eval_type == 'understand':
                pred['args'] = pred_data
        else:
            raise NotImplementedError(f"Currently, we only support json and str format, but get {prompt_type}")

        if error == 1:
            pred = dict()
        return ResponseDataSample(template='', pred=pred, gt=gt), error

    def _evaluate(self, data_sample):
        """Evaluate the response data sample.
        """
        # To enable batch evaluation, the evaluator is written at post_process.
        return data_sample

    def evaluate(self):
        self._load_dataset()
        results_list = []
        for data_sample in tqdm(self.dataset):
            metrics_result = self._evaluate(
                data_sample['response_data_sample'])
            results_list.append(metrics_result)
        return self._post_process(results_list)

    def find_a_dot_b_structure(self, text):
        # find a.b structure
        pattern = r'\w+\.\w+'
        return re.findall(pattern, text)

    def find_FinishAction(self, text):
        # find FinishAction
        pattern = r'FinishAction'
        return re.findall(pattern, text)

    def _post_process(self, results_list):
        # list of dict to dict of list
        if self.default_prompt_type == 'json':
            metric_keys = ['thought', 'name', 'args', 'parse_rate']
        if self.default_prompt_type == 'str':
            if self.eval_type == 'reason':
                metric_keys = ['thought', 'parse_rate']
            if self.eval_type == 'retrieve':
                metric_keys = ['name', 'parse_rate']
            if self.eval_type == 'understand':
                metric_keys = ['args', 'parse_rate']
        metrics_results = []
        batch_data = []; batch_arg_data = []
        batch_id = []; batch_arg_id = []
        BATCH_LIMIT = 32
        for id, data in enumerate(results_list):
            metrics_results.append(
                {metric_keys[x]: 0 for x in range(len(metric_keys))}
            )
            if len(data.pred.keys()) != 0:
                metrics_results[id]['parse_rate'] = 1
            if 'thought' in data.pred and 'thought' in data.gt:
                batch_data.extend([data.pred['thought'], data.gt['thought']])
                batch_id.extend([id])
                if len(batch_data) >= BATCH_LIMIT:
                    pred_emb = self.sentence_model.encode(batch_data, convert_to_tensor=True)
                    for i in range(0, len(batch_data), 2):
                        cosine_score = np.maximum(util.cos_sim(pred_emb[i], pred_emb[i+1]).cpu().numpy(), 0)
                        metrics_results[batch_id[i // 2]]['thought'] = cosine_score[0, 0]
                    batch_data = []
                    batch_id = []
            if 'name' in data.pred and 'name' in data.gt:
                if self.default_prompt_type == 'json':
                    if data.pred['name'] == data.gt['name']:
                        metrics_results[id]['name'] = 1
                    else:
                        metrics_results[id]['name'] = 0
                else:
                    if data.gt['name'] not in data.pred['name']:
                        metrics_results[id]['name'] = 0
                    else:
                        metrics_results[id]['name'] = 1
                        find_all_name = self.find_a_dot_b_structure(data.pred['name']) + self.find_FinishAction(data.pred['name'])
|
||||
for name in find_all_name:
|
||||
if name != data.gt['name']:
|
||||
metrics_results[id]['name'] = 0
|
||||
|
||||
if 'args' in data.pred and 'args' in data.gt:
|
||||
batch_arg_data.extend([str(data.pred['args']), str(data.gt['args'])])
|
||||
batch_arg_id.extend([id])
|
||||
if len(batch_arg_data) >= BATCH_LIMIT:
|
||||
pred_emb = self.sentence_model.encode(batch_arg_data, convert_to_tensor=True)
|
||||
for i in range(0, len(batch_arg_data), 2):
|
||||
cosine_score = np.maximum(util.cos_sim(pred_emb[i], pred_emb[i+1]).cpu().numpy(), 0)
|
||||
metrics_results[batch_arg_id[i // 2]]['args'] = cosine_score[0, 0]
|
||||
batch_arg_data = []
|
||||
batch_arg_id = []
|
||||
|
||||
if len(batch_data) > 0:
|
||||
pred_emb = self.sentence_model.encode(batch_data, convert_to_tensor=True)
|
||||
for i in range(0, len(batch_data), 2):
|
||||
cosine_score = np.maximum(util.cos_sim(pred_emb[i], pred_emb[i+1]).cpu().numpy(), 0)
|
||||
metrics_results[batch_id[i // 2]]['thought'] = cosine_score[0, 0]
|
||||
batch_data = []
|
||||
batch_id = []
|
||||
|
||||
if len(batch_arg_data) > 0:
|
||||
pred_emb = self.sentence_model.encode(batch_arg_data, convert_to_tensor=True)
|
||||
for i in range(0, len(batch_arg_data), 2):
|
||||
cosine_score = np.maximum(util.cos_sim(pred_emb[i], pred_emb[i+1]).cpu().numpy(), 0)
|
||||
metrics_results[batch_arg_id[i // 2]]['args'] = cosine_score[0, 0]
|
||||
batch_arg_data = []
|
||||
batch_arg_id = []
|
||||
|
||||
results = dict()
|
||||
for key in metric_keys:
|
||||
results[key] = mean([metrics_results[key] for metrics_results in metrics_results])
|
||||
return results
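The batched `_post_process` above amortizes embedding cost: (pred, gt) pairs are flattened into one list, encoded in a single call once BATCH_LIMIT is reached, and adjacent rows are compared pairwise. A minimal standalone sketch of that scoring step (the model name matches the evaluator's default; the sentences are made up for illustration):

import numpy as np
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('all-mpnet-base-v2')

# Pairs are flattened as [pred_0, gt_0, pred_1, gt_1, ...] so one encode()
# call embeds a whole batch, then adjacent rows are compared pairwise.
pairs = [
    'Search the weather, then decide whether to bring an umbrella.',
    'Look up the forecast and decide if an umbrella is needed.',
]
emb = model.encode(pairs, convert_to_tensor=True)
for i in range(0, len(pairs), 2):
    # Clamp at 0, matching np.maximum(..., 0) in _post_process.
    score = max(float(util.cos_sim(emb[i], emb[i + 1])), 0.0)
    print(f'pair {i // 2}: similarity = {score:.3f}')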


class ReasonRetrieveUnderstandEvaluatorNoBatch:
    """Reason/Retrieve/Understand Evaluation (single-sample variant, no batching).

    Args:
        dataset_path(str): File path of evaluation dataset.
        bert_score_model(str): the bert_score model for sentence similarity, default = "all-mpnet-base-v2".
            Refer to https://www.sbert.net/docs/pretrained_models.html for more models.
    """
    def __init__(
        self,
        dataset_path: str,
        bert_score_model: str = "all-mpnet-base-v2",
        default_prompt_type: str = 'json',
        eval_type: str = 'reason',
    ) -> None:
        self.bert_score_model = bert_score_model
        self.dataset_path = dataset_path
        # self.bertscore = evaluate.load('bertscore')
        self.default_prompt_type = default_prompt_type  # ["json", "str"]
        self.eval_type = eval_type
        self.valid_data_count = None
        self.sentence_model = SentenceTransformer(self.bert_score_model)

    def _load_dataset(self):
        self.dataset = []
        dataset = load(self.dataset_path)
        total_error = 0
        total_count = 0
        for key in dataset.keys():
            datum = dataset[key]
            data_sample, error = self._process_response(datum)
            total_error += error
            total_count += 1
            self.dataset.append(
                dict(response_data_sample=data_sample))

        self.num_samples = len(self.dataset)
        # print("total_data_count:", total_count, "valid_data_count:", total_count - total_error)
        self.valid_data_count = total_count - total_error

    def format_load(self, data):
        r'''
            ensure evaluator can work correctly under any data input
        '''
        if type(data) == dict:
            json_format = data
        else:
            try:
                json_format = json.loads(data)  # json.loads(pred_data)
            except Exception as e:
                return {}
        if type(json_format) != dict:
            return {}
        prepared_json_format = dict()
        try:
            prepared_json_format['thought'] = str(json_format['thought'])
        except Exception as e:
            prepared_json_format['thought'] = ''
        try:
            prepared_json_format['name'] = str(json_format['name'])
        except Exception as e:
            prepared_json_format['name'] = ''
        try:
            if prepared_json_format["name"] != "FinishAction":
                arg_inputs = json_format["args"]
                if type(arg_inputs) == str:
                    arg_inputs = json.loads(arg_inputs)
                if type(arg_inputs) == dict:
                    prepared_json_format['args'] = arg_inputs
                else:
                    prepared_json_format["args"] = {}
            else:
                prepared_json_format["args"] = {}
        except Exception as e:
            prepared_json_format['args'] = {}
        return prepared_json_format

    def _process_response(
        self,
        datum,
    ) -> ResponseDataSample:
        """Process the response to needed format.
        Args:
            datum(dict): inputs.
        Returns:
            dict: Processed response data sample.
        """

        # Generated response, which can be a string or list
        pred_data = datum['prediction']
        # Response of ground truth, which can be a string or list
        gt_data = datum['ground_truth']
        # prompt_type: The type of planning prompt, supporting "json" and "ReWOO"
        if "meta" in datum:
            prompt_type = datum["meta"].get("response_format", self.default_prompt_type)
        else:
            prompt_type = self.default_prompt_type

        error = 0
        gt = self.format_load(gt_data)
        # pred_data = input_postprocess(pred_data)
        if prompt_type == 'json':
            # pred_data = pred_data.replace('\'', '\"')
            pred = self.format_load(pred_data)
            if pred == {} or gt == {}:
                error = 1
        elif prompt_type == 'str':
            # choose the first line
            pred = dict()
            if self.eval_type == 'reason':
                pred['thought'] = pred_data
            if self.eval_type == 'retrieve':
                pred['name'] = pred_data
            if self.eval_type == 'understand':
                # pred_data = pred_data.replace('\'', '\"')
                # try:
                #     pred['args'] = json.loads(pred_data)
                #     if type(pred['args']) != dict:
                #         pred['args'] = {}
                # except Exception as e:
                #     error = 1
                pred['args'] = pred_data
        else:
            raise NotImplementedError(f"Currently, we only support json and str format, but get {prompt_type}")

        if error == 1:
            pred = dict()
        return ResponseDataSample(template='', pred=pred, gt=gt), error

    def _evaluate(self, data_sample) -> dict:
        """Evaluate the response data sample.
        """
        metrics_result = {
            'thought': 0,
            'name': 0,
            'args_precision': 0,
            'args_recall': 0,
            'args_f1_score': 0,
            'parse_rate': 0,
        }
        if 'thought' in data_sample.pred and 'thought' in data_sample.gt:
            pred_emb = self.sentence_model.encode(data_sample.pred['thought'], convert_to_tensor=True)
            gt_emb = self.sentence_model.encode(data_sample.gt['thought'], convert_to_tensor=True)
            cosine_scores = np.maximum(util.cos_sim(pred_emb, gt_emb).cpu().numpy(), 0)
            metrics_result['thought'] = cosine_scores[0, 0]

        if 'name' in data_sample.pred and 'name' in data_sample.gt:
            if data_sample.pred['name'] == data_sample.gt['name']:
                metrics_result['name'] = 1
            else:
                metrics_result['name'] = 0
        if 'args' in data_sample.pred and 'args' in data_sample.gt:
            gt_num_keys = len(data_sample.gt['args'].keys())
            pred_num_keys = len(data_sample.pred['args'].keys())
            if pred_num_keys == 0 and gt_num_keys == 0:
                metrics_result['args_precision'] = 1
                metrics_result['args_recall'] = 1
                metrics_result['args_f1_score'] = 1
            elif pred_num_keys == 0 or gt_num_keys == 0:
                metrics_result['args_precision'] = 0
                metrics_result['args_recall'] = 0
                metrics_result['args_f1_score'] = 0
            else:
                correct_count = 0
                for key in data_sample.gt['args'].keys():
                    if key in data_sample.pred['args'] and str(data_sample.pred['args'][key]) == str(data_sample.gt['args'][key]):
                        correct_count += 1
                metrics_result['args_precision'] = correct_count / pred_num_keys
                metrics_result['args_recall'] = correct_count / gt_num_keys
                if metrics_result['args_precision'] + metrics_result['args_recall'] == 0:
                    metrics_result['args_f1_score'] = 0
                else:
                    metrics_result['args_f1_score'] = 2 * metrics_result['args_precision'] * metrics_result['args_recall'] / \
                        (metrics_result['args_precision'] + metrics_result['args_recall'])

        if len(data_sample.pred.keys()) == 0:
            metrics_result['parse_rate'] = 0
        else:
            metrics_result['parse_rate'] = 1
        return metrics_result

    def evaluate(self):
        self._load_dataset()
        results_list = []
        for data_sample in tqdm(self.dataset):
            metrics_result = self._evaluate(
                data_sample['response_data_sample'])
            results_list.append(metrics_result)
        return self._post_process(results_list)

    def _post_process(self, results_list):
        # list of dict to dict of list
        results = dict()
        if self.default_prompt_type == 'json':
            metric_keys = ['thought', 'name', 'args_precision', 'args_recall', 'args_f1_score', 'parse_rate']
        if self.default_prompt_type == 'str':
            if self.eval_type == 'reason':
                metric_keys = ['thought', 'parse_rate']
            if self.eval_type == 'retrieve':
                metric_keys = ['name', 'parse_rate']
            if self.eval_type == 'understand':
                metric_keys = ['args_precision', 'args_recall', 'args_f1_score', 'parse_rate']
        for key in metric_keys:
            results[key] = mean([result[key] for result in results_list])
        return results
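The args scoring in `_evaluate` matches keys by name and stringified value, then derives precision, recall, and F1. A standalone sketch of that arithmetic (the argument dicts are made up):

gt_args = {'city': 'Shanghai', 'unit': 'celsius'}
pred_args = {'city': 'Shanghai', 'unit': 'fahrenheit', 'lang': 'en'}

correct = sum(1 for k, v in gt_args.items()
              if k in pred_args and str(pred_args[k]) == str(v))
precision = correct / len(pred_args)   # 1 / 3
recall = correct / len(gt_args)        # 1 / 2
f1 = (2 * precision * recall / (precision + recall)
      if precision + recall else 0)
print(precision, recall, round(f1, 3))  # 0.333... 0.5 0.4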
123
opencompass/datasets/teval/evaluators/review_evaluator.py
Normal file
@ -0,0 +1,123 @@
from collections import defaultdict
from mmengine import load

from ..schema import ResponseDataSample
import numpy as np
from ..utils.format_load import format_load

class ReviewEvaluator:
    """Review Capability Evaluation

    Args:
        dataset_path(str): File path of evaluation dataset.

    """

    def __init__(
        self,
        dataset_path: str,
        # bert_score_model: str = "all-mpnet-base-v2",
        **kwargs,
    ) -> None:
        self.dataset_path = dataset_path
        # self.bert_score_model = bert_score_model
        # self.sentence_model = SentenceTransformer(self.bert_score_model)

    def _load_dataset(self):
        self.dataset = []
        dataset = load(self.dataset_path)

        for key in dataset.keys():
            datum = dataset[key]
            data_sample = self._process_response(datum)

            self.dataset.append(
                dict(
                    origin_prompt=datum['origin_prompt'],
                    response_data_sample=data_sample))
        self.num_samples = len(self.dataset)

    def _process_response(
        self,
        datum: dict,
    ) -> ResponseDataSample:
        """Process the response to needed format.

        Args:
            datum(dict): inputs.

        Returns:
            dict: Processed response data sample.
        """

        template = datum['template']
        pred_data = datum['prediction']
        gt_data = datum['ground_truth']['answer']
        meta_data = datum['meta_data']

        if meta_data['response_format'] == 'json':
            pred_data = self.json_format_parse(pred_data)
        else:
            pred_data = pred_data[pred_data.find(":") + 1:]
            pred_data = pred_data.strip()
            if len(pred_data) > 0 and pred_data[0] in ['A', 'B', 'C', 'D', 'E']:
                pred_data = pred_data[0]
            else:
                pred_data = None

        return ResponseDataSample(
            template=template, pred=pred_data, gt=gt_data, meta_data=meta_data)

    def _evaluate(self, data_sample) -> dict:
        metrics_result = dict(
            parse_rate=0,
            review_quality=0,
        )

        pred_data = data_sample.pred
        if pred_data is not None:
            metrics_result['review_quality'] = 1.0 if pred_data == \
                data_sample.gt else 0.0
            metrics_result['parse_rate'] = 1.0
        return metrics_result

    # def compute_sen_similarity(self, gt, pred):
    #     gt_embed = self.sentence_model.encode(gt, convert_to_tensor=True)
    #     pred_embed = self.sentence_model.encode(pred, convert_to_tensor=True)
    #     sen_sim = max(0, util.cos_sim(gt_embed, pred_embed).item())
    #     return sen_sim

    def json_format_parse(self, pred_data):
        try:
            data = format_load(pred_data)
        except Exception as e:
            return None
        try:
            new_data = dict()
            new_data['review'] = data['is_finished']
            assert new_data['review'] in [True, False]
        except Exception as e:
            return None
        return new_data

    def evaluate(self):
        self._load_dataset()
        results_list = []
        for data_sample in self.dataset:
            metrics_result = self._evaluate(
                data_sample['response_data_sample'])
            results_list.append(metrics_result)
        return self._post_process(results_list)

    def _post_process(self, results_list):
        # list of dict to dict of list
        results_dict = defaultdict(list)
        for sub in results_list:
            for key in sub:
                results_dict[key].append(sub[key])
        metric_list = ['parse_rate', 'review_quality']
        for metric in metric_list:
            results_dict[metric] = np.round(np.mean(results_dict[metric]), decimals=4)
        return results_dict
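The two prediction formats ReviewEvaluator accepts, illustrated with made-up strings: JSON-style responses must carry an `is_finished` bool (parsed by `json_format_parse`), while string-style responses are reduced to a leading option letter A-E.

json_pred = "{'is_finished': True}"  # -> {'review': True} via json_format_parse
str_pred = 'Answer: B, because the plan already covers every step.'

option = str_pred[str_pred.find(':') + 1:].strip()
option = option[0] if option and option[0] in 'ABCDE' else None
print(option)  # B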
19
opencompass/datasets/teval/schema.py
Normal file
@ -0,0 +1,19 @@
from dataclasses import asdict, dataclass, field
from typing import Any, Dict


@dataclass
class ResponseDataSample:
    """
    Args:
        template(str): Format string with keyword-only arguments. For
            example '{who} like {what}'
        pred(Any): Parsed data from the LLM-generated response.
        gt(Any): Ground truth data.
        meta_data(dict, optional): Meta information used to evaluate the
            LLM's response.
    """
    template: str
    pred: Any
    gt: Any
    meta_data: dict = None
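A made-up sample showing how the evaluators above populate this schema: `pred` and `gt` hold parsed structures, and `meta_data` carries flags such as the response format.

sample = ResponseDataSample(
    template='',
    pred={'thought': 'look up the weather', 'name': 'weather_api.query', 'args': {}},
    gt={'thought': 'query the forecast', 'name': 'weather_api.query', 'args': {}},
    meta_data={'response_format': 'json'},
)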
0
opencompass/datasets/teval/utils/__init__.py
Normal file
35
opencompass/datasets/teval/utils/convert_results.py
Normal file
@ -0,0 +1,35 @@
import mmengine
import os
import argparse
import numpy as np
# np.set_printoptions(precision=1)

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--result_path', type=str)
    args = parser.parse_args()
    return args

def convert_results(result_path):
    result = mmengine.load(result_path)
    instruct_list = [(result['instruct_json']['json_format_metric'] + result['instruct_json']['json_args_em_metric']) / 2, \
                     (result['instruct_json']['string_format_metric'] + result['instruct_json']['string_args_em_metric']) / 2]
    plan_list = [result['plan_str']['f1_score'], result['plan_json']['f1_score']]
    reason_list = [result['reason_str']['thought'], result['rru_json']['thought']]
    retrieve_list = [result['retrieve_str']['name'], result['rru_json']['name']]
    understand_list = [result['understand_str']['args'], result['rru_json']['args']]
    review_list = [result['review_str']['review_quality'], result['review_str']['review_quality']]

    final_score = [np.mean(instruct_list), np.mean(plan_list), np.mean(reason_list), \
                   np.mean(retrieve_list), np.mean(understand_list), np.mean(review_list)]
    overall = np.mean(final_score)
    final_score.insert(0, overall)
    name_list = ['Overall', 'Instruct', 'Plan', 'Reason', 'Retrieve', 'Understand', 'Review']
    print("Cut Paste Results: ", np.array(final_score) * 100)
    for i in range(len(name_list)):
        print("%s: %.1f" % (name_list[i], final_score[i]*100), end='\t')


if __name__ == '__main__':
    args = parse_args()
    convert_results(args.result_path)
44
opencompass/datasets/teval/utils/format_load.py
Normal file
@ -0,0 +1,44 @@
import ast
import json

def format_load(raw_data: str, start_character: str = '', end_character: str = ''):
    """Format the raw data into the format that can be evaluated.

    Args:
        raw_data (str): The raw data.
        start_character (str, optional): The start character. Defaults to '', if using it, the string will be sliced from the first start_character.
        end_character (str, optional): The end character. Defaults to '', if using it, the string will be sliced to the last end_character.

    Returns:
        Any: The parsed data (dict, list, str, ...).
    """
    if type(raw_data) != str:
        # the data has been evaluated
        return raw_data
    if "```json" in raw_data:
        raw_data = raw_data[raw_data.find("```json") + len("```json"):]
        raw_data = raw_data.strip("`")
    if start_character != '':
        raw_data = raw_data[raw_data.find(start_character):]
    if end_character != '':
        raw_data = raw_data[:raw_data.rfind(end_character) + len(end_character)]
    successful_parse = False
    try:
        data = ast.literal_eval(raw_data)
        successful_parse = True
    except Exception as e:
        pass
    try:
        if not successful_parse:
            data = json.loads(raw_data)
            successful_parse = True
    except Exception as e:
        pass
    try:
        if not successful_parse:
            data = json.loads(raw_data.replace("\'", "\""))
            successful_parse = True
    except Exception as e:
        pass
    if not successful_parse:
        raise Exception("Cannot parse raw data")
    return data
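format_load tolerates several common LLM output shapes; all of the following parse to the same dict (the example payloads are made up):

print(format_load("{'name': 'FinishAction', 'args': {}}"))          # Python literal
print(format_load('{"name": "FinishAction", "args": {}}'))          # strict JSON
print(format_load('```json\n{"name": "FinishAction", "args": {}}\n```'))  # fenced block
print(format_load('noise {"name": "FinishAction", "args": {}} noise',
                  start_character='{', end_character='}'))          # sliced out of noise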
11
opencompass/datasets/teval/utils/meta_template.py
Normal file
@ -0,0 +1,11 @@
meta_template_dict = dict(
    internlm=[
        dict(role='system', begin='<|System|>:', end='\n'),
        dict(role='user', begin='<|User|>:', end='\n'),
        dict(
            role='assistant',
            begin='<|Bot|>:',
            end='<eoa>\n',
            generate=True)
    ],
)
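A hand-rolled rendering of the internlm meta template above, just to show what the begin/end markers produce (the dialogue content is made up):

turns = [('system', 'You are a helpful assistant.'), ('user', 'Hi!')]
markers = {r['role']: r for r in meta_template_dict['internlm']}
prompt = ''.join(markers[role]['begin'] + text + markers[role]['end']
                 for role, text in turns)
prompt += markers['assistant']['begin']  # generation starts after <|Bot|>:
print(prompt)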
76
opencompass/datasets/teval/utils/template.py
Normal file
@ -0,0 +1,76 @@
import re
from string import Formatter


def format_string(template: str, input_data: dict) -> str:
    """Return string with input content according input format template.

    Args:
        template (str): Format string with keyword-only argument. For
            example '{who} like {what}'
        input_data (dict): Input data to fill in the input template.

    Returns:
        str: Return string.
    """

    return template.format(**input_data)


def parse_string(template: str, input_string: str, allow_newline: bool=False) -> dict:
    """Return a dictionary whose keys are from input template and value is
    responding content from input_string.

    Args:
        template (str): Format template with keyword-only argument. For
            example '{who} like {what}'
        input_string (str): Input string to be parsed.
        allow_newline (bool): Whether to allow '\n' in {} during RE match, default to False.

    Returns:
        dict: Parsed data from input string according to format string. If
            input string doesn't match template, it will return None.

    Examples:
        >>> template = '{who} like {what}'
        >>> input_string = 'monkey like banana'
        >>> data = parse_string(template, input_string)
        >>> data
        >>> {'who': 'monkey', 'what': 'banana'}
        >>> input_string = 'monkey likes banana'
        >>> data = parse_string(template, input_string)
        >>> data
        >>> None
        >>> template = '{what} like {what}'
        >>> input_string = 'monkey like banana'
        >>> data = parse_string(template, input_string)
        >>> data
        >>> {'what': ['monkey', 'banana']}
    """

    formatter = Formatter()
    context = []
    keys = []
    for v in formatter.parse(template):
        # v is (literal_text, field_name, format_spec, conversion)
        if v[1] is not None:
            keys.append(v[1])
        context.append(v[0])
    pattern = template
    for k in keys:
        pattern = pattern.replace('{' + f'{k}' + '}', '(.*)')
    # pattern = re.compile(rf'{pattern}')
    values = re.findall(pattern, input_string, re.S if allow_newline else 0)
    if len(values) < 1:
        return None
    data = dict()
    for k, v in zip(keys, values[0]):
        if k in data:
            tmp = data[k]
            if isinstance(tmp, list):
                data[k].append(v)
            else:
                data[k] = [tmp, v]
        else:
            data[k] = v
    return data
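With allow_newline=True, a placeholder can span line breaks, since the '(.*)' groups are matched with re.S. A made-up example beyond the docstring's:

template = 'Thought: {thought}\nAction: {action}'
text = 'Thought: check the forecast\nfor tomorrow\nAction: weather_api.query'
print(parse_string(template, text, allow_newline=True))
# {'thought': 'check the forecast\nfor tomorrow', 'action': 'weather_api.query'}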
@ -85,9 +85,10 @@ class TriviaQAEvaluator(BaseEvaluator):
        cnt = 0
        for pred, cand_ans in zip(processed_predictions, processed_answers):
            detail = {'pred': pred, 'answer': cand_ans, 'correct': False}
            cnt += int(any([cand == pred for cand in cand_ans]))
            if int(any([cand == pred for cand in cand_ans])):
                detail['correct'] = True
            # is_correct = any([cand == pred for cand in cand_ans])
            is_correct = any([cand in pred for cand in cand_ans])
            cnt += int(is_correct)
            detail['correct'] = is_correct
            details.append(detail)
        score = cnt / len(predictions) * 100

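The change above scores a candidate as correct when it appears inside the prediction rather than only on exact equality, so a verbose answer now counts (strings made up):

pred = 'the answer is paris'
cand_ans = ['paris']
exact = any(cand == pred for cand in cand_ans)   # False
substr = any(cand in pred for cand in cand_ans)  # True
print(exact, substr)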
@ -150,7 +150,7 @@ class BaiChuan(BaseAPIModel):
                return msg

            if raw_response.status_code != 200:
                print(raw_response)
                print(raw_response.json())
                time.sleep(1)
                continue
            print(response)

@ -109,10 +109,8 @@ class HuggingFace(BaseModel):
                         max_seq_len=max_seq_len,
                         tokenizer_only=tokenizer_only,
                         meta_template=meta_template)
        from opencompass.utils.fileio import patch_hf_auto_model
        if hf_cache_dir is None:
            hf_cache_dir = os.getenv('HF_MODEL_HUB', None)
        patch_hf_auto_model(hf_cache_dir)
        self.logger = get_logger()
        self.pad_token_id = pad_token_id
        assert mode in ['none', 'mid']

@ -405,6 +405,7 @@ class OpenAIAllesAPIN(OpenAI):
            except requests.JSONDecodeError:
                self.logger.error('JsonDecode error, got',
                                  str(raw_response.content))
                time.sleep(1)
                continue
            if raw_response.status_code == 200 and response[
                    'msgCode'] == '10000':
@ -415,6 +416,8 @@
                else:
                    return choices[0]['message']['content'].strip()
            self.logger.error(response['msg'])
            self.logger.error(response)
            time.sleep(1)

        raise RuntimeError('API call failed.')

@ -193,6 +193,7 @@ class SenseTime(BaseAPIModel):
                    time.sleep(1)
                    continue

            return ''
        raise RuntimeError(
            f'request id: '
            f'{raw_response.headers.get("X-Request-Id")}, {raw_response.text}')

@ -119,6 +119,8 @@ class ZhiPuV2AI(BaseAPIModel):
        while max_num_retries < self.retry:
            self.acquire()

            response = None

            try:
                response = self.client.chat.completions.create(**data)
            except APIStatusError as err:

@ -30,12 +30,17 @@ class NumWorkerPartitioner(BasePartitioner):
                 out_dir: str,
                 num_worker: int = 8,
                 min_task_size: int = 16,
                 strategy: str = 'heuristic',
                 dataset_size_path: str = '.cache/dataset_size.json',
                 keep_keys: Optional[List[str]] = None):
        super().__init__(out_dir=out_dir, keep_keys=keep_keys)
        self.num_worker = num_worker
        self.min_task_size = min_task_size
        self.dataset_size_path = dataset_size_path
        assert strategy in ('heuristic', 'split'), \
            f'Unsupported partition strategy: {strategy}. '\
            'Supported strategies are: `heuristic`, `split` .'
        self.strategy = strategy

    def partition(self,
                  model_dataset_combinations: List[Dict[str, List]],
@ -64,16 +69,26 @@
            else:
                chunks.append(dataset)

            buckets = [[] for _ in range(self.num_worker)]
            for i, chunk in enumerate(chunks):
                buckets[i % self.num_worker].append(chunk)
            if self.strategy == 'heuristic':
                buckets = [[] for _ in range(self.num_worker)]
                for i, chunk in enumerate(chunks):
                    buckets[i % self.num_worker].append(chunk)

            for bucket in buckets:
                if len(bucket) > 0:
                for bucket in buckets:
                    if len(bucket) > 0:
                        tasks.append(
                            Config({
                                'models': [model],
                                'datasets': [bucket],
                                'work_dir': work_dir,
                                **add_cfg
                            }))
            elif self.strategy == 'split':
                for dataset in chunks:
                    tasks.append(
                        Config({
                            'models': [model],
                            'datasets': [bucket],
                            'datasets': [[dataset]],
                            'work_dir': work_dir,
                            **add_cfg
                        }))

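The two partition strategies introduced above, reduced to plain lists: 'heuristic' round-robins chunks into num_worker buckets (one task per non-empty bucket), while 'split' emits one task per chunk (chunk labels made up):

chunks = ['d0', 'd1', 'd2', 'd3', 'd4']
num_worker = 2

buckets = [[] for _ in range(num_worker)]
for i, chunk in enumerate(chunks):
    buckets[i % num_worker].append(chunk)
print(buckets)                # [['d0', 'd2', 'd4'], ['d1', 'd3']] -> 2 tasks
print([[c] for c in chunks])  # 'split': 5 single-chunk tasks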
@ -16,7 +16,7 @@ from opencompass.utils import (LarkReporter, dataset_abbr_from_cfg,
                               model_abbr_from_cfg)
from opencompass.utils.prompt import get_prompt_hash

METRIC_WHITELIST = ['score', 'auc_score', 'accuracy', 'humaneval_pass@1', 'rouge1', 'avg_toxicity_score', 'bleurt_diff', 'matthews_correlation', 'truth']
METRIC_WHITELIST = ['score', 'auc_score', 'accuracy', 'humaneval_pass@1', 'rouge1', 'avg_toxicity_score', 'bleurt_diff', 'matthews_correlation', 'truth', 'f1', 'exact_match']
METRIC_BLACKLIST = ['bp', 'sys_len', 'ref_len']

def model_abbr_from_cfg_used_in_summarizer(model):

@ -29,6 +29,62 @@ All_Dimensions = [
    '公平与可负责程度', '丰富度', '综合得分'
]

MAPPING = {
    '事实与解释型回答': ['事实正确性', '满足用户需求', '清晰度', '完备性'],
    '逻辑推理型回答': ['事实正确性', '满足用户需求', '逻辑连贯性', '完备性'],
    '生成型回答': ['事实正确性', '满足用户需求', '逻辑连贯性', '创造性', '丰富度'],
    '建议型回答': ['事实正确性', '满足用户需求', '公平与可负责程度', '创造性']
}


def detect_mapping(text):
    if '清晰度' in text and '完备性' in text:
        return '事实与解释型回答'
    elif '完备性' in text and '逻辑连贯性' in text:
        return '逻辑推理型回答'
    elif '创造性' in text and '丰富度' in text:
        return '生成型回答'
    elif '创造性' in text and '公平与可负责程度' in text:
        return '建议型回答'
    else:
        return None


def extract_missing_rating(text, search_type):
    searching_keys = MAPPING[search_type]
    result_dict = {}
    for k in searching_keys:
        matches = re.findall(rf'{k}.*?\n', text)
        result_dict[k] = None
        for match in reversed(matches):
            if re.findall(r'\d{1,2}', match):
                result_dict[k] = int(re.findall(r'\d{1,2}', match)[-1])
                break
    overall_number = re.findall(r'\d{1,2}', text)
    try:
        result_dict['综合得分'] = int(overall_number[-1])
    except Exception:
        return {}
    return result_dict


def extract_rating_plus(text):
    pattern = r'{(.*?)}(?![^{]*{)'  # match last brackets
    match = re.search(pattern, text)

    if match:
        dictionary_str = match.group(1)
        kv_pattern = r"'(.*?)': (\d+)"
        matches = re.findall(kv_pattern, dictionary_str)
        result_dict = {key: int(value) for key, value in matches}
        return result_dict
    else:
        match_type = detect_mapping(text=text)
        if match_type is not None:
            return extract_missing_rating(text=text, search_type=match_type)
        else:
            return None


def extract_rating(text):
    pattern = r'{(.*?)}(?![^{]*{)'  # match last brackets
@ -56,6 +112,50 @@ def check_rating(rating, all_dimensions):
    return rating


def post_process_alignbench_plus(judgement: str,
                                 all_dimensions=All_Dimensions,
                                 possible_keys=['综合得分']):
    """Input a string like below:

    xxx{'事实正确性': 1, '满足用户需求': 1, '清晰度': 2, '完备性': 1, '综合得分': 1}xxx,
    and extract each score
    """

    def extract_score(text):
        keys_pattern = '|'.join(map(re.escape, possible_keys))
        pattern = rf"({keys_pattern}): (\d+(\.\d{{1,2}})?)"
        match = re.search(pattern, text)
        if match:
            try:
                # group(2) holds the numeric score; group(1) is the key name.
                return float(match.group(2))
            except ValueError:
                return -1
        return -1

    # judgement = judgement.replace('\n', '')
    rating = extract_rating_plus(judgement)

    if rating is not None:
        score = -1
        for key in possible_keys:
            score = rating.get(key, -1)
            if score != -1:
                break
        if score == -1:
            score = extract_score(judgement)
        if score >= 0 and score <= 10:
            pass
        else:
            score = -1
        rating = check_rating(rating, all_dimensions)
    else:
        score = -1
    if rating is None or score == -1:
        return None
    else:
        return {'rating': rating, 'score': score}


def post_process_alignbench(judgement: str,
                            all_dimensions=All_Dimensions,
                            possible_keys=['综合得分']):
@ -211,9 +311,12 @@ class AlignmentBenchSummarizer:
        ]
        self.judge_abbr = model_abbr_from_cfg(self.cfg['judge_model'])
        self.judge_type = judge_type
        assert self.judge_type in ['general', 'autoj', 'judgelm']
        assert self.judge_type in [
            'general', 'autoj', 'judgelm', 'general_plus'
        ]
        self.judge_map = {
            'general': post_process_alignbench,
            'general_plus': post_process_alignbench_plus,
            'autoj': post_process_autoj,
            'judgelm': post_process_judgelm
        }

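How the rating extraction above behaves on a made-up judgement string: extract_rating_plus pulls the last {...} dict out of the judge's free-form text, and the post-processing then reads the '综合得分' key as the overall score.

import re
judgement = "分析……{'事实正确性': 7, '满足用户需求': 8, '综合得分': 8}"
m = re.search(r'{(.*?)}(?![^{]*{)', judgement)  # last {...} block
rating = {k: int(v) for k, v in re.findall(r"'(.*?)': (\d+)", m.group(1))}
print(rating['综合得分'])  # 8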
@ -67,8 +67,10 @@ class OpenICLEvalTask(BaseTask):

    def __init__(self, cfg: ConfigDict):
        super().__init__(cfg)
        self.num_gpus = 0
        self.logger = get_logger()
        self.num_gpus = max(
            c.get('eval_cfg', {}).get('num_gpus', 0)
            for c in sum(self.dataset_cfgs, []))
        self.dump_details = cfg.get('eval', {}).get('runner', {}).get(
            'task', {}).get('dump_details', False)

@ -83,7 +83,6 @@ def first_option_postprocess(text: str, options: str, cushion=True) -> str:
        f'([{options}])\s?是正确答案',
        f'选项\s?([{options}])\s?正确',
        f'所以答\s?([{options}])',
        f'1.\s?([{options}])[.。$]?$',
        f'所以\s?([{options}][.。$]?$)',
        f'所有\s?([{options}][.。$]?$)',
        f'[\s,::,]([{options}])[。,,\.]?$',
@ -105,6 +104,7 @@ def first_option_postprocess(text: str, options: str, cushion=True) -> str:
        f'(\s|^)[{options}][\s。,,::\.$]',
        f'(\s|^)[{options}](\s|$)',
        f'1.\s?(.*?)$',
        f'1.\s?([{options}])[.。$]?$',
    ]
    cushion_patterns = [
        f'([{options}]):',

@ -4,8 +4,9 @@ from typing import Dict

from mmengine.config import Config, ConfigDict

from opencompass.openicl.icl_inferencer import (CLPInferencer, GenInferencer,
                                                PPLInferencer,
from opencompass.openicl.icl_inferencer import (AgentInferencer,
                                                ChatInferencer, CLPInferencer,
                                                GenInferencer, PPLInferencer,
                                                PPLOnlyInferencer)
from opencompass.registry import ICL_PROMPT_TEMPLATES, ICL_RETRIEVERS
from opencompass.utils import (Menu, build_dataset_from_cfg,
@ -78,12 +79,16 @@ def print_prompts(model_cfg, dataset_cfg, count=1):

    ice_idx_list = retriever.retrieve()

    assert infer_cfg.inferencer.type in [
        PPLInferencer, GenInferencer, CLPInferencer, PPLOnlyInferencer], \
        'Only PPLInferencer and GenInferencer are supported'
    supported_inferencer = [
        AgentInferencer, PPLInferencer, GenInferencer, CLPInferencer,
        PPLOnlyInferencer, ChatInferencer
    ]
    if infer_cfg.inferencer.type not in supported_inferencer:
        print(f'Only {supported_inferencer} are supported')
        return

    for idx in range(min(count, len(ice_idx_list))):
        if infer_cfg.inferencer.type == PPLInferencer:
        if issubclass(infer_cfg.inferencer.type, PPLInferencer):
            labels = retriever.get_labels(ice_template=ice_template,
                                          prompt_template=prompt_template)
            ice = retriever.generate_ice(ice_idx_list[idx],
@ -129,9 +134,7 @@ def print_prompts(model_cfg, dataset_cfg, count=1):
            print('-' * 100)
            print(prompt)
            print('-' * 100)
        elif infer_cfg.inferencer.type in [
            GenInferencer, CLPInferencer, PPLOnlyInferencer
        ]:
        else:
            ice_idx = ice_idx_list[idx]
            ice = retriever.generate_ice(ice_idx, ice_template=ice_template)
            prompt = retriever.generate_prompt_for_generate_task(
