[Dataset] LongBench (#236)
Co-authored-by: wangchonghua <wangchonghua@pjlab.org.cn>
parent c6a3494993
commit 655a807f4b
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_2wikimqa_gen_6b3efc import LongBench_2wikimqa_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchF1Evaluator, LongBench2wikimqaDataset

LongBench_2wikimqa_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_2wikimqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. Only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=32)
)

LongBench_2wikimqa_eval_cfg = dict(
    evaluator=dict(type=LongBenchF1Evaluator),
    pred_role='BOT'
)

LongBench_2wikimqa_datasets = [
    dict(
        type=LongBench2wikimqaDataset,
        abbr='LongBench_2wikimqa',
        path='THUDM/LongBench',
        name='2wikimqa',
        reader_cfg=LongBench_2wikimqa_reader_cfg,
        infer_cfg=LongBench_2wikimqa_infer_cfg,
        eval_cfg=LongBench_2wikimqa_eval_cfg)
]
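A minimal sketch (not part of this commit) of how a generated LongBench config such as the one above is usually consumed in an evaluation config: the `*_gen.py` files are pulled in via read_base and their `*_datasets` lists concatenated. The file name and import paths below are placeholders, not paths from this diff.

# eval_longbench_sketch.py -- illustrative only
from mmengine.config import read_base

with read_base():
    # adjust the relative paths to wherever these configs live
    from .longbench_2wikimqa_gen import LongBench_2wikimqa_datasets  # noqa: F401
    from .longbench_hotpotqa_gen import LongBench_hotpotqa_datasets  # noqa: F401

# gather every imported `*_datasets` list into one `datasets` variable
datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])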
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_dureader_gen_c6c7e4 import LongBench_dureader_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchRougeEvaluator, LongBenchdureaderDataset

LongBench_dureader_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_dureader_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='请基于给定的文章回答下述问题。\n\n文章:{context}\n\n请基于上述文章回答下面的问题。\n\n问题:{input}\n回答:'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=128)
)

LongBench_dureader_eval_cfg = dict(
    evaluator=dict(type=LongBenchRougeEvaluator, language='zh'),
    pred_role='BOT'
)

LongBench_dureader_datasets = [
    dict(
        type=LongBenchdureaderDataset,
        abbr='LongBench_dureader',
        path='THUDM/LongBench',
        name='dureader',
        reader_cfg=LongBench_dureader_reader_cfg,
        infer_cfg=LongBench_dureader_infer_cfg,
        eval_cfg=LongBench_dureader_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_gov_report_gen_54c5b0 import LongBench_gov_report_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchRougeEvaluator, LongBenchgov_reportDataset

LongBench_gov_report_reader_cfg = dict(
    input_columns=['context'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_gov_report_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='You are given a report by a government agency. Write a one-page summary of the report.\n\nReport:\n{context}\n\nNow, write a one-page summary of the report.\n\nSummary:'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512)
)

LongBench_gov_report_eval_cfg = dict(
    evaluator=dict(type=LongBenchRougeEvaluator),
    pred_role='BOT'
)

LongBench_gov_report_datasets = [
    dict(
        type=LongBenchgov_reportDataset,
        abbr='LongBench_gov_report',
        path='THUDM/LongBench',
        name='gov_report',
        reader_cfg=LongBench_gov_report_reader_cfg,
        infer_cfg=LongBench_gov_report_infer_cfg,
        eval_cfg=LongBench_gov_report_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_hotpotqa_gen_6b3efc import LongBench_hotpotqa_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchF1Evaluator, LongBenchhotpotqaDataset

LongBench_hotpotqa_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_hotpotqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. Only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=32)
)

LongBench_hotpotqa_eval_cfg = dict(
    evaluator=dict(type=LongBenchF1Evaluator),
    pred_role='BOT'
)

LongBench_hotpotqa_datasets = [
    dict(
        type=LongBenchhotpotqaDataset,
        abbr='LongBench_hotpotqa',
        path='THUDM/LongBench',
        name='hotpotqa',
        reader_cfg=LongBench_hotpotqa_reader_cfg,
        infer_cfg=LongBench_hotpotqa_infer_cfg,
        eval_cfg=LongBench_hotpotqa_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_lcc_gen_6ba507 import LongBench_lcc_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchCodeSimEvaluator, LongBenchlccDataset

LongBench_lcc_reader_cfg = dict(
    input_columns=['context'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_lcc_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Please complete the code given below. \n{context}Next line of code:\n'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=64)
)

LongBench_lcc_eval_cfg = dict(
    evaluator=dict(type=LongBenchCodeSimEvaluator),
    pred_role='BOT'
)

LongBench_lcc_datasets = [
    dict(
        type=LongBenchlccDataset,
        abbr='LongBench_lcc',
        path='THUDM/LongBench',
        name='lcc',
        reader_cfg=LongBench_lcc_reader_cfg,
        infer_cfg=LongBench_lcc_infer_cfg,
        eval_cfg=LongBench_lcc_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_lsht_gen_e8a339 import LongBench_lsht_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchClassificationEvaluator, LongBenchlshtDataset

LongBench_lsht_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='all_labels',
    train_split='test',
    test_split='test'
)

LongBench_lsht_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='请判断给定新闻的类别,下面是一些例子。\n\n{context}\n{input}'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=64)
)

LongBench_lsht_eval_cfg = dict(
    evaluator=dict(type=LongBenchClassificationEvaluator),
    pred_role='BOT'
)

LongBench_lsht_datasets = [
    dict(
        type=LongBenchlshtDataset,
        abbr='LongBench_lsht',
        path='THUDM/LongBench',
        name='lsht',
        reader_cfg=LongBench_lsht_reader_cfg,
        infer_cfg=LongBench_lsht_infer_cfg,
        eval_cfg=LongBench_lsht_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_multifieldqa_en_gen_d3838e import LongBench_multifieldqa_en_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchF1Evaluator, LongBenchmultifieldqa_enDataset

LongBench_multifieldqa_en_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_multifieldqa_en_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Read the following text and answer briefly.\n\n{context}\n\nNow, answer the following question based on the above text, only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=64)
)

LongBench_multifieldqa_en_eval_cfg = dict(
    evaluator=dict(type=LongBenchF1Evaluator),
    pred_role='BOT'
)

LongBench_multifieldqa_en_datasets = [
    dict(
        type=LongBenchmultifieldqa_enDataset,
        abbr='LongBench_multifieldqa_en',
        path='THUDM/LongBench',
        name='multifieldqa_en',
        reader_cfg=LongBench_multifieldqa_en_reader_cfg,
        infer_cfg=LongBench_multifieldqa_en_infer_cfg,
        eval_cfg=LongBench_multifieldqa_en_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_multifieldqa_zh_gen_e9a7ef import LongBench_multifieldqa_zh_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchF1Evaluator, LongBenchmultifieldqa_zhDataset

LongBench_multifieldqa_zh_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_multifieldqa_zh_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='阅读以下文字并用中文简短回答:\n\n{context}\n\n现在请基于上面的文章回答下面的问题,只告诉我答案,不要输出任何其他字词。\n\n问题:{input}\n回答:'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=64)
)

LongBench_multifieldqa_zh_eval_cfg = dict(
    evaluator=dict(type=LongBenchF1Evaluator, language='zh'),
    pred_role='BOT'
)

LongBench_multifieldqa_zh_datasets = [
    dict(
        type=LongBenchmultifieldqa_zhDataset,
        abbr='LongBench_multifieldqa_zh',
        path='THUDM/LongBench',
        name='multifieldqa_zh',
        reader_cfg=LongBench_multifieldqa_zh_reader_cfg,
        infer_cfg=LongBench_multifieldqa_zh_infer_cfg,
        eval_cfg=LongBench_multifieldqa_zh_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_musique_gen_6b3efc import LongBench_musique_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchF1Evaluator, LongBenchmusiqueDataset

LongBench_musique_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_musique_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. Only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=32)
)

LongBench_musique_eval_cfg = dict(
    evaluator=dict(type=LongBenchF1Evaluator),
    pred_role='BOT'
)

LongBench_musique_datasets = [
    dict(
        type=LongBenchmusiqueDataset,
        abbr='LongBench_musique',
        path='THUDM/LongBench',
        name='musique',
        reader_cfg=LongBench_musique_reader_cfg,
        infer_cfg=LongBench_musique_infer_cfg,
        eval_cfg=LongBench_musique_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_narrativeqa_gen_a68305 import LongBench_narrativeqa_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchF1Evaluator, LongBenchnarrativeqaDataset

LongBench_narrativeqa_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_narrativeqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='You are given a story, which can be either a novel or a movie script, and a question. Answer the question as concisely as you can, using a single phrase if possible. Do not provide any explanation.\n\nStory: {context}\n\nNow, answer the question based on the story as concisely as you can, using a single phrase if possible. Do not provide any explanation.\n\nQuestion: {input}\n\nAnswer:'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=128)
)

LongBench_narrativeqa_eval_cfg = dict(
    evaluator=dict(type=LongBenchF1Evaluator),
    pred_role='BOT'
)

LongBench_narrativeqa_datasets = [
    dict(
        type=LongBenchnarrativeqaDataset,
        abbr='LongBench_narrativeqa',
        path='THUDM/LongBench',
        name='narrativeqa',
        reader_cfg=LongBench_narrativeqa_reader_cfg,
        infer_cfg=LongBench_narrativeqa_infer_cfg,
        eval_cfg=LongBench_narrativeqa_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_nq_gen_d30cb9 import LongBench_nq_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchF1Evaluator, LongBenchnqDataset

LongBench_nq_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_nq_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Answer the question based on the given passage. Only give me the answer and do not output any other words. The following are some examples.\n\n{context}\n\n{input}'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=32)
)

LongBench_nq_eval_cfg = dict(
    evaluator=dict(type=LongBenchF1Evaluator),
    pred_role='BOT'
)

LongBench_nq_datasets = [
    dict(
        type=LongBenchnqDataset,
        abbr='LongBench_nq',
        path='THUDM/LongBench',
        name='nq',
        reader_cfg=LongBench_nq_reader_cfg,
        infer_cfg=LongBench_nq_infer_cfg,
        eval_cfg=LongBench_nq_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_passage_count_gen_dcdaab import LongBench_passage_count_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchCountEvaluator, LongBenchpassage_countDataset

LongBench_passage_count_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_passage_count_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='There are some paragraphs below sourced from Wikipedia. Some of them may be duplicates. Please carefully read these paragraphs and determine how many unique paragraphs there are after removing duplicates. In other words, how many non-repeating paragraphs are there in total?\n\n{context}\n\nPlease enter the final count of unique paragraphs after removing duplicates. The output format should only contain the number, such as 1, 2, 3, and so on.\n\nThe final answer is: '),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=32)
)

LongBench_passage_count_eval_cfg = dict(
    evaluator=dict(type=LongBenchCountEvaluator),
    pred_role='BOT'
)

LongBench_passage_count_datasets = [
    dict(
        type=LongBenchpassage_countDataset,
        abbr='LongBench_passage_count',
        path='THUDM/LongBench',
        name='passage_count',
        reader_cfg=LongBench_passage_count_reader_cfg,
        infer_cfg=LongBench_passage_count_infer_cfg,
        eval_cfg=LongBench_passage_count_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_passage_retrieval_en_gen_734db5 import LongBench_passage_retrieval_en_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchRetrievalEvaluator, LongBenchpassage_retrieval_enDataset

LongBench_passage_retrieval_en_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_passage_retrieval_en_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Here are 30 paragraphs from Wikipedia, along with an abstract. Please determine which paragraph the abstract is from.\n\n{context}\n\nThe following is an abstract.\n\n{input}\n\nPlease enter the number of the paragraph that the abstract is from. The answer format must be like \"Paragraph 1\", \"Paragraph 2\", etc.\n\nThe answer is: '),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=32)
)

LongBench_passage_retrieval_en_eval_cfg = dict(
    evaluator=dict(type=LongBenchRetrievalEvaluator),
    pred_role='BOT'
)

LongBench_passage_retrieval_en_datasets = [
    dict(
        type=LongBenchpassage_retrieval_enDataset,
        abbr='LongBench_passage_retrieval_en',
        path='THUDM/LongBench',
        name='passage_retrieval_en',
        reader_cfg=LongBench_passage_retrieval_en_reader_cfg,
        infer_cfg=LongBench_passage_retrieval_en_infer_cfg,
        eval_cfg=LongBench_passage_retrieval_en_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_passage_retrieval_zh_gen_01cca2 import LongBench_passage_retrieval_zh_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchRetrievalEvaluator, LongBenchpassage_retrieval_zhDataset

LongBench_passage_retrieval_zh_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_passage_retrieval_zh_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='以下是若干段落文字,以及其中一个段落的摘要。请确定给定的摘要出自哪一段。\n\n{context}\n\n下面是一个摘要\n\n{input}\n\n请输入摘要所属段落的编号。答案格式必须是\"段落1\",\"段落2\"等格式\n\n答案是:'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=32)
)

LongBench_passage_retrieval_zh_eval_cfg = dict(
    evaluator=dict(type=LongBenchRetrievalEvaluator, language='zh'),
    pred_role='BOT'
)

LongBench_passage_retrieval_zh_datasets = [
    dict(
        type=LongBenchpassage_retrieval_zhDataset,
        abbr='LongBench_passage_retrieval_zh',
        path='THUDM/LongBench',
        name='passage_retrieval_zh',
        reader_cfg=LongBench_passage_retrieval_zh_reader_cfg,
        infer_cfg=LongBench_passage_retrieval_zh_infer_cfg,
        eval_cfg=LongBench_passage_retrieval_zh_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_qasper_gen_6b3efc import LongBench_qasper_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchF1Evaluator, LongBenchqasperDataset

LongBench_qasper_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_qasper_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. Only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=32)
)

LongBench_qasper_eval_cfg = dict(
    evaluator=dict(type=LongBenchF1Evaluator),
    pred_role='BOT'
)

LongBench_qasper_datasets = [
    dict(
        type=LongBenchqasperDataset,
        abbr='LongBench_qasper',
        path='THUDM/LongBench',
        name='qasper',
        reader_cfg=LongBench_qasper_reader_cfg,
        infer_cfg=LongBench_qasper_infer_cfg,
        eval_cfg=LongBench_qasper_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_qmsum_gen_d33331 import LongBench_qmsum_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchRougeEvaluator, LongBenchqmsumDataset

LongBench_qmsum_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_qmsum_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='You are given a meeting transcript and a query containing a question or instruction. Answer the query in one or more sentences.\n\nTranscript:\n{context}\n\nNow, answer the query based on the above meeting transcript in one or more sentences.\n\nQuery: {input}\nAnswer:'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512)
)

LongBench_qmsum_eval_cfg = dict(
    evaluator=dict(type=LongBenchRougeEvaluator),
    pred_role='BOT'
)

LongBench_qmsum_datasets = [
    dict(
        type=LongBenchqmsumDataset,
        abbr='LongBench_qmsum',
        path='THUDM/LongBench',
        name='qmsum',
        reader_cfg=LongBench_qmsum_reader_cfg,
        infer_cfg=LongBench_qmsum_infer_cfg,
        eval_cfg=LongBench_qmsum_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_repobench_gen_6df953 import LongBench_repobench_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchCodeSimEvaluator, LongBenchrepobenchDataset

LongBench_repobench_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_repobench_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Please complete the code given below. \n{context}{input}Next line of code:\n'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=64)
)

LongBench_repobench_eval_cfg = dict(
    evaluator=dict(type=LongBenchCodeSimEvaluator),
    pred_role='BOT'
)

LongBench_repobench_datasets = [
    dict(
        type=LongBenchrepobenchDataset,
        abbr='LongBench_repobench-p',
        path='THUDM/LongBench',
        name='repobench-p',
        reader_cfg=LongBench_repobench_reader_cfg,
        infer_cfg=LongBench_repobench_infer_cfg,
        eval_cfg=LongBench_repobench_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_trec_gen_824187 import LongBench_trec_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchClassificationEvaluator, LongBenchtrecDataset

LongBench_trec_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='all_labels',
    train_split='test',
    test_split='test'
)

LongBench_trec_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Please determine the type of the question below. Here are some examples of questions.\n\n{context}\n{input}'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=64)
)

LongBench_trec_eval_cfg = dict(
    evaluator=dict(type=LongBenchClassificationEvaluator),
    pred_role='BOT'
)

LongBench_trec_datasets = [
    dict(
        type=LongBenchtrecDataset,
        abbr='LongBench_trec',
        path='THUDM/LongBench',
        name='trec',
        reader_cfg=LongBench_trec_reader_cfg,
        infer_cfg=LongBench_trec_infer_cfg,
        eval_cfg=LongBench_trec_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_triviaqa_gen_d30cb9 import LongBench_triviaqa_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchF1Evaluator, LongBenchtriviaqaDataset

LongBench_triviaqa_reader_cfg = dict(
    input_columns=['context', 'input'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_triviaqa_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='Answer the question based on the given passage. Only give me the answer and do not output any other words. The following are some examples.\n\n{context}\n\n{input}'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=32)
)

LongBench_triviaqa_eval_cfg = dict(
    evaluator=dict(type=LongBenchF1Evaluator),
    pred_role='BOT'
)

LongBench_triviaqa_datasets = [
    dict(
        type=LongBenchtriviaqaDataset,
        abbr='LongBench_triviaqa',
        path='THUDM/LongBench',
        name='triviaqa',
        reader_cfg=LongBench_triviaqa_reader_cfg,
        infer_cfg=LongBench_triviaqa_infer_cfg,
        eval_cfg=LongBench_triviaqa_eval_cfg)
]
@ -0,0 +1,4 @@
from mmengine.config import read_base

with read_base():
    from .longbench_vcsum_gen_f7a8ac import LongBench_vcsum_datasets  # noqa: F401, F403
@ -0,0 +1,38 @@
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import LongBenchRougeEvaluator, LongBenchvcsumDataset

LongBench_vcsum_reader_cfg = dict(
    input_columns=['context'],
    output_column='answers',
    train_split='test',
    test_split='test'
)

LongBench_vcsum_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(role='HUMAN', prompt='下面有一段会议记录,请你阅读后,写一段总结,总结会议的内容。\n会议记录:\n{context}\n\n会议总结:'),
            ], )),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512)
)

LongBench_vcsum_eval_cfg = dict(
    evaluator=dict(type=LongBenchRougeEvaluator, language='zh'),
    pred_role='BOT'
)

LongBench_vcsum_datasets = [
    dict(
        type=LongBenchvcsumDataset,
        abbr='LongBench_vcsum',
        path='THUDM/LongBench',
        name='vcsum',
        reader_cfg=LongBench_vcsum_reader_cfg,
        infer_cfg=LongBench_vcsum_infer_cfg,
        eval_cfg=LongBench_vcsum_eval_cfg)
]
configs/summarizers/longbench.py (new file, 35 lines)
@ -0,0 +1,35 @@
summarizer = dict(
    dataset_abbrs = [
        '--------- LongBench Single-Document QA ---------',  # category
        'LongBench_narrativeqa',
        'LongBench_qasper',
        'LongBench_multifieldqa_en',
        'LongBench_multifieldqa_zh',
        '--------- LongBench Multi-Document QA ---------',  # category
        'LongBench_hotpotqa',
        'LongBench_2wikimqa',
        'LongBench_musique',
        'LongBench_dureader',
        '--------- LongBench Summarization ---------',  # category
        'LongBench_gov_report',
        'LongBench_qmsum',
        'LongBench_vcsum',
        '--------- LongBench Few-shot Learning ---------',  # category
        'LongBench_trec',
        'LongBench_nq',
        'LongBench_triviaqa',
        'LongBench_lsht',
        '--------- LongBench Code Completion ---------',  # category
        'LongBench_lcc',
        'LongBench_repobench-p',
        '--------- LongBench Synthetic Tasks ---------',  # category
        'LongBench_passage_retrieval_en',
        'LongBench_passage_count',
        'LongBench_passage_retrieval_zh',
    ],
    summary_groups=sum([v for k, v in locals().items() if k.endswith("_summary_groups")], []),
    prompt_db=dict(
        database_path='configs/datasets/log.json',
        config_dir='configs/datasets',
        blacklist='.promptignore'),
)
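A minimal sketch (not part of this commit) of how this summarizer is typically wired into a run config. The surrounding file is an assumption; the `dataset_abbrs` list above controls row order in the final report, and the '---------' strings render as section dividers between task groups.

# run config sketch -- illustrative only
from mmengine.config import read_base

with read_base():
    from .summarizers.longbench import summarizer  # the dict defined above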
@ -54,6 +54,7 @@ from .LEval_scientific_qa import *  # noqa: F401, F403
from .LEval_topic_retrieval import *  # noqa: F401, F403
from .LEval_tpo import *  # noqa: F401, F403
from .LEval_tvshow_summ import *  # noqa: F401, F403
from .longbench import *  # noqa: F401, F403
from .math import *  # noqa: F401, F403
from .mbpp import *  # noqa: F401, F403
from .mmlu import *  # noqa: F401, F403
opencompass/datasets/longbench/__init__.py (new file, 26 lines)
@ -0,0 +1,26 @@
from .evaluators import LongBenchClassificationEvaluator  # noqa: F401, F403
from .evaluators import LongBenchCodeSimEvaluator  # noqa: F401, F403
from .evaluators import LongBenchCountEvaluator  # noqa: F401, F403
from .evaluators import LongBenchF1Evaluator  # noqa: F401, F403
from .evaluators import LongBenchRetrievalEvaluator  # noqa: F401, F403
from .evaluators import LongBenchRougeEvaluator  # noqa: F401, F403
from .longbench_2wikim_qa import *  # noqa: F401, F403
from .longbench_dureader import *  # noqa: F401, F403
from .longbench_gov_report import *  # noqa: F401, F403
from .longbench_hotpot_qa import *  # noqa: F401, F403
from .longbench_lcc import *  # noqa: F401, F403
from .longbench_lsht import *  # noqa: F401, F403
from .longbench_multifieldqa_en import *  # noqa: F401, F403
from .longbench_multifieldqa_zh import *  # noqa: F401, F403
from .longbench_musique import *  # noqa: F401, F403
from .longbench_narrative_qa import *  # noqa: F401, F403
from .longbench_nq import *  # noqa: F401, F403
from .longbench_passage_count import *  # noqa: F401, F403
from .longbench_passage_retrieval_en import *  # noqa: F401, F403
from .longbench_passage_retrieval_zh import *  # noqa: F401, F403
from .longbench_qasper import *  # noqa: F401, F403
from .longbench_qmsum import *  # noqa: F401, F403
from .longbench_repobench import *  # noqa: F401, F403
from .longbench_trec import *  # noqa: F401, F403
from .longbench_trivia_qa import *  # noqa: F401, F403
from .longbench_vcsum import *  # noqa: F401, F403
opencompass/datasets/longbench/evaluators.py (new file, 264 lines)
@ -0,0 +1,264 @@
import difflib
import re
import string
from collections import Counter
from typing import List

import jieba
from fuzzywuzzy import fuzz
from rouge import Rouge

from opencompass.openicl.icl_evaluator import BaseEvaluator
from opencompass.registry import ICL_EVALUATORS


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def normalize_zh_answer(s):
    """Lower text and remove punctuation, extra whitespace."""

    def white_space_fix(text):
        return ''.join(text.split())

    def remove_punc(text):
        # full-width (Chinese) punctuation; ASCII punctuation is added below
        cn_punctuation = '!?。。＂＃＄％＆＇（）＊＋，－／：；＜＝＞＠［＼］＾＿｀\
            ｛｜｝～⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏.'
        all_punctuation = set(string.punctuation + cn_punctuation)
        return ''.join(ch for ch in text if ch not in all_punctuation)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_punc(lower(s)))


@ICL_EVALUATORS.register_module()
class LongBenchF1Evaluator(BaseEvaluator):

    def __init__(self, language: str = 'en') -> None:
        super().__init__()
        assert language in ['en', 'zh']
        self.language = language

    def score(self, predictions: List, references: List) -> dict:

        def f1_score(prediction, reference, **kwargs):
            common = Counter(prediction) & Counter(reference)
            num_same = sum(common.values())
            if num_same == 0:
                return 0
            precision = 1.0 * num_same / len(prediction)
            recall = 1.0 * num_same / len(reference)
            f1 = (2 * precision * recall) / (precision + recall)
            return f1

        score = 0.
        for i in range(len(predictions)):
            prediction = predictions[i]
            reference_list = references[i]
            task_score = 0.
            for reference in reference_list:
                if self.language == 'en':
                    normalized_prediction = normalize_answer(prediction)
                    normalized_reference = normalize_answer(reference)

                    prediction_tokens = normalized_prediction.split()
                    reference_tokens = normalized_reference.split()

                else:
                    prediction_tokens = list(
                        jieba.cut(prediction, cut_all=False))
                    reference_tokens = list(jieba.cut(reference,
                                                      cut_all=False))
                    prediction_tokens = [
                        normalize_zh_answer(token)
                        for token in prediction_tokens
                    ]
                    reference_tokens = [
                        normalize_zh_answer(token)
                        for token in reference_tokens
                    ]
                    prediction_tokens = [
                        token for token in prediction_tokens if len(token) > 0
                    ]
                    reference_tokens = [
                        token for token in reference_tokens if len(token) > 0
                    ]

                task_score = max(task_score,
                                 f1_score(prediction_tokens, reference_tokens))

            score += task_score

        score = score / len(predictions) * 100
        return {'score': score}


@ICL_EVALUATORS.register_module()
class LongBenchCountEvaluator(BaseEvaluator):

    def score(self, predictions: List, references: List) -> dict:
        score = 0.
        for i in range(len(predictions)):
            prediction = predictions[i]
            reference_list = references[i]
            for reference in reference_list:
                numbers = re.findall(r'\d+', prediction)
                right_num = 0
                for number in numbers:
                    if str(number) == str(reference):
                        right_num += 1
                score += 0.0 if len(numbers) == 0 else float(right_num /
                                                             len(numbers))

        score = score / len(predictions) * 100
        return {'score': score}


@ICL_EVALUATORS.register_module()
class LongBenchRetrievalEvaluator(BaseEvaluator):

    def __init__(self, language: str = 'en') -> None:
        super().__init__()
        assert language in ['en', 'zh']
        self.language = language

    def score(self, predictions: List, references: List) -> dict:
        score = 0.
        for i in range(len(predictions)):
            prediction = predictions[i]
            reference_list = references[i]
            for reference in reference_list:
                if self.language == 'en':
                    pattern = r'Paragraph (\d+)'
                else:
                    pattern = r'段落(\d+)'

                matches = re.findall(pattern, reference)
                reference_id = matches[0]
                numbers = re.findall(r'\d+', prediction)
                right_num = 0
                for number in numbers:
                    if str(number) == str(reference_id):
                        right_num += 1

                score += 0.0 if len(numbers) == 0 else float(right_num /
                                                             len(numbers))

        score = score / len(predictions) * 100
        return {'score': score}


@ICL_EVALUATORS.register_module()
class LongBenchRougeEvaluator(BaseEvaluator):

    def __init__(self, language: str = 'en') -> None:
        super().__init__()
        assert language in ['en', 'zh']
        self.language = language

    def score(self, predictions: List, references: List) -> dict:
        score = 0.
        for i in range(len(predictions)):
            prediction = predictions[i]
            reference_list = references[i]
            task_score = 0.
            for reference in reference_list:
                if self.language == 'zh':
                    prediction = ' '.join(
                        list(jieba.cut(prediction, cut_all=False)))
                    reference = ' '.join(
                        list(jieba.cut(reference, cut_all=False)))

                rouge = Rouge()
                if prediction != '':
                    cur_score = rouge.get_scores([prediction], [reference],
                                                 avg=True)['rouge-l']['f']
                else:
                    cur_score = 0.
                task_score = max(task_score, cur_score)

            score += task_score

        score = score / len(predictions) * 100
        return {'score': score}


@ICL_EVALUATORS.register_module()
class LongBenchCodeSimEvaluator(BaseEvaluator):

    def score(self, predictions: List, references: List) -> dict:
        score = 0.
        for i in range(len(predictions)):
            prediction = predictions[i]
            reference_list = references[i]
            task_score = 0.
            for reference in reference_list:
                all_lines = prediction.lstrip('\n').split('\n')
                prediction = ''
                for line in all_lines:
                    # keep the first line that is not a comment or markdown fence
                    if ('`' not in line) and ('#' not in line) and ('//' not in line):
                        prediction = line
                        break
                task_score = max(task_score,
                                 (fuzz.ratio(prediction, reference) / 100))

            score += task_score

        score = score / len(predictions) * 100
        return {'score': score}


@ICL_EVALUATORS.register_module()
class LongBenchClassificationEvaluator(BaseEvaluator):

    def score(self, predictions: List, references: List) -> dict:
        score = 0.
        for i in range(len(predictions)):
            prediction = predictions[i]
            reference_list = references[i]['answers']
            for reference in reference_list:
                em_match_list = []
                all_classes = references[i]['all_classes']
                for class_name in all_classes:
                    if class_name in prediction:
                        em_match_list.append(class_name)
                for match_term in em_match_list:
                    if match_term in reference and match_term != reference:
                        em_match_list.remove(match_term)
                # note: comparing the list itself to 0 is always true, so the
                # fuzzy fallback below would never run; check the length instead
                if len(em_match_list) != 0:
                    if reference in em_match_list:
                        score += (1.0 / len(em_match_list))
                else:
                    best_match = None
                    highest_similarity = 0
                    for names in all_classes:
                        similarity = difflib.SequenceMatcher(
                            None, names, prediction).ratio()
                        if similarity > highest_similarity:
                            highest_similarity = similarity
                            best_match = names
                    score += float(best_match == reference)

        score = score / len(predictions) * 100
        return {'score': score}
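A minimal usage sketch (not part of this commit) of calling one of the evaluators above directly; each entry in `references` is a list of acceptable answers and the best match is kept, with scores reported on a 0-100 scale.

from opencompass.datasets import LongBenchF1Evaluator

evaluator = LongBenchF1Evaluator(language='en')
result = evaluator.score(predictions=['Paris'],
                         references=[['Paris', 'paris, france']])
print(result)  # e.g. {'score': 100.0}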
opencompass/datasets/longbench/longbench_2wikim_qa.py (new file, 26 lines)
@ -0,0 +1,26 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBench2wikimqaDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset
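A short sketch (not part of this commit) of what the `path`/`name` keys in the configs become at load time; `load` simply forwards its keyword arguments to `datasets.load_dataset` and rebuilds the test split with the three columns the reader config expects.

from opencompass.datasets import LongBench2wikimqaDataset

ds = LongBench2wikimqaDataset.load(path='THUDM/LongBench', name='2wikimqa')
print(ds['test'][0].keys())  # dict_keys(['input', 'context', 'answers'])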
opencompass/datasets/longbench/longbench_dureader.py (new file, 26 lines)
@ -0,0 +1,26 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchdureaderDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset
opencompass/datasets/longbench/longbench_gov_report.py (new file, 21 lines)
@ -0,0 +1,21 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchgov_reportDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({'context': context, 'answers': answers})
        dataset[split] = Dataset.from_list(raw_data)
        return dataset
opencompass/datasets/longbench/longbench_hotpot_qa.py (new file, 26 lines)
@ -0,0 +1,26 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchhotpotqaDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset
opencompass/datasets/longbench/longbench_lcc.py (new file, 21 lines)
@ -0,0 +1,21 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchlccDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({'context': context, 'answers': answers})
        dataset[split] = Dataset.from_list(raw_data)
        return dataset
opencompass/datasets/longbench/longbench_lsht.py (new file, 30 lines)
@ -0,0 +1,30 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchlshtDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            all_classes = dataset[split]['all_classes'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'all_labels': {
                    'answers': answers,
                    'all_classes': all_classes
                }
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset
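For orientation (values below are illustrative, not from the dataset): the lsht and trec loaders pack the gold labels and the full label set into a nested 'all_labels' dict, which is exactly what LongBenchClassificationEvaluator reads back as references[i]['answers'] and references[i]['all_classes'].

# shape of one classification example after loading -- sketch only
example = {
    'input': '...news snippet to classify...',
    'context': '...few-shot examples...',
    'all_labels': {
        'answers': ['农业'],                      # gold label(s)
        'all_classes': ['农业', '体育', '军事'],  # candidate label set
    },
}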
opencompass/datasets/longbench/longbench_multifieldqa_en.py (new file, 26 lines)
@ -0,0 +1,26 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchmultifieldqa_enDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset
opencompass/datasets/longbench/longbench_multifieldqa_zh.py (new file, 26 lines)
@ -0,0 +1,26 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchmultifieldqa_zhDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset
opencompass/datasets/longbench/longbench_musique.py (new file, 26 lines)
@ -0,0 +1,26 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchmusiqueDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset
opencompass/datasets/longbench/longbench_narrative_qa.py (new file, 26 lines)
@ -0,0 +1,26 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchnarrativeqaDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset
opencompass/datasets/longbench/longbench_nq.py (new file, 26 lines)
@ -0,0 +1,26 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchnqDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset
opencompass/datasets/longbench/longbench_passage_count.py (new file, 21 lines)
@ -0,0 +1,21 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchpassage_countDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({'context': context, 'answers': answers})
        dataset[split] = Dataset.from_list(raw_data)
        return dataset
@ -0,0 +1,26 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchpassage_retrieval_enDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset

@ -0,0 +1,26 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchpassage_retrieval_zhDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset

26  opencompass/datasets/longbench/longbench_qasper.py  Normal file
@ -0,0 +1,26 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchqasperDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset

26  opencompass/datasets/longbench/longbench_qmsum.py  Normal file
@ -0,0 +1,26 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchqmsumDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset

26  opencompass/datasets/longbench/longbench_repobench.py  Normal file
@ -0,0 +1,26 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchrepobenchDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset

30  opencompass/datasets/longbench/longbench_trec.py  Normal file
@ -0,0 +1,30 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchtrecDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            all_classes = dataset[split]['all_classes'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'all_labels': {
                    'answers': answers,
                    'all_classes': all_classes
                }
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset

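Unlike the other loaders, the trec loader keeps both the gold labels and the full candidate label set, nesting them under 'all_labels' so the evaluator can see every class. A hedged sketch of the resulting record shape is below; only the keys mirror the code above, the values are invented for illustration.

# Hypothetical record produced by LongBenchtrecDataset.load(); field values
# are made up, the real class list comes from the dataset itself.
record = {
    'input': 'What does the abbreviation NASA stand for ?',
    'context': '<few-shot classification examples elided>',
    'all_labels': {
        'answers': ['abbreviation'],
        'all_classes': ['abbreviation', 'description', 'entity',
                        'human', 'location', 'numeric'],
    },
}
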
26  opencompass/datasets/longbench/longbench_trivia_qa.py  Normal file
@ -0,0 +1,26 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchtriviaqaDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            question = dataset[split]['input'][i]
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({
                'input': question,
                'context': context,
                'answers': answers
            })
        dataset[split] = Dataset.from_list(raw_data)
        return dataset

21  opencompass/datasets/longbench/longbench_vcsum.py  Normal file
@ -0,0 +1,21 @@
from datasets import Dataset, load_dataset

from opencompass.registry import LOAD_DATASET

from ..base import BaseDataset


@LOAD_DATASET.register_module()
class LongBenchvcsumDataset(BaseDataset):

    @staticmethod
    def load(**kwargs):
        dataset = load_dataset(**kwargs)
        split = 'test'
        raw_data = []
        for i in range(len(dataset[split])):
            context = dataset[split]['context'][i]
            answers = dataset[split]['answers'][i]
            raw_data.append({'context': context, 'answers': answers})
        dataset[split] = Dataset.from_list(raw_data)
        return dataset

@ -1,10 +1,12 @@
import json
import os
import re
import time
from concurrent.futures import ThreadPoolExecutor
from threading import Lock
from typing import Dict, List, Optional, Union

import jieba
import requests

from opencompass.registry import MODELS
@ -42,6 +44,9 @@ class OpenAI(BaseAPIModel):
            wrapping of any meta instructions.
        openai_api_base (str): The base url of OpenAI's API. Defaults to
            'https://api.openai.com/v1/chat/completions'.
        mode (str, optional): The method of input truncation when the input
            length exceeds max_seq_len. 'front', 'mid' and 'rear' name the
            part of the input to truncate. Defaults to 'none'.
        temperature (float, optional): What sampling temperature to use.
            If not None, will override the temperature in the `generate()`
            call. Defaults to None.
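As a hedged sketch (not from the diff), the two new arguments would be passed when constructing the wrapper roughly as follows; the import path and the non-new arguments are assumptions and may differ from the real configs.

# Illustrative only. Assumes OpenAI is importable from opencompass.models and
# that max_seq_len is accepted via the BaseAPIModel constructor.
from opencompass.models import OpenAI

model = OpenAI(path='gpt-3.5-turbo-16k',
               max_seq_len=15000,   # assumed BaseAPIModel argument
               mode='mid',          # truncate the middle of over-long prompts
               temperature=0.7)
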
@ -58,6 +63,7 @@ class OpenAI(BaseAPIModel):
                 org: Optional[Union[str, List[str]]] = None,
                 meta_template: Optional[Dict] = None,
                 openai_api_base: str = OPENAI_API_BASE,
                 mode: str = 'none',
                 temperature: Optional[float] = None):

        super().__init__(path=path,
@ -68,6 +74,8 @@ class OpenAI(BaseAPIModel):
        import tiktoken
        self.tiktoken = tiktoken
        self.temperature = temperature
        assert mode in ['none', 'front', 'mid', 'rear']
        self.mode = mode

        if isinstance(key, str):
            self.keys = [os.getenv('OPENAI_API_KEY') if key == 'ENV' else key]
@ -137,6 +145,20 @@ class OpenAI(BaseAPIModel):
        """
        assert isinstance(input, (str, PromptList))

        # max num token for gpt-3.5-turbo is 4097
        context_window = 4096
        if '32k' in self.path:
            context_window = 32768
        elif '16k' in self.path:
            context_window = 16384
        elif 'gpt-4' in self.path:
            context_window = 8192

        # will leave 100 tokens as prompt buffer, triggered if input is str
        if isinstance(input, str) and self.mode != 'none':
            context_window = self.max_seq_len
            input = self.bin_trim(input, context_window - 100 - max_out_len)

        if isinstance(input, str):
            messages = [{'role': 'user', 'content': input}]
        else:
@ -151,15 +173,6 @@ class OpenAI(BaseAPIModel):
                    msg['role'] = 'system'
                messages.append(msg)

        # max num token for gpt-3.5-turbo is 4097
        context_window = 4096
        if '32k' in self.path:
            context_window = 32768
        elif '16k' in self.path:
            context_window = 16384
        elif 'gpt-4' in self.path:
            context_window = 8192

        # Hold out 100 tokens due to potential errors in tiktoken calculation
        max_out_len = min(
            max_out_len, context_window - self.get_token_len(str(input)) - 100)
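The budget arithmetic in the trimming branch above works out as in the small sketch below; the max_seq_len and max_out_len values are illustrative assumptions, only the 100-token buffer and the subtraction come from the code.

# Hypothetical budget when self.mode != 'none' and the input is a plain string.
max_seq_len = 4096           # becomes the effective context_window
max_out_len = 128
prompt_buffer = 100          # headroom kept for tokenization error
prompt_budget = max_seq_len - prompt_buffer - max_out_len  # 3868 tokens
# The prompt is then passed to bin_trim(input, prompt_budget) so the trimmed
# prompt, the buffer and the generation budget all fit in the window.
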
@ -251,3 +264,45 @@ class OpenAI(BaseAPIModel):
        """
        enc = self.tiktoken.encoding_for_model(self.path)
        return len(enc.encode(prompt))

    def bin_trim(self, prompt: str, num_token: int) -> str:
        """Trim prompt to at most num_token tokens, keeping the part of the
        input selected by self.mode.

        Args:
            prompt (str): Input string.
            num_token (int): The upper bound of token numbers.

        Returns:
            str: The trimmed prompt.
        """
        token_len = self.get_token_len(prompt)
        if token_len <= num_token:
            return prompt
        pattern = re.compile(r'[\u4e00-\u9fa5]')
        if pattern.search(prompt):
            words = list(jieba.cut(prompt, cut_all=False))
        else:
            words = prompt.split(' ')

        l, r = 1, len(words)
        while l + 2 < r:
            mid = (l + r) // 2
            if self.mode == 'front':
                cur_prompt = ' '.join(words[-mid:])
            elif self.mode == 'mid':
                cur_prompt = ' '.join(words[:mid]) + ' '.join(words[-mid:])
            elif self.mode == 'rear':
                cur_prompt = ' '.join(words[:mid])

            if self.get_token_len(cur_prompt) <= num_token:
                l = mid  # noqa: E741
            else:
                r = mid

        if self.mode == 'front':
            prompt = ' '.join(words[-l:])
        elif self.mode == 'mid':
            prompt = ' '.join(words[:l]) + ' '.join(words[-l:])
        elif self.mode == 'rear':
            prompt = ' '.join(words[:l])
        return prompt
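The binary search above can be mirrored outside the class; a minimal self-contained sketch follows, assuming whitespace-separated words as stand-in "tokens" and handling only the 'rear' mode (drop the tail). It is illustrative only; the real method uses tiktoken for token counts and jieba for Chinese text.

# Illustrative stand-in for OpenAI.bin_trim with mode='rear'.
def _tok_len(text: str) -> int:
    # Stand-in for get_token_len(): count whitespace-separated words.
    return len(text.split())


def bin_trim_rear(prompt: str, num_token: int) -> str:
    words = prompt.split(' ')
    if _tok_len(prompt) <= num_token:
        return prompt
    lo, hi = 1, len(words)
    while lo + 2 < hi:
        mid = (lo + hi) // 2
        if _tok_len(' '.join(words[:mid])) <= num_token:
            lo = mid
        else:
            hi = mid
    return ' '.join(words[:lo])


print(bin_trim_rear('a b c d e f g h', 3))  # -> 'a b' (search stops within a 2-word window)
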
@ -7,6 +7,7 @@ datasets>=2.12.0
evaluate>=0.3.0
fairscale
faiss_gpu==1.7.2
fuzzywuzzy
jieba
mmengine>=0.8.2
nltk==3.8
@ -16,6 +17,7 @@ pandas<2.0.0
rank_bm25==0.2.2
rapidfuzz
requests==2.31.0
rouge
rouge_score
scikit_learn==1.2.1
sentence_transformers==2.2.2