from mmengine.config import read_base

with read_base():
    from .datasets.subjective.multiround.mtbench_single_judge_diff_temp import subjective_datasets

from opencompass.models import HuggingFaceCausalLM, HuggingFace, HuggingFaceChatGLM3, OpenAI
from opencompass.partitioners import NaivePartitioner, SizePartitioner
from opencompass.partitioners.sub_naive import SubjectiveNaivePartitioner
from opencompass.partitioners.sub_size import SubjectiveSizePartitioner
from opencompass.runners import LocalRunner
from opencompass.runners import SlurmSequentialRunner
from opencompass.tasks import OpenICLInferTask
from opencompass.tasks.subjective_eval import SubjectiveEvalTask
from opencompass.summarizers import MTBenchSummarizer

api_meta_template = dict(
    round=[
        dict(role='SYSTEM', api_role='SYSTEM'),
        dict(role='HUMAN', api_role='HUMAN'),
        dict(role='BOT', api_role='BOT', generate=True),
    ]
)
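# api_meta_template maps OpenCompass's internal roles (SYSTEM/HUMAN/BOT) onto the
# corresponding API roles; it is attached below to the OpenAI judge model.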

_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='\n<|im_start|>user\n', end='<|im_end|>'),
        dict(role='BOT', begin='\n<|im_start|>assistant\n', end='<|im_end|>', generate=True),
    ],
)
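# _meta_template wraps user and assistant turns in <|im_start|>/<|im_end|> markers,
# i.e. the ChatML-style chat format used by Qwen-7B-Chat; generate=True marks the
# assistant turn the model is asked to complete.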

# -------------Inference Stage ----------------------------------------
# For subjective evaluation, we often enable sampling (do_sample) for the models
models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen-7b-chat-hf',
        path='Qwen/Qwen-7B-Chat',
        tokenizer_path='Qwen/Qwen-7B-Chat',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        pad_token_id=151643,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|im_end|>',
    )
]
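# Note: the comment above mentions sampling, but no sampling parameters are set here.
# If sampling is wanted, something like generation_kwargs=dict(do_sample=True,
# temperature=0.7) could be added to the model config above (parameter names are an
# assumption based on OpenCompass's HuggingFace model wrappers).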

datasets = [*subjective_datasets]

# -------------Evaluation Stage ----------------------------------------

## ------------- JudgeLLM Configuration
judge_models = [dict(
    abbr='GPT4-Turbo',
    type=OpenAI,
    path='gpt-4-0613',  # To compare with the official leaderboard, please use gpt-4-0613
    key='xxxx',  # The key is read from $OPENAI_API_KEY, but you can also write your key here
    meta_template=api_meta_template,
    query_per_second=16,
    max_out_len=2048,
    max_seq_len=2048,
    batch_size=8,
    temperature=0,
)]
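# The judge model grades each answer on MT-Bench's 1-10 single-answer scale;
# temperature=0 keeps its judgments as deterministic as the API allows.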

## single evaluation
eval = dict(
    partitioner=dict(type=SubjectiveSizePartitioner, strategy='split', max_task_size=10000, mode='singlescore', models=models, judge_models=judge_models),
    runner=dict(type=LocalRunner, max_num_workers=32, task=dict(type=SubjectiveEvalTask)),
)
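# SubjectiveSizePartitioner splits the judging workload so that no single task
# exceeds max_task_size; mode='singlescore' scores each answer on its own rather
# than comparing model pairs. LocalRunner then runs up to 32 SubjectiveEvalTask
# workers on the local machine.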

summarizer = dict(type=MTBenchSummarizer, judge_type='single')
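# MTBenchSummarizer with judge_type='single' aggregates the judge scores into
# per-category and overall averages for the final report.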

work_dir = 'outputs/mtbench/'
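# A typical launch, assuming this config is saved as configs/eval_subjective_mtbench.py
# in an OpenCompass checkout (the file name/path here is illustrative):
#   python run.py configs/eval_subjective_mtbench.py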