# OpenCompass/examples/eval_OpenHuEval_HuLifeQA.py
from mmengine.config import read_base
with read_base():
    from opencompass.configs.datasets.OpenHuEval.HuLifeQA import (
        hu_life_qa_datasets,
        task_group_new,
    )
    from opencompass.configs.models.openai.gpt_4o_mini_20240718 import models as gpt_4o_mini_20240718_model
    from opencompass.configs.models.openai.gpt_4o_2024_11_20 import models as gpt_4o_20241120_model
    from opencompass.configs.models.deepseek.deepseek_v3_api_siliconflow import models as deepseek_v3_api_siliconflow_model
    from opencompass.configs.models.qwen2_5.lmdeploy_qwen2_5_7b_instruct import models as lmdeploy_qwen2_5_7b_instruct_model
    from opencompass.configs.models.qwen2_5.lmdeploy_qwen2_5_72b_instruct import models as lmdeploy_qwen2_5_72b_instruct_model
    from opencompass.configs.models.hf_llama.lmdeploy_llama3_1_8b_instruct import models as lmdeploy_llama3_1_8b_instruct_model
    from opencompass.configs.models.hf_llama.lmdeploy_llama3_1_70b_instruct import models as lmdeploy_llama3_1_70b_instruct_model
    from opencompass.configs.models.hf_internlm.lmdeploy_internlm3_8b_instruct import models as lmdeploy_internlm3_8b_instruct_model
    from opencompass.configs.models.qwq.lmdeploy_qwq_32b_preview import models as lmdeploy_qwq_32b_preview_model
    from opencompass.configs.models.deepseek.deepseek_r1_siliconflow import models as deepseek_r1_siliconflow_model

from opencompass.models import OpenAI
from opencompass.partitioners import (
NumWorkerPartitioner,
SubjectiveNumWorkerPartitioner,
)
from opencompass.runners import LocalRunner, SlurmSequentialRunner
from opencompass.summarizers import WildBenchSingleSummarizer
from opencompass.tasks import OpenICLInferTask
from opencompass.tasks.subjective_eval import SubjectiveEvalTask
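
# Meta template mapping chat roles (SYSTEM / HUMAN / BOT) to the roles expected by API-based models.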
api_meta_template = dict(round=[
    dict(role='SYSTEM', api_role='SYSTEM'),
    dict(role='HUMAN', api_role='HUMAN'),
    dict(role='BOT', api_role='BOT', generate=True),
])
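
# Models to evaluate; the commented-out entries are API models that can be enabled as needed.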
models = [
    # *gpt_4o_mini_20240718_model,
    # *gpt_4o_20241120_model,
    # *deepseek_v3_api_siliconflow_model,
    *lmdeploy_qwen2_5_7b_instruct_model,
    *lmdeploy_qwen2_5_72b_instruct_model,
    *lmdeploy_llama3_1_8b_instruct_model,
    *lmdeploy_llama3_1_70b_instruct_model,
    *lmdeploy_internlm3_8b_instruct_model,
    *lmdeploy_qwq_32b_preview_model,
]
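
# Judge model for subjective (LLM-as-a-judge) scoring; key='ENV' and openai_proxy_url='ENV'
# tell OpenCompass to read the API key and proxy URL from environment variables.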
judge_models = [
    dict(
        abbr='GPT-4o-2024-08-06',
        type=OpenAI,
        path='gpt-4o-2024-08-06',
        key='ENV',
        openai_proxy_url='ENV',
        verbose=True,
        meta_template=api_meta_template,
        query_per_second=2,
        max_out_len=8192,
        max_seq_len=8192,
        batch_size=8,
        temperature=0,
    )
]
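
# Evaluate each HuLifeQA subset in single-model scoring mode.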
for ds in hu_life_qa_datasets:
    ds.update(dict(mode='singlescore', eval_mode='single'))
del ds

datasets = [*hu_life_qa_datasets]
del hu_life_qa_datasets
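
# Inference stage: partition the datasets across workers and run inference tasks via Slurm.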
infer = dict(
    partitioner=dict(
        type=NumWorkerPartitioner,
        num_worker=8,
    ),
    runner=dict(
        type=SlurmSequentialRunner,
        max_num_workers=16,
        task=dict(type=OpenICLInferTask),
    ),
)
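
# Evaluation stage: the judge model scores the outputs of each evaluated model locally.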
eval = dict(
    partitioner=dict(
        type=SubjectiveNumWorkerPartitioner,
        num_worker=8,
        models=models,
        judge_models=judge_models,
    ),
    runner=dict(type=LocalRunner,
                max_num_workers=16,
                task=dict(type=SubjectiveEvalTask)),
)
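
# Summarize judge scores per task group defined in the HuLifeQA dataset config.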
summarizer = dict(
    type=WildBenchSingleSummarizer,
    customized_task_group_new=task_group_new,
)
work_dir = (
    './outputs/' + __file__.split('/')[-1].split('.')[0] + '/'
)  # do NOT modify this line, yapf: disable, pylint: disable