mirror of
https://github.com/open-compass/opencompass.git
synced 2025-05-30 16:03:24 +08:00
parent
7596c356e9
commit
5f2e7c3469
9
configs/eval_internlm_7b.py
Normal file
9
configs/eval_internlm_7b.py
Normal file
@ -0,0 +1,9 @@
|
||||
# Evaluation entry config: pairs the internlm-7b model with the
# "base_medium" dataset collection and the medium summarizer.
from mmengine.config import read_base

with read_base():
    # choose a list of datasets
    from .datasets.collections.base_medium import datasets
    # choose a model of interest
    from .models.hf_internlm_7b import models
    # and output the results in a chosen format
    from .summarizers.medium import summarizer
20
configs/models/hf_baichuan_7b.py
Normal file
20
configs/models/hf_baichuan_7b.py
Normal file
@ -0,0 +1,20 @@
|
||||
# Model config: baichuan-inc/baichuan-7B evaluated as a HF causal LM.
from opencompass.models import HuggingFaceCausalLM


models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='baichuan-7b-hf',
        # Same HF hub repo serves both weights and tokenizer.
        path='baichuan-inc/baichuan-7B',
        tokenizer_path='baichuan-inc/baichuan-7B',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
21
configs/models/hf_chatglm2_6b.py
Normal file
21
configs/models/hf_chatglm2_6b.py
Normal file
@ -0,0 +1,21 @@
|
||||
# Model config: THUDM/chatglm2-6b, pinned to a fixed revision for
# reproducible evaluation runs.
from opencompass.models import HuggingFace


_repo = 'THUDM/chatglm2-6b'

models = [
    dict(
        type=HuggingFace,
        abbr='chatglm2-6b-hf',
        path=_repo,
        tokenizer_path=_repo,
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
            revision='a6d54fac46dff2db65d53416c207a4485ca6bd40',
        ),
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
21
configs/models/hf_chatglm_6b.py
Normal file
21
configs/models/hf_chatglm_6b.py
Normal file
@ -0,0 +1,21 @@
|
||||
# Model config: THUDM/chatglm-6b, pinned to a fixed revision for
# reproducible evaluation runs.
from opencompass.models import HuggingFace


_repo = 'THUDM/chatglm-6b'

models = [
    dict(
        type=HuggingFace,
        abbr='chatglm-6b-hf',
        path=_repo,
        tokenizer_path=_repo,
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
            revision='1d240ba371910e9282298d4592532d7f0f3e9f3e',
        ),
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
21
configs/models/hf_falcon_7b.py
Normal file
21
configs/models/hf_falcon_7b.py
Normal file
@ -0,0 +1,21 @@
|
||||
# Model config: tiiuae/falcon-7b, pinned to a fixed revision for
# reproducible evaluation runs.
from opencompass.models import HuggingFaceCausalLM


_repo = 'tiiuae/falcon-7b'

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='falcon-7b-hf',
        path=_repo,
        tokenizer_path=_repo,
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
            revision='2f5c3cd4eace6be6c0f12981f377fb35e5bf6ee5',
        ),
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
21
configs/models/hf_internlm_7b.py
Normal file
21
configs/models/hf_internlm_7b.py
Normal file
@ -0,0 +1,21 @@
|
||||
# Model config: internlm-7b (local/hub path) as a HF causal LM.
from opencompass.models import HuggingFaceCausalLM


models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm-7b-hf',
        # NOTE(review): bare repo name — presumably resolved relative to the
        # working directory or HF cache; confirm against the runner setup.
        path='internlm-7b',
        tokenizer_path='internlm-7b',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(trust_remote_code=True, device_map='auto'),
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
29
configs/models/hf_internlm_chat_7b.py
Normal file
29
configs/models/hf_internlm_chat_7b.py
Normal file
@ -0,0 +1,29 @@
|
||||
# Model config: internlm-chat-7b with the InternLM chat meta template
# (<|User|>/<|Bot|> turn markers, <eoh>/<eoa> terminators).
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|User|>:', end='<eoh>\n'),
        dict(role='BOT', begin='<|Bot|>:', end='<eoa>\n', generate=True),
    ],
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm-chat-7b-hf',
        # NOTE(review): bare repo name — presumably resolved relative to the
        # working directory or HF cache; confirm against the runner setup.
        path='internlm-chat-7b',
        tokenizer_path='internlm-chat-7b',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        model_kwargs=dict(trust_remote_code=True, device_map='auto'),
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
21
configs/models/hf_llama_13b.py
Normal file
21
configs/models/hf_llama_13b.py
Normal file
@ -0,0 +1,21 @@
|
||||
# Model config: decapoda-research/llama-13b-hf as a HF causal LM.
from opencompass.models import HuggingFaceCausalLM


models = [
    # LLaMA 13B
    dict(
        type=HuggingFaceCausalLM,
        # abbr added for consistency with the sibling llama-30b/65b configs.
        abbr='llama-13b-hf',
        path='decapoda-research/llama-13b-hf',
        tokenizer_path='decapoda-research/llama-13b-hf',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(device_map='auto'),
        batch_padding=False,  # if false, inference with for-loop without batch padding
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]
22
configs/models/hf_llama_30b.py
Normal file
22
configs/models/hf_llama_30b.py
Normal file
@ -0,0 +1,22 @@
|
||||
# Model config: decapoda-research/llama-30b-hf as a HF causal LM.
from opencompass.models import HuggingFaceCausalLM


models = [
    # LLaMA 30B
    dict(
        type=HuggingFaceCausalLM,
        abbr='llama-30b-hf',
        path='decapoda-research/llama-30b-hf',
        tokenizer_path='decapoda-research/llama-30b-hf',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(device_map='auto'),
        batch_padding=False,  # if false, inference with for-loop without batch padding
        run_cfg=dict(num_gpus=4, num_procs=1),
    )
]
22
configs/models/hf_llama_65b.py
Normal file
22
configs/models/hf_llama_65b.py
Normal file
@ -0,0 +1,22 @@
|
||||
# Model config: decapoda-research/llama-65b-hf as a HF causal LM.
from opencompass.models import HuggingFaceCausalLM


models = [
    # LLaMA 65B
    dict(
        type=HuggingFaceCausalLM,
        abbr='llama-65b-hf',
        path='decapoda-research/llama-65b-hf',
        tokenizer_path='decapoda-research/llama-65b-hf',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(device_map='auto'),
        batch_padding=False,  # if false, inference with for-loop without batch padding
        run_cfg=dict(num_gpus=8, num_procs=1),
    )
]
@ -5,6 +5,7 @@ models = [
|
||||
# LLaMA 7B
|
||||
dict(
|
||||
type=HuggingFaceCausalLM,
|
||||
abbr='llama-7b-hf',
|
||||
path="decapoda-research/llama-7b-hf",
|
||||
tokenizer_path='decapoda-research/llama-7b-hf',
|
||||
tokenizer_kwargs=dict(padding_side='left',
|
||||
@ -15,8 +16,7 @@ models = [
|
||||
max_seq_len=2048,
|
||||
batch_size=8,
|
||||
model_kwargs=dict(device_map='auto'),
|
||||
batch_padding=False, # if false, inference with for-loop without batch padding
|
||||
run_cfg=dict(num_gpus=2, num_procs=1),
|
||||
batch_padding=False, # if false, inference with for-loop without batch padding
|
||||
run_cfg=dict(num_gpus=1, num_procs=1),
|
||||
)
|
||||
|
||||
]
|
21
configs/models/hf_moss_moon_003_base.py
Normal file
21
configs/models/hf_moss_moon_003_base.py
Normal file
@ -0,0 +1,21 @@
|
||||
# Model config: fnlp/moss-moon-003-base, pinned to a fixed revision for
# reproducible evaluation runs.
from opencompass.models import HuggingFaceCausalLM


_repo = 'fnlp/moss-moon-003-base'

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='moss-moon-003-base-hf',
        path=_repo,
        tokenizer_path=_repo,
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
            revision='5e406ca0ebbdea11cc3b12aa5932995c692568ac',
        ),
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
21
configs/models/hf_moss_moon_003_sft.py
Normal file
21
configs/models/hf_moss_moon_003_sft.py
Normal file
@ -0,0 +1,21 @@
|
||||
# Model config: fnlp/moss-moon-003-sft, pinned to a fixed revision for
# reproducible evaluation runs.
from opencompass.models import HuggingFaceCausalLM


_repo = 'fnlp/moss-moon-003-sft'

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='moss-moon-003-sft-hf',
        path=_repo,
        tokenizer_path=_repo,
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
            revision='7119d446173035561f40977fb9cb999995bb7517',
        ),
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
27
configs/models/hf_mpt_7b.py
Normal file
27
configs/models/hf_mpt_7b.py
Normal file
@ -0,0 +1,27 @@
|
||||
# Model config: mosaicml/mpt-7b, pinned to a fixed revision for
# reproducible evaluation runs.
from opencompass.models import HuggingFaceCausalLM


_repo = 'mosaicml/mpt-7b'

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='mpt-7b-hf',
        path=_repo,
        tokenizer_path=_repo,
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
            max_seq_len=4096,
            revision='68e1a8e0ebb9b30f3c45c1ef6195980f29063ae2',
        ),
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
27
configs/models/hf_mpt_instruct_7b.py
Normal file
27
configs/models/hf_mpt_instruct_7b.py
Normal file
@ -0,0 +1,27 @@
|
||||
# Model config: mosaicml/mpt-7b-instruct, pinned to a fixed revision for
# reproducible evaluation runs.
from opencompass.models import HuggingFaceCausalLM


models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='mpt-instruct-7b-hf',
        path='mosaicml/mpt-7b-instruct',
        tokenizer_path='mosaicml/mpt-7b-instruct',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
            max_seq_len=4096,
            revision='68e1a8e0ebb9b30f3c45c1ef6195980f29063ae2',
        ),
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
21
configs/models/hf_tigerbot_7b_base.py
Normal file
21
configs/models/hf_tigerbot_7b_base.py
Normal file
@ -0,0 +1,21 @@
|
||||
# Model config: TigerResearch/tigerbot-7b-base, pinned to a fixed revision
# for reproducible evaluation runs.
from opencompass.models import HuggingFaceCausalLM


_repo = 'TigerResearch/tigerbot-7b-base'

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='tigerbot-base-7b-hf',
        path=_repo,
        tokenizer_path=_repo,
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
            revision='5f34fd223586e9efb8eb0e3bc667f03581886992',
        ),
        run_cfg=dict(num_gpus=1, num_procs=1),
    ),
]
@ -1,17 +1,17 @@
|
||||
from opencompass.models import HuggingFaceCausalLM
|
||||
|
||||
|
||||
_meta_template = dict(
|
||||
round=[
|
||||
dict(role='HUMAN', begin='\n\n### Instruction:\n:'),
|
||||
dict(role='HUMAN', begin='\n\n### Instruction:\n:'),
|
||||
dict(role='BOT', begin='\n\n### Response:\n:', generate=True),
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
models = [
|
||||
dict(
|
||||
type=HuggingFaceCausalLM,
|
||||
abbr='TigerBot-SFT',
|
||||
abbr='tigerbot-sft-7b-hf',
|
||||
path="TigerResearch/tigerbot-7b-sft",
|
||||
tokenizer_path='TigerResearch/tigerbot-7b-sft',
|
||||
tokenizer_kwargs=dict(
|
22
configs/models/hf_vicuna_v1.3_13b.py
Normal file
22
configs/models/hf_vicuna_v1.3_13b.py
Normal file
@ -0,0 +1,22 @@
|
||||
# Model config: lmsys/vicuna-13b-v1.3 as a HF causal LM.
from opencompass.models import HuggingFaceCausalLM


models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='vicuna-13b-hf',
        path='lmsys/vicuna-13b-v1.3',
        tokenizer_path='lmsys/vicuna-13b-v1.3',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(device_map='auto'),
        batch_padding=False,  # if false, inference with for-loop without batch padding
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]
22
configs/models/hf_vicuna_v1.3_33b.py
Normal file
22
configs/models/hf_vicuna_v1.3_33b.py
Normal file
@ -0,0 +1,22 @@
|
||||
# Model config: lmsys/vicuna-33b-v1.3 as a HF causal LM.
from opencompass.models import HuggingFaceCausalLM


models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='vicuna-33b-hf',
        path='lmsys/vicuna-33b-v1.3',
        tokenizer_path='lmsys/vicuna-33b-v1.3',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(device_map='auto'),
        batch_padding=False,  # if false, inference with for-loop without batch padding
        run_cfg=dict(num_gpus=4, num_procs=1),
    )
]
22
configs/models/hf_vicuna_v1.3_7b.py
Normal file
22
configs/models/hf_vicuna_v1.3_7b.py
Normal file
@ -0,0 +1,22 @@
|
||||
# Model config: lmsys/vicuna-7b-v1.3 as a HF causal LM.
from opencompass.models import HuggingFaceCausalLM


models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='vicuna-7b-hf',
        path='lmsys/vicuna-7b-v1.3',
        tokenizer_path='lmsys/vicuna-7b-v1.3',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(device_map='auto'),
        batch_padding=False,  # if false, inference with for-loop without batch padding
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
24
configs/models/hf_wizardlm_7b.py
Normal file
24
configs/models/hf_wizardlm_7b.py
Normal file
@ -0,0 +1,24 @@
|
||||
# Model config: TheBloke/wizardLM-7B-HF as a HF causal LM.
from opencompass.models import HuggingFaceCausalLM


_repo = 'TheBloke/wizardLM-7B-HF'

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='wizardlm-7b-hf',
        path=_repo,
        tokenizer_path=_repo,
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
Loading…
Reference in New Issue
Block a user