add model configs

This commit is contained in:
DseidLi 2024-03-19 16:03:40 +08:00
parent 75425acdf8
commit 2527fda8a5
3 changed files with 90 additions and 0 deletions

View File

@ -0,0 +1,33 @@
from opencompass.models.turbomind import TurboMindModel

# Chat prompt wrapper: alternating user/assistant turns delimited by
# <|im_start|>/<|im_end|> markers; 92542 is the model's <|im_end|> token id.
_meta_template = {
    'round': [
        {'role': 'HUMAN', 'begin': '<|im_start|>user\n', 'end': '<|im_end|>\n'},
        {'role': 'BOT', 'begin': '<|im_start|>assistant\n', 'end': '<|im_end|>\n',
         'generate': True},
    ],
    'eos_token_id': 92542,
}

# internlm2-chat-20b served through the TurboMind engine with a long
# (210k-token) session and RoPE scaling; runs tensor-parallel on 2 GPUs.
models = [
    {
        'type': TurboMindModel,
        'abbr': 'internlm2-chat-20b-turbomind',
        'path': 'internlm/internlm2-chat-20b',
        'meta_template': _meta_template,
        'engine_config': {
            'session_len': 210000,
            'max_batch_size': 8,
            'rope_scaling_factor': 3.0,
            'model_name': 'internlm2-chat-20b',
            'tp': 2,
        },
        # top_k=1 makes decoding effectively greedy despite temperature=1.0.
        'gen_config': {
            'top_k': 1,
            'top_p': 0.8,
            'temperature': 1.0,
            'max_new_tokens': 2000,
        },
        'max_out_len': 2000,
        'max_seq_len': 210000,
        # batch_size=1 at evaluation time — presumably to fit the 210k context
        # for the 20b model in memory; TODO confirm.
        'batch_size': 1,
        'concurrency': 8,
        'run_cfg': {'num_gpus': 2, 'num_procs': 1},
    },
]

View File

@ -0,0 +1,32 @@
from opencompass.models.turbomind import TurboMindModel

# Chat prompt wrapper: alternating user/assistant turns delimited by
# <|im_start|>/<|im_end|> markers; 92542 is the model's <|im_end|> token id.
_meta_template = {
    'round': [
        {'role': 'HUMAN', 'begin': '<|im_start|>user\n', 'end': '<|im_end|>\n'},
        {'role': 'BOT', 'begin': '<|im_start|>assistant\n', 'end': '<|im_end|>\n',
         'generate': True},
    ],
    'eos_token_id': 92542,
}

# internlm2-chat-7b served through the TurboMind engine with a long
# (210k-token) session and RoPE scaling; fits on a single GPU.
models = [
    {
        'type': TurboMindModel,
        'abbr': 'internlm2-chat-7b-turbomind',
        'path': 'internlm/internlm2-chat-7b',
        'meta_template': _meta_template,
        'engine_config': {
            'session_len': 210000,
            'max_batch_size': 8,
            'rope_scaling_factor': 2.0,
            'model_name': 'internlm2-chat-7b',
        },
        # top_k=1 makes decoding effectively greedy despite temperature=1.0.
        'gen_config': {
            'top_k': 1,
            'top_p': 0.8,
            'temperature': 1.0,
            'max_new_tokens': 2000,
        },
        'max_out_len': 2000,
        'max_seq_len': 210000,
        'batch_size': 8,
        'concurrency': 8,
        'run_cfg': {'num_gpus': 1, 'num_procs': 1},
    },
]

View File

@ -0,0 +1,25 @@
from opencompass.models import VLLM

# ChatML-style prompt wrapper for Qwen: user/assistant turns delimited by
# <|im_start|>/<|im_end|> markers.
_meta_template = {
    'round': [
        {'role': 'HUMAN', 'begin': '\n<|im_start|>user\n', 'end': '<|im_end|>'},
        {'role': 'BOT', 'begin': '\n<|im_start|>assistant\n', 'end': '<|im_end|>',
         'generate': True},
    ],
}

# Qwen-14B-Chat served through vLLM, tensor-parallel across 4 GPUs.
models = [
    {
        'type': VLLM,
        'abbr': 'qwen-14b-chat-vllm',
        'path': 'Qwen/Qwen-14B-Chat',
        'model_kwargs': {'tensor_parallel_size': 4},
        'meta_template': _meta_template,
        'max_out_len': 100,
        'max_seq_len': 2048,
        'batch_size': 32,
        # temperature=0 selects deterministic (greedy) sampling in vLLM.
        'generation_kwargs': {'temperature': 0},
        # Truncate generations at the turn-end marker.
        'end_str': '<|im_end|>',
        'run_cfg': {'num_gpus': 4, 'num_procs': 1},
    },
]