mirror of
https://github.com/open-compass/opencompass.git
synced 2025-05-30 16:03:24 +08:00
add model configs
This commit is contained in:
parent
75425acdf8
commit
2527fda8a5
33
configs/models/hf_internlm/lmdeploy_internlm2_chat_20b.py
Normal file
33
configs/models/hf_internlm/lmdeploy_internlm2_chat_20b.py
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
from opencompass.models.turbomind import TurboMindModel

# InternLM2 chat template: each turn is wrapped in ChatML-style
# <|im_start|>/<|im_end|> markers; the BOT round is the generated one.
# 92542 is the <|im_end|> token id used as EOS during generation.
_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role='BOT',
             begin='<|im_start|>assistant\n',
             end='<|im_end|>\n',
             generate=True),
    ],
    eos_token_id=92542,
)

# TurboMind engine settings: long-context evaluation (210k session with
# RoPE scaling 3.0), sharded over 2 GPUs (tp=2).
_engine_cfg = dict(
    session_len=210000,
    max_batch_size=8,
    rope_scaling_factor=3.0,
    model_name="internlm2-chat-20b",
    tp=2,
)

# Greedy-ish decoding (top_k=1) with a generous generation budget.
_gen_cfg = dict(
    top_k=1,
    top_p=0.8,
    temperature=1.0,
    max_new_tokens=2000,
)

models = [
    dict(
        type=TurboMindModel,
        abbr='internlm2-chat-20b-turbomind',
        path="internlm/internlm2-chat-20b",
        meta_template=_meta_template,
        engine_config=_engine_cfg,
        gen_config=_gen_cfg,
        max_out_len=2000,
        max_seq_len=210000,
        batch_size=1,
        concurrency=8,
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]
|
32
configs/models/hf_internlm/lmdeploy_internlm2_chat_7b.py
Normal file
32
configs/models/hf_internlm/lmdeploy_internlm2_chat_7b.py
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
from opencompass.models.turbomind import TurboMindModel

# InternLM2 chat template: ChatML-style <|im_start|>/<|im_end|> turn
# markers, generating on the BOT round. 92542 is the <|im_end|> token id.
_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role='BOT',
             begin='<|im_start|>assistant\n',
             end='<|im_end|>\n',
             generate=True),
    ],
    eos_token_id=92542,
)

# TurboMind engine settings: 210k-token sessions with RoPE scaling 2.0;
# single GPU (no tp override, engine default applies).
_engine_cfg = dict(
    session_len=210000,
    max_batch_size=8,
    rope_scaling_factor=2.0,
    model_name="internlm2-chat-7b",
)

# Near-greedy decoding (top_k=1) with up to 2000 new tokens per sample.
_gen_cfg = dict(
    top_k=1,
    top_p=0.8,
    temperature=1.0,
    max_new_tokens=2000,
)

models = [
    dict(
        type=TurboMindModel,
        abbr='internlm2-chat-7b-turbomind',
        path="internlm/internlm2-chat-7b",
        meta_template=_meta_template,
        engine_config=_engine_cfg,
        gen_config=_gen_cfg,
        max_out_len=2000,
        max_seq_len=210000,
        batch_size=8,
        concurrency=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
|
25
configs/models/qwen/vllm_qwen_14b_chat.py
Normal file
25
configs/models/qwen/vllm_qwen_14b_chat.py
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
from opencompass.models import VLLM

# Qwen chat template (ChatML): turns framed by <|im_start|>/<|im_end|>,
# with a leading newline before each marker; BOT round is generated.
_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='\n<|im_start|>user\n', end='<|im_end|>'),
        dict(role="BOT",
             begin="\n<|im_start|>assistant\n",
             end='<|im_end|>',
             generate=True),
    ],
)

models = [
    dict(
        type=VLLM,
        abbr='qwen-14b-chat-vllm',
        path="Qwen/Qwen-14B-Chat",
        # Shard the model across 4 GPUs inside vLLM.
        model_kwargs=dict(tensor_parallel_size=4),
        meta_template=_meta_template,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=32,
        # temperature=0 -> deterministic (greedy) sampling in vLLM.
        generation_kwargs=dict(temperature=0),
        # Trim completions at the ChatML end-of-turn marker.
        end_str='<|im_end|>',
        run_cfg=dict(num_gpus=4, num_procs=1),
    )
]
|
Loading…
Reference in New Issue
Block a user