[Fix] rename qwen2-beta -> qwen1.5 (#894)

This commit is contained in:
Fengzhe Zhou 2024-02-19 14:55:35 +08:00 committed by GitHub
parent b6e21ece38
commit 9119e2ac39
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
16 changed files with 88 additions and 88 deletions

View File

@@ -3,9 +3,9 @@ from opencompass.models import HuggingFaceCausalLM
models = [
dict(
type=HuggingFaceCausalLM,
abbr='qwen2-beta-7b-hf',
path="Qwen/Qwen2-beta-7B",
tokenizer_path='Qwen/Qwen2-beta-7B',
abbr='qwen1.5-0.5b-hf',
path="Qwen/Qwen1.5-0.5B",
tokenizer_path='Qwen/Qwen1.5-0.5B',
model_kwargs=dict(
device_map='auto',
trust_remote_code=True

View File

@@ -11,8 +11,8 @@ _meta_template = dict(
models = [
dict(
type=HuggingFaceCausalLM,
abbr='qwen2-beta-7b-chat-hf',
path="Qwen/Qwen2-beta-7B-Chat",
abbr='qwen1.5-0.5b-chat-hf',
path="Qwen/Qwen1.5-0.5B-Chat",
model_kwargs=dict(
device_map='auto',
trust_remote_code=True

View File

@@ -3,9 +3,9 @@ from opencompass.models import HuggingFaceCausalLM
models = [
dict(
type=HuggingFaceCausalLM,
abbr='qwen2-beta-4b-hf',
path="Qwen/Qwen2-beta-4B",
tokenizer_path='Qwen/Qwen2-beta-4B',
abbr='qwen1.5-14b-hf',
path="Qwen/Qwen1.5-14B",
tokenizer_path='Qwen/Qwen1.5-14B',
model_kwargs=dict(
device_map='auto',
trust_remote_code=True

View File

@@ -11,8 +11,8 @@ _meta_template = dict(
models = [
dict(
type=HuggingFaceCausalLM,
abbr='qwen2-beta-4b-chat-hf',
path="Qwen/Qwen2-beta-4B-Chat",
abbr='qwen1.5-14b-chat-hf',
path="Qwen/Qwen1.5-14B-Chat",
model_kwargs=dict(
device_map='auto',
trust_remote_code=True

View File

@@ -3,9 +3,9 @@ from opencompass.models import HuggingFaceCausalLM
models = [
dict(
type=HuggingFaceCausalLM,
abbr='qwen2-beta-14b-hf',
path="Qwen/Qwen2-beta-14B",
tokenizer_path='Qwen/Qwen2-beta-14B',
abbr='qwen1.5-1.8b-hf',
path="Qwen/Qwen1.5-1.8B",
tokenizer_path='Qwen/Qwen1.5-1.8B',
model_kwargs=dict(
device_map='auto',
trust_remote_code=True

View File

@@ -11,8 +11,8 @@ _meta_template = dict(
models = [
dict(
type=HuggingFaceCausalLM,
abbr='qwen2-beta-14b-chat-hf',
path="Qwen/Qwen2-beta-14B-Chat",
abbr='qwen1.5-1.8b-chat-hf',
path="Qwen/Qwen1.5-1.8B-Chat",
model_kwargs=dict(
device_map='auto',
trust_remote_code=True

View File

@@ -3,9 +3,9 @@ from opencompass.models import HuggingFaceCausalLM
models = [
dict(
type=HuggingFaceCausalLM,
abbr='qwen2-beta-1.8b-hf',
path="Qwen/Qwen2-beta-1_8B",
tokenizer_path='Qwen/Qwen2-beta-1_8B',
abbr='qwen1.5-4b-hf',
path="Qwen/Qwen1.5-4B",
tokenizer_path='Qwen/Qwen1.5-4B',
model_kwargs=dict(
device_map='auto',
trust_remote_code=True

View File

@@ -11,8 +11,8 @@ _meta_template = dict(
models = [
dict(
type=HuggingFaceCausalLM,
abbr='qwen2-beta-0.5b-chat-hf',
path="Qwen/Qwen2-beta-0_5B-Chat",
abbr='qwen1.5-4b-chat-hf',
path="Qwen/Qwen1.5-4B-Chat",
model_kwargs=dict(
device_map='auto',
trust_remote_code=True

View File

@@ -3,9 +3,9 @@ from opencompass.models import HuggingFaceCausalLM
models = [
dict(
type=HuggingFaceCausalLM,
abbr='qwen2-beta-72b-hf',
path="Qwen/Qwen2-beta-72B",
tokenizer_path='Qwen/Qwen2-beta-72B',
abbr='qwen1.5-72b-hf',
path="Qwen/Qwen1.5-72B",
tokenizer_path='Qwen/Qwen1.5-72B',
model_kwargs=dict(
device_map='auto',
trust_remote_code=True

View File

@@ -11,8 +11,8 @@ _meta_template = dict(
models = [
dict(
type=HuggingFaceCausalLM,
abbr='qwen2-beta-72b-chat-hf',
path="Qwen/Qwen2-beta-72B-Chat",
abbr='qwen1.5-72b-chat-hf',
path="Qwen/Qwen1.5-72B-Chat",
model_kwargs=dict(
device_map='auto',
trust_remote_code=True

View File

@@ -0,0 +1,25 @@
from opencompass.models import HuggingFaceCausalLM
models = [
dict(
type=HuggingFaceCausalLM,
abbr='qwen1.5-7b-hf',
path="Qwen/Qwen1.5-7B",
tokenizer_path='Qwen/Qwen1.5-7B',
model_kwargs=dict(
device_map='auto',
trust_remote_code=True
),
tokenizer_kwargs=dict(
padding_side='left',
truncation_side='left',
trust_remote_code=True,
use_fast=False,
),
pad_token_id=151645,
max_out_len=100,
max_seq_len=2048,
batch_size=8,
run_cfg=dict(num_gpus=1, num_procs=1),
)
]

View File

@@ -0,0 +1,34 @@
from opencompass.models import HuggingFaceCausalLM
_meta_template = dict(
round=[
dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n', generate=True),
],
eos_token_id=151645,
)
models = [
dict(
type=HuggingFaceCausalLM,
abbr='qwen1.5-7b-chat-hf',
path="Qwen/Qwen1.5-7B-Chat",
model_kwargs=dict(
device_map='auto',
trust_remote_code=True
),
tokenizer_kwargs=dict(
padding_side='left',
truncation_side='left',
trust_remote_code=True,
use_fast=False,
),
meta_template=_meta_template,
pad_token_id=151645,
max_out_len=100,
max_seq_len=2048,
batch_size=8,
run_cfg=dict(num_gpus=1, num_procs=1),
end_str='<|im_end|>',
)
]

View File

@@ -1,25 +0,0 @@
from opencompass.models import HuggingFaceCausalLM
models = [
dict(
type=HuggingFaceCausalLM,
abbr='qwen2-beta-0.5b-hf',
path="Qwen/Qwen2-beta-0_5B",
tokenizer_path='Qwen/Qwen2-beta-0_5B',
model_kwargs=dict(
device_map='auto',
trust_remote_code=True
),
tokenizer_kwargs=dict(
padding_side='left',
truncation_side='left',
trust_remote_code=True,
use_fast=False,
),
pad_token_id=151645,
max_out_len=100,
max_seq_len=2048,
batch_size=8,
run_cfg=dict(num_gpus=1, num_procs=1),
)
]

View File

@@ -1,34 +0,0 @@
from opencompass.models import HuggingFaceCausalLM
_meta_template = dict(
round=[
dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n', generate=True),
],
eos_token_id=151645,
)
models = [
dict(
type=HuggingFaceCausalLM,
abbr='qwen2-beta-1.8b-chat-hf',
path="Qwen/Qwen2-beta-1_8B-Chat",
model_kwargs=dict(
device_map='auto',
trust_remote_code=True
),
tokenizer_kwargs=dict(
padding_side='left',
truncation_side='left',
trust_remote_code=True,
use_fast=False,
),
meta_template=_meta_template,
pad_token_id=151645,
max_out_len=100,
max_seq_len=2048,
batch_size=8,
run_cfg=dict(num_gpus=1, num_procs=1),
end_str='<|im_end|>',
)
]

View File

@@ -4,8 +4,8 @@ from opencompass.models import VLLM
models = [
dict(
type=VLLM,
abbr='qwen2-beta-72b-vllm',
path="Qwen/Qwen2-beta-72B",
abbr='qwen1.5-72b-vllm',
path="Qwen/Qwen1.5-72B",
model_kwargs=dict(tensor_parallel_size=4),
max_out_len=100,
max_seq_len=2048,

View File

@@ -12,8 +12,8 @@ _meta_template = dict(
models = [
dict(
type=VLLM,
abbr='qwen2-beta-72b-chat-vllm',
path="Qwen/Qwen2-beta-72B-Chat",
abbr='qwen1.5-72b-chat-vllm',
path="Qwen/Qwen1.5-72B-Chat",
model_kwargs=dict(tensor_parallel_size=4),
meta_template=_meta_template,
max_out_len=100,