OpenCompass/configs/models/qwen/hf_qwen1_5_moe_a2_7b.py
Alexander Lam a71122ee18
[Feature] Add Qwen1.5 MoE 7b and Mixtral 8x22b model configs (#1123)
* added qwen moe and mixtral 8x22 model configs

* updated README files news section
2024-05-09 11:04:26 +08:00

from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen1.5-moe-a2-7b-hf',
        # HuggingFace Hub ID used for both the model weights and the tokenizer
        path='Qwen/Qwen1.5-MoE-A2.7B',
        tokenizer_path='Qwen/Qwen1.5-MoE-A2.7B',
        model_kwargs=dict(
            device_map='auto',  # let accelerate place layers across available devices
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        pad_token_id=151645,  # '<|im_end|>' in the Qwen1.5 vocabulary
        max_out_len=100,      # maximum number of tokens generated per sample
        max_seq_len=2048,     # maximum total sequence length
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
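
A minimal sketch of how a model config like this is typically consumed: OpenCompass evaluation entry points pull the models and datasets lists in via mmengine's read_base(). The entry-point file name eval_qwen1_5_moe.py and the gsm8k_gen dataset pairing below are illustrative choices, not part of this commit.

# configs/eval_qwen1_5_moe.py (hypothetical entry-point config)
from mmengine.config import read_base

with read_base():
    # relative imports inside read_base() resolve against the configs/ tree
    from .models.qwen.hf_qwen1_5_moe_a2_7b import models
    from .datasets.gsm8k.gsm8k_gen import gsm8k_datasets

datasets = gsm8k_datasets

This would be launched with, e.g., python run.py configs/eval_qwen1_5_moe.py; the same pairing can also be selected directly from the CLI with python run.py --models hf_qwen1_5_moe_a2_7b --datasets gsm8k_gen.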