OpenCompass/configs/models/mixtral/hf_mixtral_8x22b_v0_1.py

from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        abbr='mixtral-8x22b-v0.1',
        type=HuggingFaceCausalLM,
        # Hugging Face Hub IDs for the base (non-instruct) Mixtral 8x22B model
        path='mistralai/Mixtral-8x22B-v0.1',
        tokenizer_path='mistralai/Mixtral-8x22B-v0.1',
        model_kwargs=dict(
            # Shard the model across all visible GPUs
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            # Left-pad and left-truncate so the end of the prompt is preserved
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,   # maximum number of tokens to generate
        max_seq_len=2048,  # maximum input sequence length
        batch_size=8,
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]
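
# Usage sketch (an assumption, not part of the original config): OpenCompass
# selects model configs by their file stem, so an evaluation with this model
# is typically launched from the repository root via the run.py CLI, e.g.
#
#   python run.py --models hf_mixtral_8x22b_v0_1 --datasets mmlu_ppl
#
# The dataset name above is illustrative; any dataset config shipped with
# OpenCompass can be substituted. Note that Mixtral 8x22B is a large
# mixture-of-experts model, so run_cfg's num_gpus=2 presumes GPUs with enough
# combined memory to hold the weights sharded by device_map='auto'.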