Mirror of https://github.com/open-compass/opencompass.git — synced 2025-05-30 16:03:24 +08:00

* add TheoremQA with 5-shot * add huggingface_above_v4_33 classes * use num_worker partitioner in cli * update theoremqa * update TheoremQA * add TheoremQA * rename theoremqa -> TheoremQA * update TheoremQA output path * rewrite many model configs * update huggingface * further update * refine configs * update configs * update configs * add configs/eval_llama3_instruct.py * add summarizer multi faceted * update bbh datasets * update configs/models/hf_llama/lmdeploy_llama3_8b_instruct.py * rename class * update readme * update hf above v4.33
19 lines · 543 B · Python
from opencompass.models import Mixtral
# Please follow the instruction in https://github.com/open-compass/MixtralKit
# to download the model weights and install the requirements
# Model configurations under evaluation.
# A single entry: the base Mixtral-8x7B model with 32k sequence-length
# weights served locally (see the MixtralKit note at the top of this file
# for how to obtain the weights and tokenizer).
models = [
    dict(
        # Unique short name used in result tables and output paths.
        abbr='mixtral-8x7b-32k',
        # Model wrapper class that loads and runs the checkpoint.
        type=Mixtral,
        # Local checkpoint directory and its SentencePiece tokenizer file.
        path='./models/mixtral/mixtral-8x7b-32kseqlen',
        tokenizer_path='./models/mixtral/mixtral-8x7b-32kseqlen/tokenizer.model',
        # Generation/runtime limits.
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        # GPU count is given both to the model wrapper and to the runner
        # config (num_procs=1: a single process drives both GPUs).
        num_gpus=2,
        run_cfg=dict(num_gpus=2, num_procs=1),
    ),
]