Mirror of https://github.com/open-compass/opencompass.git, synced 2025-05-30 16:03:24 +08:00.

* add TheoremQA with 5-shot * add huggingface_above_v4_33 classes * use num_worker partitioner in cli * update theoremqa * update TheoremQA * add TheoremQA * rename theoremqa -> TheoremQA * update TheoremQA output path * rewrite many model configs * update huggingface * further update * refine configs * update configs * update configs * add configs/eval_llama3_instruct.py * add summarizer multi faceted * update bbh datasets * update configs/models/hf_llama/lmdeploy_llama3_8b_instruct.py * rename class * update readme * update hf above v4.33
37 lines · 942 B · Python
from opencompass.models.turbomind import TurboMindModel
_meta_template = dict(
|
|
round=[
|
|
dict(role="HUMAN", begin="<|im_start|>user\n", end="<|im_end|>\n"),
|
|
dict(role="BOT", begin="<|im_start|>assistant\n", end="<|im_end|>\n", generate=True),
|
|
],
|
|
)
|
|
|
|
# Single evaluation target: internlm2-chat-7b served through the TurboMind
# backend, running on one GPU.
models = [
    {
        "type": TurboMindModel,
        "abbr": "internlm2-chat-7b-turbomind",
        # Hub path the backend loads the weights from.
        "path": "internlm/internlm2-chat-7b",
        "meta_template": _meta_template,
        # Inference-engine options.
        "engine_config": {
            "session_len": 32768,
            "max_batch_size": 32,
            "model_name": "internlm2-chat-7b",
            "tp": 1,  # presumably tensor-parallel degree — matches num_gpus below
            "stop_words": [2, 92542],  # token ids that end generation
        },
        # Decoding parameters; top_k=1 makes sampling effectively greedy,
        # so top_p/temperature have no practical effect here.
        "gen_config": {
            "top_k": 1,
            "top_p": 0.8,
            "temperature": 1.0,
            "max_new_tokens": 2000,
        },
        "max_out_len": 2000,
        "max_seq_len": 32768,
        "batch_size": 32,
        "concurrency": 8,
        "run_cfg": {"num_gpus": 1, "num_procs": 1},
    }
]