diff --git a/opencompass/configs/models/chatglm/hf_glm4_9b.py b/opencompass/configs/models/chatglm/hf_glm4_9b.py
new file mode 100644
index 00000000..d0d79d33
--- /dev/null
+++ b/opencompass/configs/models/chatglm/hf_glm4_9b.py
@@ -0,0 +1,12 @@
+from opencompass.models import HuggingFaceBaseModel
+
+models = [
+    dict(
+        type=HuggingFaceBaseModel,
+        abbr='glm-4-9b-hf',
+        path='THUDM/glm-4-9b',
+        max_out_len=1024,
+        batch_size=8,
+        run_cfg=dict(num_gpus=1),
+    )
+]
diff --git a/opencompass/configs/models/gemma/lmdeploy_gemma_27b_it.py b/opencompass/configs/models/gemma/lmdeploy_gemma_27b_it.py
new file mode 100644
index 00000000..c2cb48e3
--- /dev/null
+++ b/opencompass/configs/models/gemma/lmdeploy_gemma_27b_it.py
@@ -0,0 +1,17 @@
+from opencompass.models import TurboMindModelwithChatTemplate
+
+models = [
+    dict(
+        type=TurboMindModelwithChatTemplate,
+        abbr='gemma-2-27b-it-turbomind',
+        path='google/gemma-2-27b-it',
+        engine_config=dict(session_len=16384, max_batch_size=16, tp=1),
+        gen_config=dict(
+            top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=4096
+        ),
+        max_seq_len=16384,
+        max_out_len=4096,
+        batch_size=16,
+        run_cfg=dict(num_gpus=1),
+    )
+]
diff --git a/opencompass/configs/models/gemma/lmdeploy_gemma_9b_it.py b/opencompass/configs/models/gemma/lmdeploy_gemma_9b_it.py
new file mode 100644
index 00000000..b33a5d52
--- /dev/null
+++ b/opencompass/configs/models/gemma/lmdeploy_gemma_9b_it.py
@@ -0,0 +1,17 @@
+from opencompass.models import TurboMindModelwithChatTemplate
+
+models = [
+    dict(
+        type=TurboMindModelwithChatTemplate,
+        abbr='gemma-2-9b-it-turbomind',
+        path='google/gemma-2-9b-it',
+        engine_config=dict(session_len=16384, max_batch_size=16, tp=1),
+        gen_config=dict(
+            top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=4096
+        ),
+        max_seq_len=16384,
+        max_out_len=4096,
+        batch_size=16,
+        run_cfg=dict(num_gpus=1),
+    )
+]
diff --git a/opencompass/configs/models/qwen2_5/hf_qwen_2_5_14b.py b/opencompass/configs/models/qwen2_5/hf_qwen_2_5_14b.py
new file mode 100644
index 00000000..2f64872f
--- /dev/null
+++ b/opencompass/configs/models/qwen2_5/hf_qwen_2_5_14b.py
@@ -0,0 +1,12 @@
+from opencompass.models import HuggingFaceBaseModel
+
+models = [
+    dict(
+        type=HuggingFaceBaseModel,
+        abbr='qwen2.5-14b-hf',
+        path='Qwen/Qwen2.5-14B',
+        max_out_len=1024,
+        batch_size=8,
+        run_cfg=dict(num_gpus=2),
+    )
+]
diff --git a/opencompass/configs/models/qwen2_5/hf_qwen_2_5_32b.py b/opencompass/configs/models/qwen2_5/hf_qwen_2_5_32b.py
new file mode 100644
index 00000000..ddd27f7f
--- /dev/null
+++ b/opencompass/configs/models/qwen2_5/hf_qwen_2_5_32b.py
@@ -0,0 +1,12 @@
+from opencompass.models import HuggingFaceBaseModel
+
+models = [
+    dict(
+        type=HuggingFaceBaseModel,
+        abbr='qwen2.5-32b-hf',
+        path='Qwen/Qwen2.5-32B',
+        max_out_len=1024,
+        batch_size=8,
+        run_cfg=dict(num_gpus=2),
+    )
+]
diff --git a/opencompass/configs/models/qwen2_5/hf_qwen_2_5_7b.py b/opencompass/configs/models/qwen2_5/hf_qwen_2_5_7b.py
new file mode 100644
index 00000000..579950c6
--- /dev/null
+++ b/opencompass/configs/models/qwen2_5/hf_qwen_2_5_7b.py
@@ -0,0 +1,12 @@
+from opencompass.models import HuggingFaceBaseModel
+
+models = [
+    dict(
+        type=HuggingFaceBaseModel,
+        abbr='qwen2.5-7b-hf',
+        path='Qwen/Qwen2.5-7B',
+        max_out_len=1024,
+        batch_size=8,
+        run_cfg=dict(num_gpus=1),
+    )
+]