diff --git a/opencompass/configs/models/gemma/vllm_gemma_3_12b_it.py b/opencompass/configs/models/gemma/vllm_gemma_3_12b_it.py
new file mode 100644
index 00000000..2914640f
--- /dev/null
+++ b/opencompass/configs/models/gemma/vllm_gemma_3_12b_it.py
@@ -0,0 +1,16 @@
+from opencompass.models import VLLMwithChatTemplate
+
+models = [
+    dict(
+        type=VLLMwithChatTemplate,
+        abbr='gemma-3-12b-it-vllm',
+        path='google/gemma-3-12b-it',
+        model_kwargs=dict(tensor_parallel_size=4,
+                          # for long context
+                          rope_scaling={'factor': 8.0, 'rope_type': 'linear'}),
+        max_out_len=4096,
+        batch_size=1,
+        generation_kwargs=dict(temperature=0),
+        run_cfg=dict(num_gpus=4),
+    )
+]
diff --git a/opencompass/configs/models/gemma/vllm_gemma_3_27b_it.py b/opencompass/configs/models/gemma/vllm_gemma_3_27b_it.py
new file mode 100644
index 00000000..b6f4b93b
--- /dev/null
+++ b/opencompass/configs/models/gemma/vllm_gemma_3_27b_it.py
@@ -0,0 +1,16 @@
+from opencompass.models import VLLMwithChatTemplate
+
+models = [
+    dict(
+        type=VLLMwithChatTemplate,
+        abbr='gemma-3-27b-it-vllm',
+        path='google/gemma-3-27b-it',
+        model_kwargs=dict(tensor_parallel_size=4,
+                          # for long context
+                          rope_scaling={'factor': 8.0, 'rope_type': 'linear'}),
+        max_out_len=4096,
+        batch_size=1,
+        generation_kwargs=dict(temperature=0),
+        run_cfg=dict(num_gpus=4),
+    )
+]
diff --git a/opencompass/configs/models/gemma/vllm_gemma_3_4b_it.py b/opencompass/configs/models/gemma/vllm_gemma_3_4b_it.py
new file mode 100644
index 00000000..22516ff7
--- /dev/null
+++ b/opencompass/configs/models/gemma/vllm_gemma_3_4b_it.py
@@ -0,0 +1,17 @@
+from opencompass.models import VLLMwithChatTemplate
+
+models = [
+    dict(
+        type=VLLMwithChatTemplate,
+        abbr='gemma-3-4b-it-vllm',
+        path='google/gemma-3-4b-it',
+        model_kwargs=dict(tensor_parallel_size=2,
+                          # for long context
+                          rope_scaling={'factor': 8.0, 'rope_type': 'linear'}),
+        max_seq_len=140000,
+        max_out_len=4096,
+        batch_size=1,
+        generation_kwargs=dict(temperature=0),
+        run_cfg=dict(num_gpus=2),
+    )
+]
diff --git a/opencompass/configs/models/hf_internlm/lmdeploy_internlm3_8b_instruct_128k.py b/opencompass/configs/models/hf_internlm/lmdeploy_internlm3_8b_instruct_128k.py
new file mode 100644
index 00000000..1cc4e251
--- /dev/null
+++ b/opencompass/configs/models/hf_internlm/lmdeploy_internlm3_8b_instruct_128k.py
@@ -0,0 +1,19 @@
+from opencompass.models import TurboMindModelwithChatTemplate
+
+models = [
+    dict(
+        type=TurboMindModelwithChatTemplate,
+        abbr='internlm3-8b-instruct-turbomind',
+        path='internlm/internlm3-8b-instruct',
+        engine_config=dict(session_len=142000, max_batch_size=1, tp=2,
+                           # for long context
+                           rope_scaling_factor=6.0),
+        gen_config=dict(
+            top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=8192
+        ),
+        max_seq_len=142000,
+        max_out_len=8192,
+        batch_size=1,
+        run_cfg=dict(num_gpus=2),
+    )
+]
diff --git a/opencompass/configs/models/hf_internlm/lmdeploy_oreal_32b.py b/opencompass/configs/models/hf_internlm/lmdeploy_oreal_32b.py
new file mode 100644
index 00000000..1d10bd94
--- /dev/null
+++ b/opencompass/configs/models/hf_internlm/lmdeploy_oreal_32b.py
@@ -0,0 +1,20 @@
+from opencompass.models import TurboMindModelwithChatTemplate
+from opencompass.utils.text_postprocessors import extract_non_reasoning_content
+
+models = [
+    dict(
+        type=TurboMindModelwithChatTemplate,
+        abbr='OREAL-32B',
+        path='internlm/OREAL-32B',
+        engine_config=dict(session_len=32768, max_batch_size=16, tp=4),
+        gen_config=dict(top_k=1,
+                        temperature=1e-6,
+                        top_p=0.9,
+                        max_new_tokens=32768),
+        max_seq_len=32768,
+        max_out_len=32768,
+        batch_size=16,
+        run_cfg=dict(num_gpus=4),
+        pred_postprocessor=dict(type=extract_non_reasoning_content)
+    )
+]
diff --git a/opencompass/configs/models/qwen2_5/vllm_qwen2_5_14b_instruct_128k.py b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_14b_instruct_128k.py
new file mode 100644
index 00000000..6dec3743
--- /dev/null
+++ b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_14b_instruct_128k.py
@@ -0,0 +1,21 @@
+from opencompass.models import VLLMwithChatTemplate
+
+models = [
+    dict(
+        type=VLLMwithChatTemplate,
+        abbr='qwen2.5-14b-instruct-vllm',
+        path='Qwen/Qwen2.5-14B-Instruct',
+        model_kwargs=dict(
+            tensor_parallel_size=4,
+            rope_scaling={
+                'factor': 4.0,
+                'original_max_position_embeddings': 32768,
+                'rope_type': 'yarn'
+            },
+        ),
+        max_out_len=4096,
+        batch_size=1,
+        generation_kwargs=dict(temperature=0),
+        run_cfg=dict(num_gpus=4),
+    )
+]
diff --git a/opencompass/configs/models/qwen2_5/vllm_qwen2_5_32b_instruct_128k.py b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_32b_instruct_128k.py
new file mode 100644
index 00000000..5c326734
--- /dev/null
+++ b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_32b_instruct_128k.py
@@ -0,0 +1,21 @@
+from opencompass.models import VLLMwithChatTemplate
+
+models = [
+    dict(
+        type=VLLMwithChatTemplate,
+        abbr='qwen2.5-32b-instruct-vllm',
+        path='Qwen/Qwen2.5-32B-Instruct',
+        model_kwargs=dict(
+            tensor_parallel_size=8,
+            rope_scaling={
+                'factor': 4.0,
+                'original_max_position_embeddings': 32768,
+                'rope_type': 'yarn'
+            },
+        ),
+        max_out_len=4096,
+        batch_size=1,
+        generation_kwargs=dict(temperature=0),
+        run_cfg=dict(num_gpus=8),
+    )
+]
diff --git a/opencompass/configs/models/qwen2_5/vllm_qwen2_5_72b_instruct_128k.py b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_72b_instruct_128k.py
new file mode 100644
index 00000000..2a4a52fa
--- /dev/null
+++ b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_72b_instruct_128k.py
@@ -0,0 +1,21 @@
+from opencompass.models import VLLMwithChatTemplate
+
+models = [
+    dict(
+        type=VLLMwithChatTemplate,
+        abbr='qwen2.5-72b-instruct-vllm',
+        path='Qwen/Qwen2.5-72B-Instruct',
+        model_kwargs=dict(
+            tensor_parallel_size=8,
+            rope_scaling={
+                'factor': 4.0,
+                'original_max_position_embeddings': 32768,
+                'rope_type': 'yarn'
+            },
+        ),
+        max_out_len=4096,
+        batch_size=1,
+        generation_kwargs=dict(temperature=0),
+        run_cfg=dict(num_gpus=8),
+    )
+]
diff --git a/opencompass/configs/models/qwen2_5/vllm_qwen2_5_7b_instruct_128k.py b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_7b_instruct_128k.py
new file mode 100644
index 00000000..db21f730
--- /dev/null
+++ b/opencompass/configs/models/qwen2_5/vllm_qwen2_5_7b_instruct_128k.py
@@ -0,0 +1,21 @@
+from opencompass.models import VLLMwithChatTemplate
+
+models = [
+    dict(
+        type=VLLMwithChatTemplate,
+        abbr='qwen2.5-7b-instruct-vllm',
+        path='Qwen/Qwen2.5-7B-Instruct',
+        model_kwargs=dict(
+            tensor_parallel_size=4,
+            rope_scaling={
+                'factor': 4.0,
+                'original_max_position_embeddings': 32768,
+                'rope_type': 'yarn'
+            },
+        ),
+        max_out_len=4096,
+        batch_size=1,
+        generation_kwargs=dict(temperature=0),
+        run_cfg=dict(num_gpus=4),
+    )
+]
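
These configs are consumed like any other OpenCompass model config: an evaluation entry point imports them through mmengine's `read_base` context and concatenates the exported `models` lists with a dataset config. Below is a minimal sketch of that pattern, assuming the new files from this diff are on the import path; the RULER dataset import is an illustrative assumption and its exact module path may differ between checkouts.

from mmengine.config import read_base

with read_base():
    # Long-context model configs added in this diff
    from opencompass.configs.models.qwen2_5.vllm_qwen2_5_7b_instruct_128k import \
        models as qwen2_5_7b_128k_models
    from opencompass.configs.models.hf_internlm.lmdeploy_internlm3_8b_instruct_128k import \
        models as internlm3_8b_128k_models
    # Illustrative long-context dataset config; this path is an assumption
    from opencompass.configs.datasets.ruler.ruler_128k_gen import \
        ruler_datasets

models = [*qwen2_5_7b_128k_models, *internlm3_8b_128k_models]
datasets = [*ruler_datasets]

Such a config is then launched with `opencompass <config>.py` (or `python run.py <config>.py` in older checkouts). Note that `run_cfg=dict(num_gpus=...)` in each model entry tells the runner how many GPUs to reserve per task, matching the engine's tensor-parallel size (`tensor_parallel_size` for vLLM, `tp` for TurboMind).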