mirror of
https://github.com/open-compass/opencompass.git
synced 2025-05-30 16:03:24 +08:00
[Bump] Bump version to 0.3.7 (#1733)
This commit is contained in:
parent
98c4666d65
commit
e2a290fd46
@ -1,8 +1,9 @@
|
||||
__version__ = '0.3.6'
|
||||
__version__ = '0.3.7'
|
||||
|
||||
|
||||
def _warn_about_config_migration():
|
||||
import warnings
|
||||
|
||||
warnings.warn(
|
||||
'Starting from v0.4.0, all AMOTIC configuration files currently '
|
||||
'located in `./configs/datasets`, `./configs/models`, and '
|
||||
@ -10,7 +11,8 @@ def _warn_about_config_migration():
|
||||
'`opencompass/configs/` package. Please update your configuration '
|
||||
'file paths accordingly.',
|
||||
UserWarning, # Changed to UserWarning
|
||||
stacklevel=2)
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
|
||||
# Trigger the warning
|
||||
|
@ -3,7 +3,7 @@ from opencompass.models import TurboMindModelwithChatTemplate
|
||||
models = [
|
||||
dict(
|
||||
type=TurboMindModelwithChatTemplate,
|
||||
abbr='deepseek-v2_lite-turbomind',
|
||||
abbr='deepseek-v2_lite-chat-turbomind',
|
||||
path='deepseek-ai/DeepSeek-V2-Lite-Chat',
|
||||
engine_config=dict(
|
||||
session_len=7168,
|
||||
|
17
opencompass/configs/models/gemma/lmdeploy_gemma_27b.py
Normal file
17
opencompass/configs/models/gemma/lmdeploy_gemma_27b.py
Normal file
@ -0,0 +1,17 @@
|
||||
from opencompass.models import TurboMindModel
|
||||
|
||||
# LMDeploy/TurboMind config for the base (non-chat) google/gemma-2-27b model.
# Decoding is effectively greedy: top_k=1 with a near-zero temperature.
# Context window (session_len / max_seq_len) is 16k; runs tensor-parallel on 2 GPUs.
models = [
    {
        'type': TurboMindModel,
        'abbr': 'gemma-2-27b-turbomind',
        'path': 'google/gemma-2-27b',
        'engine_config': {'session_len': 16384, 'max_batch_size': 16, 'tp': 2},
        'gen_config': {
            'top_k': 1,
            'temperature': 1e-6,
            'top_p': 0.9,
            'max_new_tokens': 4096,
        },
        'max_seq_len': 16384,
        'max_out_len': 4096,
        'batch_size': 16,
        'run_cfg': {'num_gpus': 2},
    },
]
|
17
opencompass/configs/models/gemma/lmdeploy_gemma_9b.py
Normal file
17
opencompass/configs/models/gemma/lmdeploy_gemma_9b.py
Normal file
@ -0,0 +1,17 @@
|
||||
from opencompass.models import TurboMindModel
|
||||
|
||||
# LMDeploy/TurboMind config for the base (non-chat) google/gemma-2-9b model.
# Decoding is effectively greedy: top_k=1 with a near-zero temperature.
# Context window (session_len / max_seq_len) is 16k; fits on a single GPU (tp=1).
models = [
    {
        'type': TurboMindModel,
        'abbr': 'gemma-2-9b-turbomind',
        'path': 'google/gemma-2-9b',
        'engine_config': {'session_len': 16384, 'max_batch_size': 16, 'tp': 1},
        'gen_config': {
            'top_k': 1,
            'temperature': 1e-6,
            'top_p': 0.9,
            'max_new_tokens': 4096,
        },
        'max_seq_len': 16384,
        'max_out_len': 4096,
        'batch_size': 16,
        'run_cfg': {'num_gpus': 1},
    },
]
|
Loading…
Reference in New Issue
Block a user