from mmengine.config import read_base
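
# `read_base()` is a context manager from MMEngine: imports made inside the
# `with` block are resolved against other config files, so their variables
# (here, the dataset lists) become usable in this config.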
with read_base():
    from .datasets.winograd.winograd_ppl import winograd_datasets
    from .datasets.siqa.siqa_gen import siqa_datasets

datasets = [*siqa_datasets, *winograd_datasets]
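
# `datasets` is what the evaluation runner iterates over. Each imported entry
# already bundles its own loading, prompting, and scoring settings, so a
# perplexity-style task (winograd_ppl) and a generation-style task (siqa_gen)
# can be mixed without extra glue code.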

from opencompass.models import HuggingFaceCausalLM
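
# `HuggingFaceCausalLM` is OpenCompass's wrapper around decoder-only models
# loaded through `transformers.AutoModelForCausalLM`; the `opt350m` and
# `opt125m` dicts below each configure one instance of it.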

# OPT-350M
opt350m = dict(
    type=HuggingFaceCausalLM,
    # the following are HuggingFaceCausalLM init parameters
    path='facebook/opt-350m',
    tokenizer_path='facebook/opt-350m',
    tokenizer_kwargs=dict(
        padding_side='left',
        truncation_side='left',
        proxies=None,
        trust_remote_code=True),
    model_kwargs=dict(device_map='auto'),  # forwarded to from_pretrained
    max_seq_len=2048,  # Maximum input sequence length
    # the following are not HuggingFaceCausalLM init parameters
    abbr='opt350m',  # Model abbreviation
    max_out_len=100,  # Maximum number of generated tokens
    batch_size=64,  # Inference batch size
    run_cfg=dict(num_gpus=1),  # Run configuration for specifying resource requirements
)

# OPT-125M
opt125m = dict(
    type=HuggingFaceCausalLM,
    # the following are HuggingFaceCausalLM init parameters
    path='facebook/opt-125m',
    tokenizer_path='facebook/opt-125m',
    tokenizer_kwargs=dict(
        padding_side='left',
        truncation_side='left',
        proxies=None,
        trust_remote_code=True),
    model_kwargs=dict(device_map='auto'),  # forwarded to from_pretrained
    max_seq_len=2048,  # Maximum input sequence length
    # the following are not HuggingFaceCausalLM init parameters
    abbr='opt125m',  # Model abbreviation
    max_out_len=100,  # Maximum number of generated tokens
    batch_size=128,  # Inference batch size (the smaller model fits a larger batch)
    run_cfg=dict(num_gpus=1),  # Run configuration for specifying resource requirements
)

models = [opt350m, opt125m]
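
# Usage sketch, assuming this file is saved as configs/eval_demo.py inside an
# OpenCompass checkout (flag spellings may differ between versions):
#
#     python run.py configs/eval_demo.py -w outputs/demo --debug
#
# `-w` selects the working directory for predictions and results; `--debug`
# runs tasks sequentially and streams their logs to the console.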