Mirror of https://github.com/open-compass/opencompass.git (synced 2025-05-30 16:03:24 +08:00)

[Feat] Support mm models on public dataset and fix several issues. (#412)

* [Feat] Add public dataset support for visualglm, qwenvl, and flamingo
* [Fix] MMBench related changes.
* [Fix] Openflamingo inference.
* [Fix] Hide ckpt path.
* [Fix] Pre-commit.

Co-authored-by: Haodong Duan <dhd.efz@gmail.com>

Commit bd50bad8b5 (parent 7c2726c23b)
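A recurring pattern in this diff is renaming generic top-level names (`mmbench_dataloader`, `llava_model`, `visualglm_model`, ...) to model- and dataset-prefixed ones (`llava_mmbench_dataloader`, `visualglm_mmbench_model`, ...). A plausible motivation, not stated in the commit message and therefore an assumption, is that these config fragments end up in one flat namespace when an evaluation run is composed, where generic names from different files would overwrite each other. A minimal sketch of that failure mode (the file contents and keys here are illustrative only):

```python
# Minimal sketch of why unique top-level names matter when config fragments
# share one flat namespace. Names and values are illustrative.
def load_config_fragment(**variables):
    """Stand-in for reading one config file into a dict of its globals."""
    return dict(variables)

llava_cfg = load_config_fragment(mmbench_dataloader=dict(batch_size=1),
                                 mmbench_evaluator=[dict(type='DumpResults')])
visualglm_cfg = load_config_fragment(mmbench_dataloader=dict(batch_size=4),
                                     mmbench_evaluator=[dict(type='DumpResults')])

merged = {**llava_cfg, **visualglm_cfg}
# The VisualGLM entries silently overwrite the LLaVA ones:
assert merged['mmbench_dataloader']['batch_size'] == 4

# Prefixed names (llava_mmbench_dataloader, visualglm_mmbench_dataloader, ...)
# keep both fragments addressable after the merge.
```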
LLaVA MMBench config:

```diff
@@ -24,7 +24,7 @@ dataset = dict(type='opencompass.MMBenchDataset',
                data_file='data/mmbench/mmbench_test_20230712.tsv',
                pipeline=val_pipeline)
 
-mmbench_dataloader = dict(
+llava_mmbench_dataloader = dict(
     batch_size=1,
     num_workers=4,
     dataset=dataset,
@@ -33,7 +33,7 @@ mmbench_dataloader = dict(
 )
 
 # model settings
-llava_model = dict(
+llava_mmbench_model = dict(
     type='llava',
     model_path='/path/to/llava',
     prompt_constructor=dict(type=LLaVAMMBenchPromptConstructor),
@@ -41,7 +41,7 @@ llava_model = dict(
 ) # noqa
 
 # evaluation settings
-mmbench_evaluator = [
+llava_mmbench_evaluator = [
     dict(type='opencompass.DumpResults',
          save_path='work_dirs/llava-7b-mmbench.xlsx')
 ]
```
mPLUG-Owl MMBench config:

```diff
@@ -35,8 +35,8 @@ mplug_owl_mmbench_dataloader = dict(
 
 # model settings
 mplug_owl_mmbench_model = dict(
-    type='mplug_owl_7b',
-    model_path='/mplug-owl-llama-7b-ft/',
+    type='mplug_owl-7b',
+    model_path='/mplug-owl-llama-7b-ft',
     prompt_constructor=dict(type=MplugOwlMMBenchPromptConstructor),
     post_processor=dict(type=MplugOwlMMBenchPostProcessor)
 ) # noqa
@@ -46,5 +46,3 @@ mplug_owl_mmbench_evaluator = [
     dict(type='opencompass.DumpResults',
          save_path='work_dirs/mplug_owl-7b-mmagibench-v0.1.0.xlsx')
 ]
-
-mplug_owl_mmbench_load_from = None
```
New file: configs/multimodal/openflamingo/openflamingo_coco_caption.py (+75 lines)

```python
from opencompass.multimodal.models.openflamingo import OpenFlamingoCaptionPromptConstructor

# dataloader settings
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='mmpretrain.ResizeEdge',
         scale=224,
         interpolation='bicubic',
         backend='pillow'),
    dict(type='CenterCrop', crop_size=(224, 224)),
    dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id'])
]

dataset = dict(type='mmpretrain.COCOCaption',
               data_root='data/coco',
               data_prefix=dict(img_path='images'),
               ann_file='annotations/coco_karpathy_val.json',
               pipeline=val_pipeline)

openflamingo_coco_caption_dataloader = dict(
    batch_size=1,
    num_workers=4,
    dataset=dataset,
    sampler=dict(type='DefaultSampler', shuffle=False),
    collate_fn=dict(type='default_collate'),
    persistent_workers=True,
)

# model settings
openflamingo_coco_caption_model = dict(
    type='openflamingo',
    data_preprocessor=dict(
        type='mmpretrain.MultiModalDataPreprocessor',
        mean=[122.770938, 116.7460125, 104.09373615],
        std=[68.5005327, 66.6321579, 70.32316305],
        to_rgb=True,
    ),
    tokenizer=dict(type='mmpretrain.LlamaTokenizer',
                   name_or_path='decapoda-research/llama-7b-hf'),
    vision_encoder=dict(
        type='mmpretrain.VisionTransformer',
        arch='l',
        patch_size=14,
        pre_norm=True,
        norm_cfg=dict(type='LN', eps=1e-5),
        layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')),
        final_norm=False,
        out_type='raw',
        pretrained=  # noqa: E251
        '/path/to/vision/encoder',  # noqa
    ),
    lang_encoder=dict(
        base=dict(type='mmpretrain.AutoModelForCausalLM',
                  name_or_path='decapoda-research/llama-7b-hf',
                  local_files_only=True),
        adapter=dict(type='mmpretrain.FlamingoLMAdapter',
                     vis_hidden_size=1024,
                     cross_attn_every_n_layers=4,
                     use_media_placement_augmentation=False),
    ),
    task='caption',
    generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
    prompt_constructor=dict(type=OpenFlamingoCaptionPromptConstructor)
)

# evaluation settings
openflamingo_coco_caption_evaluator = [
    dict(
        type='mmpretrain.COCOCaption',
        ann_file='data/coco/annotations/coco_karpathy_val_gt.json',
    )  # noqa
]

openflamingo_load_from = '/path/to/pretrained/weights'  # noqa
```
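The `data_preprocessor` mean/std above appear to be the standard CLIP normalization statistics rescaled from the [0, 1] range to [0, 255]; the Qwen-VL configs later in this diff use the [0, 1] form directly. A quick illustrative check of that assumption (the config itself does no such computation):

```python
# CLIP-style normalization constants in [0, 1], as used by the Qwen-VL
# pipelines below; multiplying by 255 reproduces (up to rounding) the values
# in the OpenFlamingo data_preprocessor.
clip_mean = (0.48145466, 0.4578275, 0.40821073)
clip_std = (0.26862954, 0.26130258, 0.27577711)

print([m * 255 for m in clip_mean])  # ~ [122.770938, 116.7460125, 104.09373615]
print([s * 255 for s in clip_std])   # ~ [68.5005327, 66.6321579, 70.32316305]
```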
New file: configs/multimodal/openflamingo/openflamingo_flickr30k.py (+76 lines; abridged below, shared blocks match openflamingo_coco_caption.py)

```python
from opencompass.multimodal.models.openflamingo import OpenFlamingoCaptionPromptConstructor

# dataloader settings
# val_pipeline: identical to openflamingo_coco_caption.py
# (LoadImageFromFile -> ResizeEdge(224, bicubic) -> CenterCrop(224) ->
#  PackInputs(algorithm_keys=['image_id']))

dataset = dict(type='mmpretrain.Flickr30kCaption',
               data_root='data/flickr30k',
               ann_file='annotations/dataset_flickr30k.json',
               data_prefix='images',
               split='val',
               pipeline=val_pipeline)

openflamingo_flickr30k_dataloader = dict(
    batch_size=1,
    num_workers=4,
    dataset=dataset,
    sampler=dict(type='DefaultSampler', shuffle=False),
    collate_fn=dict(type='default_collate'),
    persistent_workers=True,
)

# model settings
openflamingo_flickr30k_model = dict(
    type='openflamingo',
    # data_preprocessor / tokenizer / vision_encoder / lang_encoder: same
    # blocks as in openflamingo_coco_caption.py
    task='caption',
    generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
    prompt_constructor=dict(type=OpenFlamingoCaptionPromptConstructor)
)

# evaluation settings
openflamingo_flickr30k_evaluator = [
    dict(
        type='mmpretrain.COCOCaption',
        ann_file='data/flickr30k/annotations/flickr30k_val_gt.json',
    )  # noqa
]

openflamingo_load_from = '/path/to/pretrained/weights'  # noqa
```
New file: configs/multimodal/openflamingo/openflamingo_gqa.py (+75 lines; abridged below, shared model blocks match openflamingo_coco_caption.py)

```python
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor

# dataloader settings
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='mmpretrain.ResizeEdge',
         scale=224,
         interpolation='bicubic',
         backend='pillow'),
    dict(type='CenterCrop', crop_size=(224, 224)),
    dict(
        type='mmpretrain.PackInputs',
        algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
        meta_keys=['question_id', 'image_id'],
    )
]

dataset = dict(type='mmpretrain.GQA',
               data_root='data/gqa',
               data_prefix='images',
               ann_file='annotations/testdev_balanced_questions.json',
               pipeline=val_pipeline)

openflamingo_gqa_dataloader = dict(
    batch_size=8,
    num_workers=4,
    dataset=dataset,
    sampler=dict(type='DefaultSampler', shuffle=False),
    collate_fn=dict(type='default_collate'),
    persistent_workers=True,
)

# model settings
openflamingo_gqa_model = dict(
    type='openflamingo',
    # data_preprocessor / tokenizer / vision_encoder / lang_encoder: same
    # blocks as in openflamingo_coco_caption.py
    task='vqa',
    generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
    prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)

# evaluation settings
openflamingo_gqa_evaluator = [dict(type='mmpretrain.GQAAcc')]

openflamingo_load_from = '/path/to/pretrained/weights'  # noqa
```
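The evaluator here only names a metric (`mmpretrain.GQAAcc`); its implementation is not part of this diff. For orientation, the sketch below shows what a generic exact-match VQA accuracy computes, as an illustration rather than mmpretrain's actual code:

```python
def exact_match_accuracy(predictions, ground_truths):
    """Illustrative exact-match accuracy over (prediction, answer) pairs.

    Real VQA metrics usually normalize answers further (stripping punctuation
    and articles, handling multiple reference answers); that is omitted here.
    """
    assert len(predictions) == len(ground_truths)
    correct = sum(p.strip().lower() == g.strip().lower()
                  for p, g in zip(predictions, ground_truths))
    return correct / max(len(predictions), 1)

print(exact_match_accuracy(['yes', 'a cat'], ['Yes', 'dog']))  # 0.5
```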
OpenFlamingo MMBench config:

```diff
@@ -1,3 +1,5 @@
+from opencompass.multimodal.models.openflamingo import OpenFlamingoMMBenchPromptConstructor
+
 # dataloader settings
 val_pipeline = [
     dict(type='mmpretrain.PILToNumpy'),
@@ -17,7 +19,7 @@ dataset = dict(type='opencompass.MMBenchDataset',
                data_file='data/mmbench/mmbench_test_20230712.tsv',
                pipeline=val_pipeline)
 
-openflamingo_dataloader = dict(
+openflamingo_mmbench_dataloader = dict(
     batch_size=1,
     num_workers=4,
     dataset=dataset,
@@ -27,7 +29,7 @@ openflamingo_dataloader = dict(
 )
 
 # model settings
-openflamingo_model = dict(
+openflamingo_mmbench_model = dict(
     type='openflamingo',
     data_preprocessor=dict(
         type='mmpretrain.MultiModalDataPreprocessor',
@@ -59,11 +61,13 @@ openflamingo_model = dict(
                      cross_attn_every_n_layers=4,
                      use_media_placement_augmentation=False),
     ),
+    task='vqa',
     generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
+    prompt_constructor=dict(type=OpenFlamingoMMBenchPromptConstructor)
 )
 
 # evaluation settings
-openflamingo_evaluator = [
+openflamingo_mmbench_evaluator = [
     dict(
         type='opencompass.DumpResults',
         save_path=  # noqa: E251
```
New file: configs/multimodal/openflamingo/openflamingo_ocr_vqa.py (+75 lines; abridged below, shared blocks match openflamingo_coco_caption.py / openflamingo_gqa.py)

```python
# dataloader settings
# val_pipeline: identical to openflamingo_gqa.py (VQA-style PackInputs)

dataset = dict(type='mmpretrain.OCRVQA',
               data_root='data/ocrvqa',
               ann_file='annotations/dataset.json',
               split='test',
               data_prefix='images',
               pipeline=val_pipeline)

openflamingo_ocrvqa_dataloader = dict(
    batch_size=8,
    num_workers=4,
    dataset=dataset,
    sampler=dict(type='DefaultSampler', shuffle=False),
    collate_fn=dict(type='default_collate'),
    persistent_workers=True,
)

# (in the committed file this import sits here, after the dataloader)
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor

# model settings
openflamingo_ocrvqa_model = dict(
    type='openflamingo',
    # data_preprocessor / tokenizer / vision_encoder / lang_encoder: same
    # blocks as in openflamingo_coco_caption.py
    task='vqa',
    generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
    prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)

# evaluation settings
openflamingo_ocrvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]

openflamingo_load_from = '/path/to/pretrained/weights'  # noqa
```
New file: configs/multimodal/openflamingo/openflamingo_ok_vqa.py (+77 lines; abridged below, shared blocks match openflamingo_coco_caption.py / openflamingo_gqa.py)

```python
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor

# dataloader settings
# val_pipeline: identical to openflamingo_gqa.py (VQA-style PackInputs)

dataset = dict(
    type='mmpretrain.COCOVQA',
    data_root='data/okvqa',
    question_file='annotations/OpenEnded_mscoco_val2014_questions.json',
    ann_file='annotations/mscoco_val2014_annotations.json',
    pipeline=val_pipeline,
    data_prefix='images/val2014',
)

openflamingo_okvqa_dataloader = dict(
    batch_size=8,
    num_workers=4,
    dataset=dataset,
    sampler=dict(type='DefaultSampler', shuffle=False),
    collate_fn=dict(type='default_collate'),
    persistent_workers=True,
)

# model settings
openflamingo_okvqa_model = dict(
    type='openflamingo',
    # data_preprocessor / tokenizer / vision_encoder / lang_encoder: same
    # blocks as in openflamingo_coco_caption.py
    task='vqa',
    generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
    prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)

# evaluation settings
openflamingo_okvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]

openflamingo_load_from = '/path/to/pretrained/weights'  # noqa
```
New file: configs/multimodal/openflamingo/openflamingo_scienceqa.py (+76 lines; abridged below, shared blocks match openflamingo_coco_caption.py)

```python
from opencompass.multimodal.models.openflamingo import OpenFlamingoScienceQAPromptConstructor

# dataloader settings
# val_pipeline: same image preprocessing as openflamingo_coco_caption.py, but
# PackInputs packs algorithm_keys=['question', 'gt_answer', 'choices', 'hint',
# 'lecture', 'solution']

dataset = dict(type='mmpretrain.ScienceQA',
               data_root='./data/scienceqa',
               split='val',
               split_file='pid_splits.json',
               ann_file='problems.json',
               image_only=True,
               data_prefix=dict(img_path='val'),
               pipeline=val_pipeline)

openflamingo_scienceqa_dataloader = dict(
    batch_size=1,
    num_workers=4,
    dataset=dataset,
    sampler=dict(type='DefaultSampler', shuffle=False),
    collate_fn=dict(type='default_collate'),
    persistent_workers=True,
)

# model settings
openflamingo_scienceqa_model = dict(
    type='openflamingo',
    # data_preprocessor / tokenizer / vision_encoder / lang_encoder: same
    # blocks as in openflamingo_coco_caption.py
    task='vqa',
    generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
    prompt_constructor=dict(type=OpenFlamingoScienceQAPromptConstructor)
)

# evaluation settings
openflamingo_scienceqa_evaluator = [dict(type='mmpretrain.ScienceQAMetric')]

openflamingo_load_from = '/path/to/pretrained/weights'  # noqa
```
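The ScienceQA pipeline packs `question`, `choices`, `hint`, `lecture`, and `solution`; how `OpenFlamingoScienceQAPromptConstructor` combines them is not shown in this diff. The sketch below is therefore only a hypothetical formatter illustrating the kind of multiple-choice prompt such fields can produce:

```python
def format_scienceqa_prompt(question, choices, hint=''):
    """Hypothetical prompt layout; the real prompt constructor may differ."""
    lettered = [f"({chr(ord('A') + i)}) {c}" for i, c in enumerate(choices)]
    context = f"Context: {hint}\n" if hint else ''
    return f"{context}Question: {question}\nOptions: {' '.join(lettered)}\nAnswer:"

print(format_scienceqa_prompt('Which is a liquid?', ['rock', 'water', 'air']))
```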
New file: configs/multimodal/openflamingo/openflamingo_textvqa.py (+76 lines; abridged below, shared blocks match openflamingo_coco_caption.py / openflamingo_gqa.py)

```python
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor

# dataloader settings
# val_pipeline: identical to openflamingo_gqa.py (VQA-style PackInputs)

dataset = dict(
    type='mmpretrain.TextVQA',
    data_root='data/textvqa',
    ann_file='annotations/TextVQA_0.5.1_val.json',
    pipeline=val_pipeline,
    data_prefix='images/train_images',
)

openflamingo_textvqa_dataloader = dict(
    batch_size=8,
    num_workers=4,
    dataset=dataset,
    sampler=dict(type='DefaultSampler', shuffle=False),
    collate_fn=dict(type='default_collate'),
    persistent_workers=True,
)

# model settings
openflamingo_textvqa_model = dict(
    type='openflamingo',
    # data_preprocessor / tokenizer / vision_encoder / lang_encoder: same
    # blocks as in openflamingo_coco_caption.py
    task='vqa',
    generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
    prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)

# evaluation settings
openflamingo_textvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]

openflamingo_load_from = '/path/to/pretrained/weights'  # noqa
```
New file: configs/multimodal/openflamingo/openflamingo_vizwiz.py (+74 lines; abridged below, shared blocks match openflamingo_coco_caption.py / openflamingo_gqa.py)

```python
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor

# dataloader settings
# val_pipeline: identical to openflamingo_gqa.py (VQA-style PackInputs)

dataset = dict(type='mmpretrain.VizWiz',
               data_root='data/vizwiz/',
               data_prefix='Images/val',
               ann_file='Annotations/val.json',
               pipeline=val_pipeline)

openflamingo_vizwiz_dataloader = dict(
    batch_size=8,
    num_workers=4,
    dataset=dataset,
    sampler=dict(type='DefaultSampler', shuffle=False),
    collate_fn=dict(type='default_collate'),
    persistent_workers=True,
)

# model settings
openflamingo_vizwiz_model = dict(
    type='openflamingo',
    # data_preprocessor / tokenizer / vision_encoder / lang_encoder: same
    # blocks as in openflamingo_coco_caption.py
    task='vqa',
    generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
    prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)

# evaluation settings
openflamingo_vizwiz_evaluator = [dict(type='mmpretrain.VQAAcc')]

openflamingo_load_from = '/path/to/pretrained/weights'  # noqa
```
New file: configs/multimodal/openflamingo/openflamingo_vqav2.py (+75 lines; abridged below, shared blocks match openflamingo_coco_caption.py / openflamingo_gqa.py)

```python
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor

# dataloader settings
# val_pipeline: identical to openflamingo_gqa.py (VQA-style PackInputs)

dataset = dict(
    type='mmpretrain.COCOVQA',
    data_root='data/coco',
    data_prefix='images/val2014',
    question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json',
    ann_file='annotations/v2_mscoco_val2014_annotations.json',
    pipeline=val_pipeline)

openflamingo_vqav2_dataloader = dict(
    batch_size=8,
    num_workers=4,
    dataset=dataset,
    sampler=dict(type='DefaultSampler', shuffle=False),
    collate_fn=dict(type='default_collate'),
    persistent_workers=True,
)

# model settings
openflamingo_vqav2_model = dict(
    type='openflamingo',
    # data_preprocessor / tokenizer / vision_encoder / lang_encoder: same
    # blocks as in openflamingo_coco_caption.py
    task='vqa',
    generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
    prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor)
)

# evaluation settings
openflamingo_vqav2_evaluator = [dict(type='mmpretrain.VQAAcc')]

openflamingo_load_from = '/path/to/pretrained/weights'  # noqa
```
New file: configs/multimodal/openflamingo/openflamingo_vsr.py (+75 lines; abridged below, shared blocks match openflamingo_coco_caption.py / openflamingo_gqa.py)

```python
from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor, OpenFlamingoVSRPostProcessor

# dataloader settings
# val_pipeline: identical to openflamingo_gqa.py (VQA-style PackInputs)

dataset = dict(type='mmpretrain.VSR',
               data_root='data/vsr/',
               data_prefix='images/',
               ann_file='annotations/test.json',
               pipeline=val_pipeline)

openflamingo_vsr_dataloader = dict(
    batch_size=8,
    num_workers=4,
    dataset=dataset,
    sampler=dict(type='DefaultSampler', shuffle=False),
    collate_fn=dict(type='default_collate'),
    persistent_workers=True,
)

# model settings
openflamingo_vsr_model = dict(
    type='openflamingo',
    # data_preprocessor / tokenizer / vision_encoder / lang_encoder: same
    # blocks as in openflamingo_coco_caption.py
    task='vqa',
    generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0),
    prompt_constructor=dict(
        type=OpenFlamingoVQAPromptConstructor,
        shot_prompt=('The cat is behind the laptop. Short Answer:yes<|endofchunk|>'  # noqa: E501
                     'The cow is ahead of the person. Short Answer:no<|endofchunk|>')),
    post_processor=dict(type=OpenFlamingoVSRPostProcessor)
)

# evaluation settings
openflamingo_vsr_evaluator = [dict(type='mmpretrain.GQAAcc')]

openflamingo_load_from = '/path/to/pretrained/weights'  # noqa
```
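The VSR config is the only OpenFlamingo one that passes a `shot_prompt` (two in-context examples separated by `<|endofchunk|>`) and a dedicated post-processor. How the prompt constructor splices the shots is not part of this diff; the sketch below only illustrates the general few-shot concatenation pattern and is an assumption, not OpenCompass code:

```python
# Hypothetical assembly of a few-shot VSR prompt. The real
# OpenFlamingoVQAPromptConstructor may order or wrap things differently.
shot_prompt = ('The cat is behind the laptop. Short Answer:yes<|endofchunk|>'
               'The cow is ahead of the person. Short Answer:no<|endofchunk|>')

def build_vsr_prompt(statement: str) -> str:
    # In-context examples first, then the statement to be judged yes/no.
    return f'{shot_prompt}{statement} Short Answer:'

print(build_vsr_prompt('The dog is under the table.'))
```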
Otter README (environment preparation):

````diff
@@ -3,12 +3,9 @@
 ### Prepare the environment
 
 ```sh
-cd opencompass/multimodal/models/otter
-git clone https://github.com/Luodian/Otter.git
+pip install otter_ai
 ```
 
-Then create a new conda environment and prepare the environement according to this [doc](https://github.com/Luodian/Otter)
-
-
 ### Start evaluation
 
 #### Slurm
````
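The README now asks users to `pip install otter_ai` instead of cloning the Otter repository in-tree. Purely as an illustration (not part of OpenCompass), a model wrapper could fail early with a pointer to that instruction when the package is missing:

```python
import importlib.util

# Illustrative only: verify that the otter_ai package installed by
# `pip install otter_ai` is importable before building the Otter wrapper.
if importlib.util.find_spec('otter_ai') is None:
    raise ImportError('otter_ai is not installed; run `pip install otter_ai` '
                      'as described in the Otter README.')
```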
New file: configs/multimodal/qwen/qwenvl_chat_7b_coco_caption.py (+44 lines)

```python
from opencompass.multimodal.models.qwen import QwenVLChatPromptConstructor

# dataloader settings
val_pipeline = [
    dict(type='mmpretrain.LoadImageFromFile'),
    dict(type='mmpretrain.ToPIL', to_rgb=True),
    dict(type='mmpretrain.torchvision/Resize',
         size=(448, 448),
         interpolation=3),
    dict(type='mmpretrain.torchvision/ToTensor'),
    dict(type='mmpretrain.torchvision/Normalize',
         mean=(0.48145466, 0.4578275, 0.40821073),
         std=(0.26862954, 0.26130258, 0.27577711)),
    dict(type='mmpretrain.PackInputs',
         algorithm_keys=['image_id'])
]

dataset = dict(type='mmpretrain.COCOCaption',
               data_root='data/coco',
               data_prefix=dict(img_path='images'),
               ann_file='annotations/coco_karpathy_val.json',
               pipeline=val_pipeline)

qwen_coco_caption_dataloader = dict(batch_size=1,
                                    num_workers=4,
                                    dataset=dataset,
                                    collate_fn=dict(type='pseudo_collate'),
                                    sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
qwen_coco_caption_model = dict(
    type='qwen-vl-chat',
    pretrained_path='Qwen/Qwen-VL-Chat',  # or Huggingface repo id
    prompt_constructor=dict(type=QwenVLChatPromptConstructor, prompt='Describe the image.'),
    is_caption_task=True,
)

# evaluation settings
qwen_coco_caption_evaluator = [
    dict(
        type='mmpretrain.COCOCaption',
        ann_file='data/coco/annotations/coco_karpathy_val_gt.json',
    )  # noqa
]
```
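`interpolation=3` in the torchvision `Resize` step appears to be the integer resampling code that PIL assigns to bicubic interpolation, which torchvision historically accepted directly. A quick check of that assumption:

```python
# Sanity check of the assumption that interpolation=3 means bicubic:
# PIL's resampling constants place BICUBIC at 3.
from PIL import Image

print(int(Image.Resampling.BICUBIC))  # 3 on recent Pillow versions
```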
New file: configs/multimodal/qwen/qwenvl_chat_7b_flickr30k.py (+44 lines; abridged below, shared pipeline matches qwenvl_chat_7b_coco_caption.py)

```python
from opencompass.multimodal.models.qwen import QwenVLChatPromptConstructor

# dataloader settings
# val_pipeline: identical to qwenvl_chat_7b_coco_caption.py
# (LoadImageFromFile -> ToPIL -> Resize(448) -> ToTensor -> Normalize ->
#  PackInputs(algorithm_keys=['image_id']))

dataset = dict(type='mmpretrain.Flickr30kCaption',
               data_root='data/flickr30k',
               ann_file='annotations/dataset_flickr30k.json',
               data_prefix='images',
               split='val',
               pipeline=val_pipeline)

qwen_flickr30k_dataloader = dict(batch_size=1,
                                 num_workers=4,
                                 dataset=dataset,
                                 collate_fn=dict(type='pseudo_collate'),
                                 sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
qwen_flickr30k_model = dict(
    type='qwen-vl-chat',
    pretrained_path='Qwen/Qwen-VL-Chat',  # or Huggingface repo id
    prompt_constructor=dict(type=QwenVLChatPromptConstructor, prompt='Describe the image.'),
    is_caption_task=True,
)

# evaluation settings
qwen_flickr30k_evaluator = [
    dict(
        type='mmpretrain.COCOCaption',
        ann_file='data/flickr30k/annotations/flickr30k_val_gt.json',
    )  # noqa
]
```
New file: configs/multimodal/qwen/qwenvl_chat_7b_gqa.py (+41 lines)

```python
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor

# dataloader settings
val_pipeline = [
    dict(type='mmpretrain.LoadImageFromFile'),
    dict(type='mmpretrain.ToPIL', to_rgb=True),
    dict(type='mmpretrain.torchvision/Resize',
         size=(448, 448),
         interpolation=3),
    dict(type='mmpretrain.torchvision/ToTensor'),
    dict(type='mmpretrain.torchvision/Normalize',
         mean=(0.48145466, 0.4578275, 0.40821073),
         std=(0.26862954, 0.26130258, 0.27577711)),
    dict(
        type='mmpretrain.PackInputs',
        algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
        meta_keys=['question_id', 'image_id'],
    )
]

dataset = dict(type='mmpretrain.GQA',
               data_root='data/gqa',
               data_prefix='images',
               ann_file='annotations/testdev_balanced_questions.json',
               pipeline=val_pipeline)

qwen_gqa_dataloader = dict(batch_size=1,
                           num_workers=4,
                           dataset=dataset,
                           collate_fn=dict(type='pseudo_collate'),
                           sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
qwen_gqa_model = dict(
    type='qwen-vl-chat',
    pretrained_path='Qwen/Qwen-VL-Chat',  # or Huggingface repo id
    prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)

# evaluation settings
qwen_gqa_evaluator = [dict(type='mmpretrain.GQAAcc')]
```
New file: configs/multimodal/qwen/qwenvl_chat_7b_mmbench_cn.py (+41 lines)

```python
from opencompass.multimodal.models.qwen import QwenVLMMBenchPromptConstructor

# dataloader settings
val_pipeline = [
    dict(type='mmpretrain.torchvision/Resize',
         size=(448, 448),
         interpolation=3),
    dict(type='mmpretrain.torchvision/ToTensor'),
    dict(type='mmpretrain.torchvision/Normalize',
         mean=(0.48145466, 0.4578275, 0.40821073),
         std=(0.26862954, 0.26130258, 0.27577711)),
    dict(type='mmpretrain.PackInputs',
         algorithm_keys=[
             'question', 'options', 'category', 'l2-category', 'context',
             'index', 'options_dict'
         ])
]

dataset = dict(type='opencompass.MMBenchDataset',
               data_file='/mnt/petrelfs/share_data/yuanyike/cnbench_v010_rolling.tsv',
               pipeline=val_pipeline,
               sys_prompt='请从以下选项中选择一个正确选项。')

qwen_mmbench_dataloader = dict(batch_size=1,
                               num_workers=4,
                               dataset=dataset,
                               collate_fn=dict(type='pseudo_collate'),
                               sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
qwen_model = dict(
    type='qwen-vl-chat',
    pretrained_path='Qwen/Qwen-VL-Chat',  # or Huggingface repo id
    prompt_constructor=dict(type=QwenVLMMBenchPromptConstructor)
)

# evaluation settings
qwen_mmbench_evaluator = [
    dict(type='opencompass.DumpResults',
         save_path='work_dirs/qwenvl-chat-7b-cnbench-v010.xlsx')
]
```
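The Chinese `sys_prompt` above translates roughly to "Please choose the correct option from the choices below." The exact prompt that `QwenVLMMBenchPromptConstructor` builds is not shown in this diff, so the following is only a hypothetical illustration of combining the packed MMBench fields with such a system prompt:

```python
def build_mmbench_prompt(question, options_dict, sys_prompt, context=''):
    """Hypothetical layout; the actual QwenVLMMBenchPromptConstructor may differ."""
    options = '\n'.join(f'{key}. {value}' for key, value in options_dict.items())
    hint = f'{context}\n' if context else ''
    return f'{sys_prompt}\n{hint}{question}\n{options}\nAnswer:'

print(build_mmbench_prompt('图中是什么动物？',
                           {'A': '猫', 'B': '狗'},
                           sys_prompt='请从以下选项中选择一个正确选项。'))
```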
New file: configs/multimodal/qwen/qwenvl_chat_7b_ocr_vqa.py (+42 lines; abridged below, shared pipeline matches qwenvl_chat_7b_gqa.py)

```python
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor

# dataloader settings
# val_pipeline: identical to the VQA pipeline in qwenvl_chat_7b_gqa.py

dataset = dict(type='mmpretrain.OCRVQA',
               data_root='data/ocrvqa',
               ann_file='annotations/dataset.json',
               split='test',
               data_prefix='images',
               pipeline=val_pipeline)

qwen_ocrvqa_dataloader = dict(batch_size=1,
                              num_workers=4,
                              dataset=dataset,
                              collate_fn=dict(type='pseudo_collate'),
                              sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
qwen_ocrvqa_model = dict(
    type='qwen-vl-chat',
    pretrained_path='Qwen/Qwen-VL-Chat',  # or Huggingface repo id
    prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)

# evaluation settings
qwen_ocrvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
```
New file: configs/multimodal/qwen/qwenvl_chat_7b_ok_vqa.py (+44 lines; abridged below, shared pipeline matches qwenvl_chat_7b_gqa.py)

```python
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor

# dataloader settings
# val_pipeline: identical to the VQA pipeline in qwenvl_chat_7b_gqa.py

dataset = dict(
    type='mmpretrain.COCOVQA',
    data_root='data/okvqa',
    question_file='annotations/OpenEnded_mscoco_val2014_questions.json',
    ann_file='annotations/mscoco_val2014_annotations.json',
    pipeline=val_pipeline,
    data_prefix='images/val2014',
)

qwen_okvqa_dataloader = dict(batch_size=1,
                             num_workers=4,
                             dataset=dataset,
                             collate_fn=dict(type='pseudo_collate'),
                             sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
qwen_okvqa_model = dict(
    type='qwen-vl-chat',
    pretrained_path='Qwen/Qwen-VL-Chat',  # or Huggingface repo id
    prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)

# evaluation settings
qwen_okvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
```
New file: configs/multimodal/qwen/qwenvl_chat_7b_scienceqa.py (+43 lines; abridged below, shared preprocessing matches qwenvl_chat_7b_gqa.py)

```python
from opencompass.multimodal.models.qwen import QwenVLChatScienceQAPromptConstructor

# dataloader settings
# val_pipeline: same preprocessing as qwenvl_chat_7b_gqa.py, but PackInputs
# packs algorithm_keys=['question', 'gt_answer', 'choices', 'hint', 'lecture',
# 'solution']

dataset = dict(type='mmpretrain.ScienceQA',
               data_root='./data/scienceqa',
               split='val',
               split_file='pid_splits.json',
               ann_file='problems.json',
               image_only=True,
               data_prefix=dict(img_path='val'),
               pipeline=val_pipeline)

qwen_scienceqa_dataloader = dict(batch_size=1,
                                 num_workers=4,
                                 dataset=dataset,
                                 collate_fn=dict(type='pseudo_collate'),
                                 sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
qwen_scienceqa_model = dict(
    type='qwen-vl-chat',
    pretrained_path='Qwen/Qwen-VL-Chat',  # or Huggingface repo id
    prompt_constructor=dict(type=QwenVLChatScienceQAPromptConstructor)
)

# evaluation settings
qwen_scienceqa_evaluator = [dict(type='mmpretrain.ScienceQAMetric')]
```
New file: configs/multimodal/qwen/qwenvl_chat_7b_textvqa.py (+43 lines; abridged below, shared pipeline matches qwenvl_chat_7b_gqa.py)

```python
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor

# dataloader settings
# val_pipeline: identical to the VQA pipeline in qwenvl_chat_7b_gqa.py

dataset = dict(
    type='mmpretrain.TextVQA',
    data_root='data/textvqa',
    ann_file='annotations/TextVQA_0.5.1_val.json',
    pipeline=val_pipeline,
    data_prefix='images/train_images',
)

qwen_textvqa_dataloader = dict(batch_size=1,
                               num_workers=4,
                               dataset=dataset,
                               collate_fn=dict(type='pseudo_collate'),
                               sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
qwen_textvqa_model = dict(
    type='qwen-vl-chat',
    pretrained_path='Qwen/Qwen-VL-Chat',  # or Huggingface repo id
    prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)

# evaluation settings
qwen_textvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
```
New file: configs/multimodal/qwen/qwenvl_chat_7b_vizwiz.py (+41 lines; abridged below, shared pipeline matches qwenvl_chat_7b_gqa.py)

```python
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor

# dataloader settings
# val_pipeline: identical to the VQA pipeline in qwenvl_chat_7b_gqa.py

dataset = dict(type='mmpretrain.VizWiz',
               data_root='data/vizwiz/',
               data_prefix='Images/val',
               ann_file='Annotations/val.json',
               pipeline=val_pipeline)

qwen_vizwiz_dataloader = dict(batch_size=1,
                              num_workers=4,
                              dataset=dataset,
                              collate_fn=dict(type='pseudo_collate'),
                              sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
qwen_vizwiz_model = dict(
    type='qwen-vl-chat',
    pretrained_path='Qwen/Qwen-VL-Chat',  # or Huggingface repo id
    prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)

# evaluation settings
qwen_vizwiz_evaluator = [dict(type='mmpretrain.VQAAcc')]
```
New file: configs/multimodal/qwen/qwenvl_chat_7b_vqav2.py (+43 lines; abridged below, shared pipeline matches qwenvl_chat_7b_gqa.py)

```python
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor

# dataloader settings
# val_pipeline: identical to the VQA pipeline in qwenvl_chat_7b_gqa.py

dataset = dict(
    type='mmpretrain.COCOVQA',
    data_root='data/coco',
    data_prefix='images/val2014',
    question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json',
    ann_file='annotations/v2_mscoco_val2014_annotations.json',
    pipeline=val_pipeline)

qwen_vqav2_dataloader = dict(batch_size=1,
                             num_workers=4,
                             dataset=dataset,
                             collate_fn=dict(type='pseudo_collate'),
                             sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
qwen_vqav2_model = dict(
    type='qwen-vl-chat',
    pretrained_path='Qwen/Qwen-VL-Chat',  # or Huggingface repo id
    prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor)
)

# evaluation settings
qwen_vqav2_evaluator = [dict(type='mmpretrain.VQAAcc')]
```
New file: configs/multimodal/qwen/qwenvl_chat_7b_vsr.py (+42 lines; abridged below, shared pipeline matches qwenvl_chat_7b_gqa.py)

```python
from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor, QwenVLChatVSRPostProcessor

# dataloader settings
# val_pipeline: identical to the VQA pipeline in qwenvl_chat_7b_gqa.py

dataset = dict(type='mmpretrain.VSR',
               data_root='data/vsr/',
               data_prefix='images/',
               ann_file='annotations/test.json',
               pipeline=val_pipeline)

qwen_vsr_dataloader = dict(batch_size=1,
                           num_workers=4,
                           dataset=dataset,
                           collate_fn=dict(type='pseudo_collate'),
                           sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
qwen_vsr_model = dict(
    type='qwen-vl-chat',
    pretrained_path='Qwen/Qwen-VL-Chat',  # or Huggingface repo id
    prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor),
    post_processor=dict(type=QwenVLChatVSRPostProcessor)
)

# evaluation settings
qwen_vsr_evaluator = [dict(type='mmpretrain.GQAAcc')]
```
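VSR is a binary (true/false) spatial-relation benchmark, which is presumably why this config adds `QwenVLChatVSRPostProcessor` before scoring with `GQAAcc`. The diff does not show that post-processor, so the snippet below is only a guess at the kind of normalization such a step performs:

```python
def normalize_vsr_answer(text: str) -> str:
    """Hypothetical yes/no normalization; not the actual QwenVLChatVSRPostProcessor."""
    text = text.strip().lower()
    if text.startswith('yes') or text.startswith('true'):
        return 'yes'
    if text.startswith('no') or text.startswith('false'):
        return 'no'
    return text  # leave anything else for the metric to mark as incorrect

print(normalize_vsr_answer('Yes, the cat is behind the laptop.'))  # 'yes'
```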
VisualGLM COCO caption config:

```diff
@@ -32,7 +32,7 @@ visualglm_coco_caption_model = dict(
     type='visualglm',
     pretrained_path='/path/to/visualglm',  # or Huggingface repo id
     is_caption_task=True,
-    prompt_constructor=dict(type=VisualGLMBasePromptConstructor),
+    prompt_constructor=dict(type=VisualGLMBasePromptConstructor, system_prompt='A photo of'),
     post_processor=dict(type=VisualGLMBasePostProcessor)
 )
```
@ -33,7 +33,7 @@ visualglm_flickr30k_model = dict(
    type='visualglm',
    pretrained_path='/path/to/visualglm',  # or Huggingface repo id
    is_caption_task=True,
-    prompt_constructor=dict(type=VisualGLMBasePromptConstructor),
+    prompt_constructor=dict(type=VisualGLMBasePromptConstructor, system_prompt='A photo of'),
    post_processor=dict(type=VisualGLMBasePostProcessor)
)
@ -20,22 +20,23 @@ dataset = dict(type='opencompass.MMBenchDataset',
               data_file='data/mmbench/mmbench_test_20230712.tsv',
               pipeline=val_pipeline)

-mmbench_dataloader = dict(batch_size=1,
+visualglm_mmbench_dataloader = dict(batch_size=1,
                          num_workers=4,
                          dataset=dataset,
                          collate_fn=dict(type='pseudo_collate'),
                          sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
-visualglm_model = dict(
+visualglm_mmbench_model = dict(
    type='visualglm',
    pretrained_path='/path/to/visualglm',  # or Huggingface repo id
    prompt_constructor=dict(type=VisualGLMMMBenchPromptConstructor),
-    post_processor=dict(type=VisualGLMBasePostProcessor)
+    post_processor=dict(type=VisualGLMBasePostProcessor),
+    gen_kwargs=dict(max_new_tokens=50, num_beams=5, do_sample=False, repetition_penalty=1.0, length_penalty=-1.0)
)

# evaluation settings
-mmbench_evaluator = [
+visualglm_mmbench_evaluator = [
    dict(type='opencompass.DumpResults',
         save_path='work_dirs/visualglm-6b-mmbench.xlsx')
]
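For reference (this snippet is not part of the diff): per the VisualGLM change later in this commit, a non-empty gen_kwargs replaces the model's built-in generation defaults wholesale rather than being merged key by key, so a config that only wants a different token budget has to restate the other settings it cares about. The values below are illustrative only:

visualglm_mmbench_model = dict(
    type='visualglm',
    pretrained_path='/path/to/visualglm',  # or Huggingface repo id
    prompt_constructor=dict(type=VisualGLMMMBenchPromptConstructor),
    post_processor=dict(type=VisualGLMBasePostProcessor),
    # illustrative values; omitting gen_kwargs entirely falls back to the
    # defaults hard-coded in VisualGLM.__init__
    gen_kwargs=dict(max_new_tokens=100, num_beams=5, do_sample=False,
                    repetition_penalty=1.0, length_penalty=-1.0),
)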
@ -26,7 +26,7 @@ dataset = dict(type='mmpretrain.ScienceQA',
               data_prefix=dict(img_path='val'),
               pipeline=val_pipeline)

-visualglm_vizwiz_dataloader = dict(batch_size=1,
+visualglm_scienceqa_dataloader = dict(batch_size=1,
                                   num_workers=4,
                                   dataset=dataset,
                                   collate_fn=dict(type='pseudo_collate'),
@ -33,7 +33,7 @@ visualglm_textvqa_dataloader = dict(batch_size=1,
                                    sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
-visualglm_model = dict(
+visualglm_textvqa_model = dict(
    type='visualglm',
    pretrained_path='/path/to/visualglm',  # or Huggingface repo id
    prompt_constructor=dict(type=VisualGLMVQAPromptConstructor),
@ -31,7 +31,7 @@ visualglm_vizwiz_dataloader = dict(batch_size=1,
                                   sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
-visualglm_model = dict(
+visualglm_vizwiz_model = dict(
    type='visualglm',
    pretrained_path='/path/to/visualglm',  # or Huggingface repo id
    prompt_constructor=dict(type=VisualGLMVQAPromptConstructor),
@ -33,7 +33,7 @@ visualglm_vqav2_dataloader = dict(batch_size=1,
                                  sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
-visualglm_model = dict(
+visualglm_vqav2_model = dict(
    type='visualglm',
    pretrained_path='/path/to/visualglm',  # or Huggingface repo id
    prompt_constructor=dict(type=VisualGLMVQAPromptConstructor),
@ -32,7 +32,7 @@ visualglm_vsr_dataloader = dict(batch_size=1,
                                sampler=dict(type='DefaultSampler', shuffle=False))

# model settings
-visualglm_model = dict(
+visualglm_vsr_model = dict(
    type='visualglm',
    pretrained_path='/path/to/visualglm',  # or Huggingface repo id
    prompt_constructor=dict(type=VisualGLMVQAPromptConstructor),
@ -19,9 +19,6 @@ if osp.exists('opencompass/multimodal/models/mplug_owl/mPLUG-Owl'):
    from .mplug_owl import *  # noqa: F401, F403

from .openflamingo import *  # noqa: F401, F403
-if osp.exists('opencompass/multimodal/models/otter/Otter'):
-    from .otter import *  # noqa: F401, F403
-
+from .otter import *  # noqa: F401, F403
from .qwen import *  # noqa: F401, F403
from .visualglm import *  # noqa: F401, F403
@ -1,3 +1,12 @@
from .openflamingo import OpenFlamingoInferencer
+from .post_processor import OpenFlamingoVSRPostProcessor
+from .prompt_constructor import (OpenFlamingoCaptionPromptConstructor,
+                                 OpenFlamingoMMBenchPromptConstructor,
+                                 OpenFlamingoScienceQAPromptConstructor,
+                                 OpenFlamingoVQAPromptConstructor)

-__all__ = ['OpenFlamingoInferencer']
+__all__ = [
+    'OpenFlamingoInferencer', 'OpenFlamingoMMBenchPromptConstructor',
+    'OpenFlamingoCaptionPromptConstructor', 'OpenFlamingoVQAPromptConstructor',
+    'OpenFlamingoScienceQAPromptConstructor', 'OpenFlamingoVSRPostProcessor'
+]
@ -1,3 +1,4 @@
+import re
from typing import List, Optional, Union

import mmengine
@ -21,17 +22,18 @@ class OpenFlamingoInferencer(Flamingo):
    """

    def __init__(self,
-                 prompt_constructor: Optional[dict] = None,
+                 prompt_constructor: dict,
                 post_processor: Optional[dict] = None,
                 mode: str = 'generation',
                 **kwargs):
        super().__init__(**kwargs)
-        if prompt_constructor is not None:
-            self.prompt_constructor = mmengine.registry.build_from_cfg(
-                prompt_constructor, MM_MODELS)
+        self.prompt_constructor = mmengine.registry.build_from_cfg(
+            prompt_constructor, MM_MODELS)
        if post_processor is not None:
            self.post_processor = mmengine.registry.build_from_cfg(
                post_processor, MM_MODELS)
+        else:
+            self.post_processor = None
        self.mode = mode

    def preprocess_text(self, data_samples: List[DataSample],
@ -46,16 +48,7 @@ class OpenFlamingoInferencer(Flamingo):
        Returns:
            List[DataSample]: Return list of data samples.
        """
-        prompts = []
-        for sample in data_samples:
-            question = sample.get('question')
-            option = sample.get('options')
-
-            prompt = '<image>' + question + ' ' + option + ' ' + 'Answer:'
-            if data_samples[0].get('context') is not None:
-                prompt = sample.get('context') + ' ' + prompt
-
-            prompts.append(prompt)
-
+        prompts = self.prompt_constructor(data_samples)

        self.tokenizer.padding_side = 'left'
        input_text = self.tokenizer(
@ -67,6 +60,42 @@ class OpenFlamingoInferencer(Flamingo):
        ).to(device)
        return input_text

+    def post_process(
+            self, outputs: torch.Tensor,
+            data_samples: Optional[List[DataSample]]) -> List[DataSample]:
+        """Perform post process for outputs for different task.
+
+        Args:
+            outputs (torch.Tensor): The generated outputs.
+            data_samples (List[DataSample], optional): The annotation
+                data of every samples.
+
+        Returns:
+            List[DataSample]: Return list of data samples.
+        """
+        outputs = self.tokenizer.batch_decode(outputs,
+                                              skip_special_tokens=True)
+
+        if data_samples is None:
+            data_samples = [DataSample() for _ in range(len(outputs))]
+
+        for output, data_sample in zip(outputs, data_samples):
+            # remove text pattern
+            if self.task == 'caption':
+                data_sample.pred_caption = re.split('Output', output,
+                                                    1)[0].replace('"', '')
+                if self.post_processor:
+                    data_sample.pred_caption = self.post_processor(
+                        data_sample.pred_caption)
+            elif self.task == 'vqa':
+                data_sample.pred_answer = re.split('Question|Answer', output,
+                                                   1)[0]
+                if self.post_processor:
+                    data_sample.pred_answer = self.post_processor(
+                        data_sample.pred_answer)
+
+        return data_samples
+
    def forward(self, batch: dict) -> Union[DataSample, List[DataSample]]:

        if self.mode == 'generation':
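A small standalone illustration (not part of the commit) of the trimming post_process applies to decoded generations; the generated strings below are invented:

import re

caption_output = 'A dog rides a skateboard."Output:another caption'
print(re.split('Output', caption_output, 1)[0].replace('"', ''))
# -> A dog rides a skateboard.

vqa_output = 'blue Question:Is it sunny? Short Answer:yes'
print(re.split('Question|Answer', vqa_output, 1)[0])
# -> 'blue '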
opencompass/multimodal/models/openflamingo/post_processor.py (new file, 13 lines)
@ -0,0 +1,13 @@
class OpenFlamingoVSRPostProcessor:
    """VSR post processor for Openflamingo."""

    def __init__(self) -> None:
        pass

    def __call__(self, raw_response: str) -> str:
        if 'yes' in raw_response.lower():
            return 'yes'
        elif 'no' in raw_response.lower():
            return 'no'
        else:
            return 'unknown'
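Usage sketch (not part of the diff); matching is by substring on the lowercased response, so the example wording is chosen to avoid accidental hits:

from opencompass.multimodal.models.openflamingo import OpenFlamingoVSRPostProcessor

post_processor = OpenFlamingoVSRPostProcessor()
print(post_processor('Yes, the cat is under the table.'))  # -> yes
print(post_processor('No.'))                               # -> no
print(post_processor('Hard to tell.'))                     # -> unknown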
opencompass/multimodal/models/openflamingo/prompt_constructor.py (new file, 130 lines)
@ -0,0 +1,130 @@
from typing import Optional

from mmpretrain.structures import DataSample


class OpenFlamingoMMBenchPromptConstructor:
    """MMBench prompt constructor for OpenFlamingo."""

    def __init__(self) -> None:
        pass

    def __call__(self, data_samples: DataSample) -> tuple:
        """Construct prompt.

        Args:
            data_samples (DataSample): Input data_samples.

        Returns:
            Raw text input (str).
        """
        assert len(data_samples) == 1
        sample = data_samples[0]
        prompts = []
        question = sample.get('question')
        option = sample.get('options')

        prompt = '<image>' + question + ' ' + option + ' ' + 'Answer:'
        if sample.get('context') is not None:
            prompt = sample.get('context') + ' ' + prompt

        prompts.append(prompt)

        return prompts


class OpenFlamingoCaptionPromptConstructor:
    """Caption prompt constructor for OpenFlamingo."""

    def __init__(self, shot_prompt: Optional[str] = None) -> None:
        if shot_prompt:
            self.shot_prompt = shot_prompt
        else:
            self.shot_prompt = (
                'Output:A child holding a flowered umbrella and petting a yak.<|endofchunk|>'  # noqa
                'Output:The child is holding a brush close to his mouth.<|endofchunk|>'  # noqa
            )  # noqa

    def __call__(self, data_samples: DataSample) -> tuple:
        """Construct prompt.

        Args:
            data_samples (DataSample): Input data_samples.

        Returns:
            Raw text input (str).
        """
        assert len(data_samples) == 1
        prompts = []
        prompt = '<image>Output:'
        prompts.append(self.shot_prompt + prompt)
        return prompts


class OpenFlamingoVQAPromptConstructor:
    """VQA prompt constructor for OpenFlamingo."""

    def __init__(self, shot_prompt: Optional[str] = None) -> None:
        if shot_prompt:
            self.shot_prompt = shot_prompt
        else:
            self.shot_prompt = (
                'Question:Is the sky dark? Short Answer:yes<|endofchunk|>'  # noqa: E501
                'Question:What is on the white wall? Short Answer:pipe<|endofchunk|>'  # noqa: E501
            )  # noqa

    def __call__(self, data_samples: DataSample) -> tuple:
        """Construct prompt.

        Args:
            data_samples (DataSample): Input data_samples.

        Returns:
            Raw text input (str).
        """
        prompts = []
        for sample in data_samples:
            question = sample.get('question')
            prompt = '<image>Question:{} Short Answer:'.format(question)
            prompts.append(self.shot_prompt + prompt)
        return prompts


class OpenFlamingoScienceQAPromptConstructor:
    """ScienceQA prompt constructor for OpenFlamingo."""
    choice_mapping = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F'}

    def __init__(self, shot_prompt: Optional[str] = None) -> None:
        if shot_prompt:
            self.shot_prompt = shot_prompt
        else:
            self.shot_prompt = (
                "Context:Question:Which of these states is farthest north? Choices:['(A) West Virginia' '(B) Louisiana' '(C) Arizona' '(D) Oklahoma'] Answer with a single character: A<|endofchunk|>"  # noqa
                'Context:The diagrams below show two pure samples of gas in identical closed, rigid containers. Each colored ball represents one gas particle. Both samples have the same number of particles.'  # noqa
                "Question:Compare the average kinetic energies of the particles in each sample. Which sample has the higher temperature? Choices:'[(A) neither' '(B) sample A' '(C) sample B'] Answer with a single character: C<|endofchunk|>"  # noqa
            )  # noqa

    def __call__(self, data_samples: DataSample) -> tuple:
        """Construct prompt.

        Args:
            data_samples (DataSample): Input data_samples.

        Returns:
            Raw text input (str).
        """
        assert len(data_samples) == 1
        sample = data_samples[0]
        question = sample.get('question')
        choices = sample.get('choices')
        choices = [
            f'({self.choice_mapping[i]}) ' + item
            for i, item in enumerate(choices)
        ]
        hint = sample.get('hint')
        prompts = []
        prompt = '<image>Context:{} Question:{} Choices:{}'.format(
            hint, question, choices)
        prompt += ' Answer with a single character:'
        prompts.append(self.shot_prompt + prompt)
        return prompts
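Standalone sketch (not part of the commit) of the few-shot VQA prompt these constructors produce; a plain dict stands in for the DataSample and the question is invented:

from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor

constructor = OpenFlamingoVQAPromptConstructor()
prompts = constructor([{'question': 'What is the man holding?'}])
print(prompts[0])
# Question:Is the sky dark? Short Answer:yes<|endofchunk|>Question:What is on
# the white wall? Short Answer:pipe<|endofchunk|><image>Question:What is the
# man holding? Short Answer: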
@ -9,3 +9,11 @@ if TYPE_CHECKING:
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
+
+from .otter import Otter
+from .post_processor import OTTERMMBenchPostProcessor
+from .prompt_constructor import OTTERMMBenchPromptConstructor
+
+__all__ = [
+    'Otter', 'OTTERMMBenchPromptConstructor', 'OTTERMMBenchPostProcessor'
+]
@ -1,11 +1,12 @@
+import importlib
+
import mmengine
import torch
import torch.nn as nn
+from mmengine.device import get_device

from opencompass.registry import MM_MODELS

-from .Otter.models.otter.modeling_otter import OtterForConditionalGeneration
-

@MM_MODELS.register_module('otter-9b')
class Otter(nn.Module):
@ -19,14 +20,20 @@ class Otter(nn.Module):
        model_path (str): The path of OTTER model
            in Huggingface model hub format.
        load_bit (str): The bit of OTTER model, can be "fp32" or "bf16".
+        mode (str): The mode of inference. Defaults to 'generation'.
    """

-    def __init__(self, model_path, load_bit, prompt_constructor,
-                 post_processor) -> None:
+    def __init__(self,
+                 model_path,
+                 load_bit,
+                 prompt_constructor,
+                 post_processor,
+                 mode='generation') -> None:
        super().__init__()
        torch_dtype = torch.bfloat16 if load_bit == 'bf16' else torch.float32
-        self.model = OtterForConditionalGeneration.from_pretrained(
-            model_path, torch_dtype=torch_dtype)
+        otter_ai = importlib.import_module('otter_ai')
+        self.model = otter_ai.OtterForConditionalGeneration.from_pretrained(
+            model_path, torch_dtype=torch_dtype, device_map=get_device())
        self.tokenizer = self.model.text_tokenizer
        self.tokenizer.padding_side = 'left'
        self.model_dtype = next(self.model.parameters()).dtype
@ -35,6 +42,7 @@ class Otter(nn.Module):
        if post_processor is not None:
            self.post_processor = mmengine.registry.build_from_cfg(
                post_processor, MM_MODELS)
+        self.mode = mode

    def forward(self, batch):
        if self.mode == 'generation':
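Aside (not from the diff): the switch to importlib.import_module('otter_ai') is a lazy-import pattern; the heavy optional dependency is only resolved when an Otter model is actually constructed, not when opencompass is imported. A minimal sketch of the same idea, with an illustrative helper name:

import importlib

def build_otter(model_path: str, torch_dtype):
    # ImportError surfaces here, at construction time, not at package import time.
    otter_ai = importlib.import_module('otter_ai')
    return otter_ai.OtterForConditionalGeneration.from_pretrained(
        model_path, torch_dtype=torch_dtype)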
@ -53,9 +53,9 @@ class OTTERMMBenchPromptConstructor:
        context = data_sample.get('context')
        # e.g. <image>User: What is the color of the sky? A: Blue B: Red C: Green D: Yellow GPT:<answer>  # noqa
        if context is not None:
-            prompt = f'{self.image_token}{self.user_label} {context[i]} {question[i]} {options[i]} {self.model_label}:{self.reply_token}'  # noqa
+            prompt = f'{self.image_token}{self.user_label} {context} {question} {options} {self.model_label}:{self.reply_token}'  # noqa
        else:
-            prompt = f'{self.image_token}{self.user_label} {question[i]} {options[i]} {self.model_label}:{self.reply_token}'  # noqa
+            prompt = f'{self.image_token}{self.user_label} {question} {options} {self.model_label}:{self.reply_token}'  # noqa

        return prompt

@ -1,8 +1,13 @@
-from .post_processor import QwenVLBasePostProcessor
-from .prompt_constructor import QwenVLMMBenchPromptConstructor
+from .post_processor import QwenVLBasePostProcessor, QwenVLChatVSRPostProcessor
+from .prompt_constructor import (QwenVLChatPromptConstructor,
+                                 QwenVLChatScienceQAPromptConstructor,
+                                 QwenVLChatVQAPromptConstructor,
+                                 QwenVLMMBenchPromptConstructor)
from .qwen import QwenVLBase, QwenVLChat

__all__ = [
    'QwenVLBase', 'QwenVLChat', 'QwenVLBasePostProcessor',
-    'QwenVLMMBenchPromptConstructor'
+    'QwenVLMMBenchPromptConstructor', 'QwenVLChatPromptConstructor',
+    'QwenVLChatVQAPromptConstructor', 'QwenVLChatVSRPostProcessor',
+    'QwenVLChatScienceQAPromptConstructor'
]
@ -14,3 +14,18 @@ class QwenVLBasePostProcessor:
        response = self.tokenizer.decode(pred)[input_len:]
        response = response.replace('<|endoftext|>', '').strip()
        return response
+
+
+class QwenVLChatVSRPostProcessor:
+    """VSR post processor for Qwen-VL-Chat."""
+
+    def __init__(self) -> None:
+        pass
+
+    def __call__(self, response: str) -> str:
+        if 'yes' in response.lower():
+            return 'yes'
+        elif 'no' in response.lower():
+            return 'no'
+        else:
+            return 'unknown'
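Usage sketch (not part of the commit): with post_processor=dict(type=QwenVLChatVSRPostProcessor) in a config, a free-form chat answer is collapsed to a yes/no/unknown label before scoring; the response strings are invented:

from opencompass.multimodal.models.qwen import QwenVLChatVSRPostProcessor

post_processor = QwenVLChatVSRPostProcessor()
print(post_processor('Yes, the cup is on top of the book.'))  # -> yes
print(post_processor('The statement is false.'))              # -> unknown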
@ -7,7 +7,7 @@ class QwenVLMMBenchPromptConstructor:
    def __init__(self) -> None:
        pass

-    def __call__(self, inputs: dict) -> str:
+    def __call__(self, inputs: dict) -> list:
        data_samples = inputs['data_samples']
        assert len(data_samples) == 1
        data_sample = data_samples[0]
@ -27,3 +27,74 @@ class QwenVLMMBenchPromptConstructor:
            },
        ]
        return format_input
+
+
+class QwenVLChatPromptConstructor:
+    """Prompt constructor for Qwen-VL-Chat."""
+
+    def __init__(self, prompt='') -> None:
+        self.prompt = prompt
+
+    def __call__(self, inputs: dict) -> list:
+        assert len(inputs['data_samples']) == 1
+        format_input = [
+            {
+                'image': 'This_is_path_to_an_image.'
+            },  # Just placeholder for Image Tokens
+            {
+                'text': self.prompt
+            },
+        ]
+        return format_input
+
+
+class QwenVLChatVQAPromptConstructor:
+    """VQA prompt constructor for Qwen-VL-Chat."""
+
+    def __init__(self, prompt='') -> None:
+        self.prompt = prompt
+
+    def __call__(self, inputs: dict) -> list:
+        data_samples = inputs['data_samples']
+        assert len(data_samples) == 1
+        data_sample = data_samples[0]
+        question = data_sample.get('question')
+        format_input = [
+            {
+                'image': 'This_is_path_to_an_image.'
+            },  # Just placeholder for Image Tokens
+            {
+                'text': question + self.prompt
+            },
+        ]
+        return format_input
+
+
+class QwenVLChatScienceQAPromptConstructor:
+    """ScienceQA prompt constructor for Qwen-VL-Chat."""
+    choice_mapping = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F'}
+
+    def __init__(self, prompt='') -> None:
+        self.prompt = prompt
+
+    def __call__(self, inputs: dict) -> list:
+        data_samples = inputs['data_samples']
+        assert len(data_samples) == 1
+        data_sample = data_samples[0]
+        question = data_sample.get('question')
+        choices = data_sample.get('choices')
+        choices = [
+            f'({self.choice_mapping[i]}) ' + item
+            for i, item in enumerate(choices)
+        ]
+        choices = 'Choices: ' + ' '.join(choices) + '\n'
+        contexts = 'Context: ' + data_sample.get('hint')
+        format_input = [
+            {
+                'image': 'This_is_path_to_an_image.'
+            },  # Just placeholder for Image Tokens
+            {
+                'text': contexts + question + choices + self.prompt
+            },
+        ]
+        return format_input
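A standalone sketch (not from the diff) of the message list the ScienceQA constructor hands to Qwen-VL-Chat; the sample fields and the instruction suffix are invented, and a plain dict stands in for the DataSample:

from opencompass.multimodal.models.qwen import QwenVLChatScienceQAPromptConstructor

constructor = QwenVLChatScienceQAPromptConstructor(
    prompt=' Answer with a single character.')
fake_sample = {
    'question': 'Which of these states is farthest north?',
    'choices': ['West Virginia', 'Louisiana', 'Arizona', 'Oklahoma'],
    'hint': 'Look at the map.',
}
print(constructor({'data_samples': [fake_sample]})[1]['text'])
# Context: Look at the map.Which of these states is farthest north?Choices: (A) West Virginia (B) Louisiana (C) Arizona (D) Oklahoma
#  Answer with a single character.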
@ -55,6 +55,8 @@ class QwenVLBase(nn.Module):
        if post_processor is not None:
            self.post_processor = mmengine.registry.build_from_cfg(
                post_processor, MM_MODELS)
+        else:
+            self.post_processor = None
        self.is_caption_task = is_caption_task
        self.model.transformer.forward = types.MethodType(
            forward_hack, self.model.transformer)
@ -154,6 +156,9 @@ class QwenVLChat(QwenVLBase):
            verbose=False,
            errors='replace')

+        if self.post_processor:
+            response = self.post_processor(response)
+
        data_sample = batch['data_samples'][0]
        if self.is_caption_task:
            data_sample.pred_caption = response
@ -81,9 +81,7 @@ class VisualGLMBasePromptConstructor:
        data_samples = batch.pop('data_samples')

        # generate text prompt
-        img_prompt = '<img></img>'
-        prompt = img_prompt + self.prompt
-        image_position = prompt.rfind('<img>') + 5
+        prompt = ['<img></img>' + self.prompt for i in range(images.shape[0])]

        image_position = 5

@ -43,7 +43,14 @@ class VisualGLM(nn.Module):
        if gen_kwargs:
            self.gen_kwargs = gen_kwargs
        else:
-            self.gen_kwargs = dict()
+            self.gen_kwargs = dict(
+                max_new_tokens=30,
+                num_beams=1,
+                do_sample=False,
+                repetition_penalty=1.0,
+                length_penalty=-1.0,
+            )

        self.is_caption_task = is_caption_task

    def encode_by_tokenizer(self, multi_prompts, image_position):