From 3a232db471078c8cb9e22426ca53742525444ed9 Mon Sep 17 00:00:00 2001
From: Haodong Duan
Date: Fri, 26 Apr 2024 21:20:14 +0800
Subject: [PATCH] [Deprecate] Remove multi-modal related stuff (#1072)

* Remove MultiModal
* update index.rst
* update README
* remove mmbench codes
* update news

---------

Co-authored-by: Leymore
---
 README.md | 1 + README_zh-CN.md | 3 +- configs/multimodal/instructblip/README.md | 49 --- .../instructblip/instructblip_coco_caption.py | 53 --- .../instructblip/instructblip_flickr30k.py | 54 --- .../instructblip/instructblip_gqa.py | 52 --- .../instructblip/instructblip_mmbench.py | 51 --- .../instructblip/instructblip_ocr_vqa.py | 51 --- .../instructblip/instructblip_ok_vqa.py | 54 --- .../instructblip/instructblip_scienceqa.py | 53 --- .../instructblip/instructblip_textvqa.py | 53 --- .../instructblip/instructblip_vizwiz.py | 51 --- .../instructblip/instructblip_vqav2.py | 53 --- .../instructblip/instructblip_vsr.py | 51 --- .../llama_adapter_v2_multimodal/README.md | 24 -- .../llama_adapter_v2_mm_7b_mmbench.py | 48 --- configs/multimodal/llava/README.md | 10 - .../multimodal/llava/llava_7b_coco_caption.py | 50 --- .../multimodal/llava/llava_7b_flickr30k.py | 52 --- configs/multimodal/llava/llava_7b_gqa.py | 49 --- configs/multimodal/llava/llava_7b_mmbench.py | 47 -- configs/multimodal/llava/llava_7b_ocr_vqa.py | 49 --- configs/multimodal/llava/llava_7b_ok_vqa.py | 51 --- .../multimodal/llava/llava_7b_scienceqa.py | 50 --- configs/multimodal/llava/llava_7b_textvqa.py | 50 --- configs/multimodal/llava/llava_7b_vizwiz.py | 48 --- configs/multimodal/llava/llava_7b_vqav2.py | 50 --- configs/multimodal/llava/llava_7b_vsr.py | 48 --- configs/multimodal/minigpt_4/README.md | 26 -- .../minigpt_4/minigpt_4_7b_coco_caption.py | 53 --- .../minigpt_4/minigpt_4_7b_flickr30k.py | 54 --- .../multimodal/minigpt_4/minigpt_4_7b_gqa.py | 52 --- .../minigpt_4/minigpt_4_7b_mmbench.py | 47 -- .../multimodal/minigpt_4/minigpt_4_7b_mme.py | 43 -- .../minigpt_4/minigpt_4_7b_ocr_vqa.py | 53 --- .../minigpt_4/minigpt_4_7b_ok_vqa.py | 55 --- .../minigpt_4/minigpt_4_7b_scienceqa.py | 52 --- .../minigpt_4/minigpt_4_7b_seedbench.py | 63 --- .../minigpt_4/minigpt_4_7b_textvqa.py | 55 --- .../minigpt_4/minigpt_4_7b_vizwiz.py | 52 --- .../minigpt_4/minigpt_4_7b_vqav2.py | 55 --- .../multimodal/minigpt_4/minigpt_4_7b_vsr.py | 52 --- configs/multimodal/mplug_owl/README.md | 24 -- .../mplug_owl/mplug_owl_7b_mmbench.py | 48 --- configs/multimodal/openflamingo/README.md | 21 - .../openflamingo/openflamingo_coco_caption.py | 75 ---- .../openflamingo/openflamingo_flickr30k.py | 76 ---- .../openflamingo/openflamingo_gqa.py | 75 ---- .../openflamingo/openflamingo_mmbench.py | 77 ---- .../openflamingo/openflamingo_ocr_vqa.py | 75 ---- .../openflamingo/openflamingo_ok_vqa.py | 77 ---- .../openflamingo/openflamingo_scienceqa.py | 76 ---- .../openflamingo/openflamingo_textvqa.py | 76 ---- .../openflamingo/openflamingo_vizwiz.py | 74 ---- .../openflamingo/openflamingo_vqav2.py | 75 ---- .../openflamingo/openflamingo_vsr.py | 75 ---- configs/multimodal/otter/README.md | 24 -- configs/multimodal/otter/otter_9b_mmbench.py | 43 -- .../multimodal/qwen/qwenvl_base_7b_mmbench.py | 41 -- .../qwen/qwenvl_chat_7b_coco_caption.py | 44 -- .../qwen/qwenvl_chat_7b_flickr30k.py | 44 -- configs/multimodal/qwen/qwenvl_chat_7b_gqa.py | 41 -- .../multimodal/qwen/qwenvl_chat_7b_mmbench.py | 40 -- .../qwen/qwenvl_chat_7b_mmbench_cn.py | 41 -- .../multimodal/qwen/qwenvl_chat_7b_ocr_vqa.py | 42 -- 
.../multimodal/qwen/qwenvl_chat_7b_ok_vqa.py | 44 -- .../qwen/qwenvl_chat_7b_scienceqa.py | 43 -- .../multimodal/qwen/qwenvl_chat_7b_textvqa.py | 43 -- .../multimodal/qwen/qwenvl_chat_7b_vizwiz.py | 41 -- .../multimodal/qwen/qwenvl_chat_7b_vqav2.py | 43 -- configs/multimodal/qwen/qwenvl_chat_7b_vsr.py | 42 -- configs/multimodal/tasks.py | 16 - .../visualglm/visualglm_6b_coco_caption.py | 45 -- .../visualglm/visualglm_6b_flickr30k.py | 46 -- .../multimodal/visualglm/visualglm_6b_gqa.py | 42 -- .../visualglm/visualglm_6b_mmbench.py | 42 -- .../visualglm/visualglm_6b_ocr_vqa.py | 43 -- .../visualglm/visualglm_6b_ok_vqa.py | 45 -- .../visualglm/visualglm_6b_scienceqa.py | 44 -- .../visualglm/visualglm_6b_textvqa.py | 44 -- .../visualglm/visualglm_6b_vizwiz.py | 42 -- .../visualglm/visualglm_6b_vqav2.py | 44 -- .../multimodal/visualglm/visualglm_6b_vsr.py | 43 -- docs/en/MMBench.md | 132 ------ docs/en/advanced_guides/multimodal_eval.md | 108 ----- docs/en/index.rst | 1 - docs/en/user_guides/framework_overview.md | 2 +- docs/zh_cn/advanced_guides/multimodal_eval.md | 107 ----- docs/zh_cn/index.rst | 1 - opencompass/cli/main.py | 17 +- opencompass/multimodal/datasets/__init__.py | 6 - opencompass/multimodal/datasets/mmbench.py | 84 ---- opencompass/multimodal/datasets/mme.py | 74 ---- opencompass/multimodal/datasets/seedbench.py | 174 -------- opencompass/multimodal/models/__init__.py | 24 -- .../models/instructblip/__init__.py | 25 -- .../instructblip/blip2_vicuna_instruct.py | 248 ----------- .../models/instructblip/post_processor.py | 111 ----- .../models/instructblip/prompt_constructor.py | 122 ------ .../llama_adapter_v2_multimodal/__init__.py | 8 - .../llama_adapter.py | 337 --------------- .../post_processor.py | 15 - .../prompt_constructor.py | 58 --- .../multimodal/models/llava/__init__.py | 12 - opencompass/multimodal/models/llava/llava.py | 156 ------- .../multimodal/models/llava/post_processor.py | 28 -- .../models/llava/prompt_constructor.py | 139 ------ .../multimodal/models/minigpt_4/__init__.py | 24 -- .../multimodal/models/minigpt_4/minigpt_4.py | 295 ------------- .../models/minigpt_4/post_processor.py | 142 ------ .../models/minigpt_4/prompt_constructor.py | 187 -------- .../multimodal/models/minigpt_4/utils.py | 56 --- .../multimodal/models/mplug_owl/__init__.py | 8 - .../models/mplug_owl/mplug_owl_7b.py | 104 ----- .../models/mplug_owl/post_processor.py | 17 - .../models/mplug_owl/prompt_constructor.py | 58 --- .../models/openflamingo/__init__.py | 12 - .../models/openflamingo/openflamingo.py | 110 ----- .../models/openflamingo/post_processor.py | 13 - .../models/openflamingo/prompt_constructor.py | 130 ------ .../multimodal/models/otter/__init__.py | 19 - opencompass/multimodal/models/otter/otter.py | 79 ---- .../multimodal/models/otter/post_processor.py | 139 ------ .../models/otter/prompt_constructor.py | 168 -------- .../multimodal/models/qwen/__init__.py | 13 - .../models/qwen/generation_utils.py | 293 ------------- .../multimodal/models/qwen/post_processor.py | 31 -- .../models/qwen/prompt_constructor.py | 100 ----- opencompass/multimodal/models/qwen/qwen.py | 329 -------------- .../multimodal/models/visualglm/__init__.py | 15 - .../models/visualglm/post_processor.py | 29 -- .../models/visualglm/prompt_constructor.py | 208 --------- .../multimodal/models/visualglm/visualglm.py | 104 ----- opencompass/partitioners/__init__.py | 1 - opencompass/partitioners/mm_naive.py | 119 ------ opencompass/registry.py | 8 - opencompass/tasks/__init__.py | 1 - 
opencompass/tasks/mm_infer.py | 160 ------- opencompass/utils/run.py | 21 - tools/eval_mmbench.py | 403 ------------------ 140 files changed, 6 insertions(+), 9382 deletions(-) delete mode 100644 configs/multimodal/instructblip/README.md delete mode 100644 configs/multimodal/instructblip/instructblip_coco_caption.py delete mode 100644 configs/multimodal/instructblip/instructblip_flickr30k.py delete mode 100644 configs/multimodal/instructblip/instructblip_gqa.py delete mode 100644 configs/multimodal/instructblip/instructblip_mmbench.py delete mode 100644 configs/multimodal/instructblip/instructblip_ocr_vqa.py delete mode 100644 configs/multimodal/instructblip/instructblip_ok_vqa.py delete mode 100644 configs/multimodal/instructblip/instructblip_scienceqa.py delete mode 100644 configs/multimodal/instructblip/instructblip_textvqa.py delete mode 100644 configs/multimodal/instructblip/instructblip_vizwiz.py delete mode 100644 configs/multimodal/instructblip/instructblip_vqav2.py delete mode 100644 configs/multimodal/instructblip/instructblip_vsr.py delete mode 100644 configs/multimodal/llama_adapter_v2_multimodal/README.md delete mode 100644 configs/multimodal/llama_adapter_v2_multimodal/llama_adapter_v2_mm_7b_mmbench.py delete mode 100644 configs/multimodal/llava/README.md delete mode 100644 configs/multimodal/llava/llava_7b_coco_caption.py delete mode 100644 configs/multimodal/llava/llava_7b_flickr30k.py delete mode 100644 configs/multimodal/llava/llava_7b_gqa.py delete mode 100644 configs/multimodal/llava/llava_7b_mmbench.py delete mode 100644 configs/multimodal/llava/llava_7b_ocr_vqa.py delete mode 100644 configs/multimodal/llava/llava_7b_ok_vqa.py delete mode 100644 configs/multimodal/llava/llava_7b_scienceqa.py delete mode 100644 configs/multimodal/llava/llava_7b_textvqa.py delete mode 100644 configs/multimodal/llava/llava_7b_vizwiz.py delete mode 100644 configs/multimodal/llava/llava_7b_vqav2.py delete mode 100644 configs/multimodal/llava/llava_7b_vsr.py delete mode 100644 configs/multimodal/minigpt_4/README.md delete mode 100644 configs/multimodal/minigpt_4/minigpt_4_7b_coco_caption.py delete mode 100644 configs/multimodal/minigpt_4/minigpt_4_7b_flickr30k.py delete mode 100644 configs/multimodal/minigpt_4/minigpt_4_7b_gqa.py delete mode 100644 configs/multimodal/minigpt_4/minigpt_4_7b_mmbench.py delete mode 100644 configs/multimodal/minigpt_4/minigpt_4_7b_mme.py delete mode 100644 configs/multimodal/minigpt_4/minigpt_4_7b_ocr_vqa.py delete mode 100644 configs/multimodal/minigpt_4/minigpt_4_7b_ok_vqa.py delete mode 100644 configs/multimodal/minigpt_4/minigpt_4_7b_scienceqa.py delete mode 100644 configs/multimodal/minigpt_4/minigpt_4_7b_seedbench.py delete mode 100644 configs/multimodal/minigpt_4/minigpt_4_7b_textvqa.py delete mode 100644 configs/multimodal/minigpt_4/minigpt_4_7b_vizwiz.py delete mode 100644 configs/multimodal/minigpt_4/minigpt_4_7b_vqav2.py delete mode 100644 configs/multimodal/minigpt_4/minigpt_4_7b_vsr.py delete mode 100644 configs/multimodal/mplug_owl/README.md delete mode 100644 configs/multimodal/mplug_owl/mplug_owl_7b_mmbench.py delete mode 100644 configs/multimodal/openflamingo/README.md delete mode 100644 configs/multimodal/openflamingo/openflamingo_coco_caption.py delete mode 100644 configs/multimodal/openflamingo/openflamingo_flickr30k.py delete mode 100644 configs/multimodal/openflamingo/openflamingo_gqa.py delete mode 100644 configs/multimodal/openflamingo/openflamingo_mmbench.py delete mode 100644 configs/multimodal/openflamingo/openflamingo_ocr_vqa.py 
delete mode 100644 configs/multimodal/openflamingo/openflamingo_ok_vqa.py delete mode 100644 configs/multimodal/openflamingo/openflamingo_scienceqa.py delete mode 100644 configs/multimodal/openflamingo/openflamingo_textvqa.py delete mode 100644 configs/multimodal/openflamingo/openflamingo_vizwiz.py delete mode 100644 configs/multimodal/openflamingo/openflamingo_vqav2.py delete mode 100644 configs/multimodal/openflamingo/openflamingo_vsr.py delete mode 100644 configs/multimodal/otter/README.md delete mode 100644 configs/multimodal/otter/otter_9b_mmbench.py delete mode 100644 configs/multimodal/qwen/qwenvl_base_7b_mmbench.py delete mode 100644 configs/multimodal/qwen/qwenvl_chat_7b_coco_caption.py delete mode 100644 configs/multimodal/qwen/qwenvl_chat_7b_flickr30k.py delete mode 100644 configs/multimodal/qwen/qwenvl_chat_7b_gqa.py delete mode 100644 configs/multimodal/qwen/qwenvl_chat_7b_mmbench.py delete mode 100644 configs/multimodal/qwen/qwenvl_chat_7b_mmbench_cn.py delete mode 100644 configs/multimodal/qwen/qwenvl_chat_7b_ocr_vqa.py delete mode 100644 configs/multimodal/qwen/qwenvl_chat_7b_ok_vqa.py delete mode 100644 configs/multimodal/qwen/qwenvl_chat_7b_scienceqa.py delete mode 100644 configs/multimodal/qwen/qwenvl_chat_7b_textvqa.py delete mode 100644 configs/multimodal/qwen/qwenvl_chat_7b_vizwiz.py delete mode 100644 configs/multimodal/qwen/qwenvl_chat_7b_vqav2.py delete mode 100644 configs/multimodal/qwen/qwenvl_chat_7b_vsr.py delete mode 100644 configs/multimodal/tasks.py delete mode 100644 configs/multimodal/visualglm/visualglm_6b_coco_caption.py delete mode 100644 configs/multimodal/visualglm/visualglm_6b_flickr30k.py delete mode 100644 configs/multimodal/visualglm/visualglm_6b_gqa.py delete mode 100644 configs/multimodal/visualglm/visualglm_6b_mmbench.py delete mode 100644 configs/multimodal/visualglm/visualglm_6b_ocr_vqa.py delete mode 100644 configs/multimodal/visualglm/visualglm_6b_ok_vqa.py delete mode 100644 configs/multimodal/visualglm/visualglm_6b_scienceqa.py delete mode 100644 configs/multimodal/visualglm/visualglm_6b_textvqa.py delete mode 100644 configs/multimodal/visualglm/visualglm_6b_vizwiz.py delete mode 100644 configs/multimodal/visualglm/visualglm_6b_vqav2.py delete mode 100644 configs/multimodal/visualglm/visualglm_6b_vsr.py delete mode 100644 docs/en/MMBench.md delete mode 100644 docs/en/advanced_guides/multimodal_eval.md delete mode 100644 docs/zh_cn/advanced_guides/multimodal_eval.md delete mode 100644 opencompass/multimodal/datasets/__init__.py delete mode 100644 opencompass/multimodal/datasets/mmbench.py delete mode 100644 opencompass/multimodal/datasets/mme.py delete mode 100644 opencompass/multimodal/datasets/seedbench.py delete mode 100644 opencompass/multimodal/models/__init__.py delete mode 100644 opencompass/multimodal/models/instructblip/__init__.py delete mode 100644 opencompass/multimodal/models/instructblip/blip2_vicuna_instruct.py delete mode 100644 opencompass/multimodal/models/instructblip/post_processor.py delete mode 100644 opencompass/multimodal/models/instructblip/prompt_constructor.py delete mode 100644 opencompass/multimodal/models/llama_adapter_v2_multimodal/__init__.py delete mode 100644 opencompass/multimodal/models/llama_adapter_v2_multimodal/llama_adapter.py delete mode 100644 opencompass/multimodal/models/llama_adapter_v2_multimodal/post_processor.py delete mode 100644 opencompass/multimodal/models/llama_adapter_v2_multimodal/prompt_constructor.py delete mode 100644 opencompass/multimodal/models/llava/__init__.py delete mode 
100644 opencompass/multimodal/models/llava/llava.py delete mode 100644 opencompass/multimodal/models/llava/post_processor.py delete mode 100644 opencompass/multimodal/models/llava/prompt_constructor.py delete mode 100644 opencompass/multimodal/models/minigpt_4/__init__.py delete mode 100644 opencompass/multimodal/models/minigpt_4/minigpt_4.py delete mode 100644 opencompass/multimodal/models/minigpt_4/post_processor.py delete mode 100644 opencompass/multimodal/models/minigpt_4/prompt_constructor.py delete mode 100644 opencompass/multimodal/models/minigpt_4/utils.py delete mode 100644 opencompass/multimodal/models/mplug_owl/__init__.py delete mode 100644 opencompass/multimodal/models/mplug_owl/mplug_owl_7b.py delete mode 100644 opencompass/multimodal/models/mplug_owl/post_processor.py delete mode 100644 opencompass/multimodal/models/mplug_owl/prompt_constructor.py delete mode 100644 opencompass/multimodal/models/openflamingo/__init__.py delete mode 100644 opencompass/multimodal/models/openflamingo/openflamingo.py delete mode 100644 opencompass/multimodal/models/openflamingo/post_processor.py delete mode 100644 opencompass/multimodal/models/openflamingo/prompt_constructor.py delete mode 100644 opencompass/multimodal/models/otter/__init__.py delete mode 100644 opencompass/multimodal/models/otter/otter.py delete mode 100644 opencompass/multimodal/models/otter/post_processor.py delete mode 100644 opencompass/multimodal/models/otter/prompt_constructor.py delete mode 100644 opencompass/multimodal/models/qwen/__init__.py delete mode 100644 opencompass/multimodal/models/qwen/generation_utils.py delete mode 100644 opencompass/multimodal/models/qwen/post_processor.py delete mode 100644 opencompass/multimodal/models/qwen/prompt_constructor.py delete mode 100644 opencompass/multimodal/models/qwen/qwen.py delete mode 100644 opencompass/multimodal/models/visualglm/__init__.py delete mode 100644 opencompass/multimodal/models/visualglm/post_processor.py delete mode 100644 opencompass/multimodal/models/visualglm/prompt_constructor.py delete mode 100644 opencompass/multimodal/models/visualglm/visualglm.py delete mode 100644 opencompass/partitioners/mm_naive.py delete mode 100644 opencompass/tasks/mm_infer.py delete mode 100644 tools/eval_mmbench.py
diff --git a/README.md b/README.md
index 063540e9..a996af95 100644
--- a/README.md
+++ b/README.md
@@ -70,6 +70,7 @@ Just like a compass guides us on our journey, OpenCompass will guide you through
 ## 🚀 What's New
+- **\[2024.04.26\]** We deprecated the multi-modal evaluation function of OpenCompass; the related implementation has been moved to [VLMEvalKit](https://github.com/open-compass/VLMEvalKit), welcome to use it! 🔥🔥🔥.
 - **\[2024.04.26\]** We supported the evaluation of [ArenaHard](configs/eval_subjective_arena_hard.py) welcome to try!🔥🔥🔥.
 - **\[2024.04.22\]** We supported the evaluation of [LLaMA3](configs/models/hf_llama/hf_llama3_8b.py) 和 [LLaMA3-Instruct](configs/models/hf_llama/hf_llama3_8b_instruct.py), welcome to try! 🔥🔥🔥
 - **\[2024.02.29\]** We supported the MT-Bench, AlpacalEval and AlignBench, more information can be found [here](https://opencompass.readthedocs.io/en/latest/advanced_guides/subjective_evaluation.html)
diff --git a/README_zh-CN.md b/README_zh-CN.md index 291524f9..dcec720f 100644 --- a/README_zh-CN.md +++ b/README_zh-CN.md @@ -60,7 +60,7 @@ 🚩🚩🚩 欢迎加入 OpenCompass!我们目前**招聘全职研究人员/工程师和实习生**。如果您对 LLM 和 OpenCompass 充满热情,请随时通过[电子邮件](mailto:zhangsongyang@pjlab.org.cn)与我们联系。我们非常期待与您交流!
-🔥🔥🔥 祝贺 **OpenCompass 作为大模型标准测试工具被Meta AI官方推荐**, 点击 Llama 的 [入门文档](https://ai.meta.com/llama/get-started/#validation) 获取更多信息. +🔥🔥🔥 祝贺 **OpenCompass 作为大模型标准测试工具被Meta AI官方推荐**, 点击 Llama 的 [入门文档](https://ai.meta.com/llama/get-started/#validation) 获取更多信息。 > **注意**
> 我们正式启动 OpenCompass 共建计划,诚邀社区用户为 OpenCompass 提供更具代表性和可信度的客观评测数据集! @@ -69,6 +69,7 @@ ## 🚀 最新进展 +- **\[2024.04.26\]** 我们废弃了 OpenCompass 进行多模态大模型评测的功能,相关功能转移至 [VLMEvalKit](https://github.com/open-compass/VLMEvalKit),推荐使用!🔥🔥🔥. - **\[2024.04.26\]** 我们支持了 [ArenaHard评测](configs/eval_subjective_arena_hard.py) 欢迎试用!🔥🔥🔥. - **\[2024.04.22\]** 我们支持了 [LLaMA3](configs/models/hf_llama/hf_llama3_8b.py) 和 [LLaMA3-Instruct](configs/models/hf_llama/hf_llama3_8b_instruct.py) 的评测,欢迎试用!🔥🔥🔥. - **\[2024.02.29\]** 我们支持了MT-Bench、AlpacalEval和AlignBench,更多信息可以在[这里](https://opencompass.readthedocs.io/en/latest/advanced_guides/subjective_evaluation.html)找到。 diff --git a/configs/multimodal/instructblip/README.md b/configs/multimodal/instructblip/README.md deleted file mode 100644 index 1b5ea393..00000000 --- a/configs/multimodal/instructblip/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# InstructBLIP - -### Prepare the environment - -```sh -git clone https://github.com/salesforce/LAVIS.git -cd ./LAVIS -pip install -e . -``` - -### Modify the config - -Modify the config of InstructBlip, like model path of LLM and Qformer. - -Then update `tasks.py` like the following code snippet. - -```python -from mmengine.config import read_base - -with read_base(): - from .instructblip.instructblip_mmbench import (instruct_blip_dataloader, - instruct_blip_evaluator, - instruct_blip_load_from, - instruct_blip_model) - -models = [instruct_blip_model] -datasets = [instruct_blip_dataloader] -evaluators = [instruct_blip_evaluator] -load_froms = [instruct_blip_load_from] -num_gpus = 8 -num_procs = 8 -launcher = 'pytorch' # or 'slurm' -``` - -### Start evaluation - -#### Slurm - -```sh -cd $root -python run.py configs/multimodal/tasks.py --mm-eval --slurm -p $PARTITION -``` - -#### PyTorch - -```sh -cd $root -python run.py configs/multimodal/tasks.py --mm-eval -``` diff --git a/configs/multimodal/instructblip/instructblip_coco_caption.py b/configs/multimodal/instructblip/instructblip_coco_caption.py deleted file mode 100644 index 54ec3d2b..00000000 --- a/configs/multimodal/instructblip/instructblip_coco_caption.py +++ /dev/null @@ -1,53 +0,0 @@ -from opencompass.multimodal.models.instructblip import ( - InstructBlipCOCOCaotionPromptConstructor, - InstructBlipCOCOCaptionPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(384, 384), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id']) -] - -dataset = dict(type='mmpretrain.COCOCaption', - data_root='data/coco', - data_prefix=dict(img_path='images'), - ann_file='annotations/coco_karpathy_val.json', - pipeline=val_pipeline) - -instruct_blip_coco_caption_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -instruct_blip_coco_caption_model = dict( - type='blip2-vicuna-instruct', - prompt_constructor=dict(type=InstructBlipCOCOCaotionPromptConstructor), - post_processor=dict(type=InstructBlipCOCOCaptionPostProcessor), - freeze_vit=True, - low_resource=False, - llm_model='/path/to/vicuna-7b/', - img_size=384, - is_caption_task=True, -) - -# evaluation settings -instruct_blip_coco_caption_evaluator = [ - 
dict( - type='mmpretrain.COCOCaption', - ann_file='data/coco/annotations/coco_karpathy_val_gt.json', - ) # noqa -] - -instruct_blip_load_from = '/path/to/instruct_blip_vicuna7b_trimmed.pth' diff --git a/configs/multimodal/instructblip/instructblip_flickr30k.py b/configs/multimodal/instructblip/instructblip_flickr30k.py deleted file mode 100644 index 76e0f6f3..00000000 --- a/configs/multimodal/instructblip/instructblip_flickr30k.py +++ /dev/null @@ -1,54 +0,0 @@ -from opencompass.multimodal.models.instructblip import ( - InstructBlipCOCOCaotionPromptConstructor, - InstructBlipCOCOCaptionPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(384, 384), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id']) -] - -dataset = dict(type='mmpretrain.Flickr30kCaption', - data_root='data/flickr30k', - ann_file='annotations/dataset_flickr30k.json', - data_prefix='images', - split='val', - pipeline=val_pipeline) - -instruct_blip_flickr30k_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -instruct_blip_flickr30k_model = dict( - type='blip2-vicuna-instruct', - prompt_constructor=dict(type=InstructBlipCOCOCaotionPromptConstructor), - post_processor=dict(type=InstructBlipCOCOCaptionPostProcessor), - freeze_vit=True, - low_resource=False, - llm_model='/path/to/vicuna-7b/', - img_size=384, - is_caption_task=True, -) - -# evaluation settings -instruct_blip_flickr30k_evaluator = [ - dict( - type='mmpretrain.COCOCaption', - ann_file='data/flickr30k/annotations/flickr30k_val_gt.json', - ) # noqa -] - -instruct_blip_load_from = '/path/to/instruct_blip_vicuna7b_trimmed.pth' diff --git a/configs/multimodal/instructblip/instructblip_gqa.py b/configs/multimodal/instructblip/instructblip_gqa.py deleted file mode 100644 index beb1e626..00000000 --- a/configs/multimodal/instructblip/instructblip_gqa.py +++ /dev/null @@ -1,52 +0,0 @@ -from opencompass.multimodal.models.instructblip import ( - InstructBlipVQAPromptConstructor, - InstructBlipVQAPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.GQA', - data_root='data/gqa', - data_prefix='images', - ann_file='annotations/testdev_balanced_questions.json', - pipeline=val_pipeline) - -instruct_blip_gqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -instruct_blip_gqa_model = dict( - type='blip2-vicuna-instruct', - prompt_constructor=dict(type=InstructBlipVQAPromptConstructor), - 
post_processor=dict(type=InstructBlipVQAPostProcessor), - freeze_vit=True, - low_resource=False, - llm_model='/path/to/vicuna-7b/', - max_output_txt_len=10, -) - -# evaluation settings -# evaluation settings -instruct_blip_gqa_evaluator = [dict(type='mmpretrain.GQAAcc')] - -instruct_blip_load_from = '/path/to/instruct_blip_vicuna7b_trimmed.pth' diff --git a/configs/multimodal/instructblip/instructblip_mmbench.py b/configs/multimodal/instructblip/instructblip_mmbench.py deleted file mode 100644 index b7113e69..00000000 --- a/configs/multimodal/instructblip/instructblip_mmbench.py +++ /dev/null @@ -1,51 +0,0 @@ -from opencompass.multimodal.models.instructblip import ( - InstructBlipMMBenchPromptConstructor, InstructBlipMMBenchPostProcessor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'category', 'l2-category', 'context', 'index', - 'options_dict', 'options', 'split' - ]) -] - -dataset = dict(type='opencompass.MMBenchDataset', - data_file='data/mmbench/mmbench_test_20230712.tsv', - pipeline=val_pipeline) - -instruct_blip_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -instruct_blip_model = dict( - type='blip2-vicuna-instruct', - prompt_constructor=dict(type=InstructBlipMMBenchPromptConstructor), - post_processor=dict(type=InstructBlipMMBenchPostProcessor), - freeze_vit=True, - low_resource=False, - llm_model='/path/to/vicuna-7b/', - sys_prompt= # noqa: E251 - '###Human: What is the capital of China? There are several options:\nA. Beijing\nB. Shanghai\nC. Guangzhou\nD. 
Shenzhen\n###Assistant: A\n' -) - -# evaluation settings -instruct_blip_evaluator = [ - dict( - type='opencompass.DumpResults', - save_path= # noqa: E251 - 'work_dirs/instructblip_vicuna7b/instructblipvicuna_mmbench.xlsx') -] - -instruct_blip_load_from = '/path/to/instruct_blip_vicuna7b_trimmed' diff --git a/configs/multimodal/instructblip/instructblip_ocr_vqa.py b/configs/multimodal/instructblip/instructblip_ocr_vqa.py deleted file mode 100644 index 3c46266c..00000000 --- a/configs/multimodal/instructblip/instructblip_ocr_vqa.py +++ /dev/null @@ -1,51 +0,0 @@ -from opencompass.multimodal.models.instructblip import ( - InstructBlipVQAPromptConstructor, - InstructBlipVQAPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.OCRVQA', - data_root='data/ocrvqa', - ann_file='annotations/dataset.json', - split='test', - data_prefix='images', - pipeline=val_pipeline) - -instruct_blip_ocr_vqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -instruct_blip_ocr_vqa_model = dict( - type='blip2-vicuna-instruct', - prompt_constructor=dict(type=InstructBlipVQAPromptConstructor), - post_processor=dict(type=InstructBlipVQAPostProcessor), - freeze_vit=True, - low_resource=False, - llm_model='/path/to/vicuna-7b/', -) - -# evaluation settings -instruct_blip_ocr_vqa_evaluator = [dict(type='mmpretrain.VQAAcc')] - -instruct_blip_load_from = '/path/to/instruct_blip_vicuna7b_trimmed.pth' diff --git a/configs/multimodal/instructblip/instructblip_ok_vqa.py b/configs/multimodal/instructblip/instructblip_ok_vqa.py deleted file mode 100644 index 7d45e265..00000000 --- a/configs/multimodal/instructblip/instructblip_ok_vqa.py +++ /dev/null @@ -1,54 +0,0 @@ -from opencompass.multimodal.models.instructblip import ( - InstructBlipVQAPromptConstructor, - InstructBlipVQAPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.COCOVQA', - data_root='data/okvqa', - question_file='annotations/OpenEnded_mscoco_val2014_questions.json', - ann_file='annotations/mscoco_val2014_annotations.json', - pipeline=val_pipeline, - data_prefix='images/val2014', -) - -instruct_blip_ok_vqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -instruct_blip_ok_vqa_model = dict( - 
type='blip2-vicuna-instruct', - prompt_constructor=dict(type=InstructBlipVQAPromptConstructor), - post_processor=dict(type=InstructBlipVQAPostProcessor), - freeze_vit=True, - low_resource=False, - llm_model='/path/to/vicuna-7b/', - max_output_txt_len=10, -) - -# evaluation settings -instruct_blip_ok_vqa_evaluator = [dict(type='mmpretrain.VQAAcc')] - -instruct_blip_load_from = '/path/to/instruct_blip_vicuna7b_trimmed.pth' diff --git a/configs/multimodal/instructblip/instructblip_scienceqa.py b/configs/multimodal/instructblip/instructblip_scienceqa.py deleted file mode 100644 index 3d2211f0..00000000 --- a/configs/multimodal/instructblip/instructblip_scienceqa.py +++ /dev/null @@ -1,53 +0,0 @@ -from opencompass.multimodal.models.instructblip import ( - InstructBlipScienceQAPromptConstructor, - InstructBlipScienceQAPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'gt_answer', 'choices', 'hint', 'lecture', 'solution', 'has_image' - ]) -] - -dataset = dict(type='mmpretrain.ScienceQA', - data_root='./data/scienceqa', - split='val', - split_file='pid_splits.json', - ann_file='problems.json', - image_only=True, - data_prefix=dict(img_path='val'), - pipeline=val_pipeline) - -instruct_blip_scienceqa_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -instruct_blip_scienceqa_model = dict( - type='blip2-vicuna-instruct', - prompt_constructor=dict(type=InstructBlipScienceQAPromptConstructor), - post_processor=dict(type=InstructBlipScienceQAPostProcessor), - freeze_vit=True, - low_resource=False, - llm_model='/path/to/vicuna-7b/', - max_output_txt_len=10, -) - -# evaluation settings -instruct_blip_scienceqa_evaluator = [dict(type='mmpretrain.ScienceQAMetric')] - -instruct_blip_load_from = '/path/to/instruct_blip_vicuna7b_trimmed.pth' diff --git a/configs/multimodal/instructblip/instructblip_textvqa.py b/configs/multimodal/instructblip/instructblip_textvqa.py deleted file mode 100644 index 6b59aaec..00000000 --- a/configs/multimodal/instructblip/instructblip_textvqa.py +++ /dev/null @@ -1,53 +0,0 @@ -from opencompass.multimodal.models.instructblip import ( - InstructBlipVQAPromptConstructor, - InstructBlipVQAPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.TextVQA', - data_root='data/textvqa', - ann_file='annotations/TextVQA_0.5.1_val.json', - pipeline=val_pipeline, - data_prefix='images/train_images', -) - -instruct_blip_textvqa_dataloader = dict(batch_size=1, - num_workers=4, - 
dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -instruct_blip_textvqa_model = dict( - type='blip2-vicuna-instruct', - prompt_constructor=dict(type=InstructBlipVQAPromptConstructor), - post_processor=dict(type=InstructBlipVQAPostProcessor), - freeze_vit=True, - low_resource=False, - llm_model='/path/to/vicuna-7b/', - max_output_txt_len=10, -) - -# evaluation settings -instruct_blip_textvqa_evaluator = [dict(type='mmpretrain.VQAAcc')] - -instruct_blip_load_from = '/path/to/instruct_blip_vicuna7b_trimmed.pth' diff --git a/configs/multimodal/instructblip/instructblip_vizwiz.py b/configs/multimodal/instructblip/instructblip_vizwiz.py deleted file mode 100644 index 00ca79f8..00000000 --- a/configs/multimodal/instructblip/instructblip_vizwiz.py +++ /dev/null @@ -1,51 +0,0 @@ -from opencompass.multimodal.models.instructblip import ( - InstructBlipVQAPromptConstructor, - InstructBlipVQAPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.VizWiz', - data_root='data/vizwiz/', - data_prefix='Images/val', - ann_file='Annotations/val.json', - pipeline=val_pipeline) - -instruct_blip_vizwiz_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -instruct_blip_vizwiz_model = dict( - type='blip2-vicuna-instruct', - prompt_constructor=dict(type=InstructBlipVQAPromptConstructor), - post_processor=dict(type=InstructBlipVQAPostProcessor), - freeze_vit=True, - low_resource=False, - llm_model='/path/to/vicuna-7b/', - max_output_txt_len=10, -) - -# evaluation settings -instruct_blip_vizwiz_evaluator = [dict(type='mmpretrain.VQAAcc')] - -instruct_blip_load_from = '/path/to/instruct_blip_vicuna7b_trimmed.pth' diff --git a/configs/multimodal/instructblip/instructblip_vqav2.py b/configs/multimodal/instructblip/instructblip_vqav2.py deleted file mode 100644 index 0dbc56a3..00000000 --- a/configs/multimodal/instructblip/instructblip_vqav2.py +++ /dev/null @@ -1,53 +0,0 @@ -from opencompass.multimodal.models.instructblip import ( - InstructBlipVQAPromptConstructor, - InstructBlipVQAPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.COCOVQA', - data_root='data/coco', - data_prefix='images/val2014', - question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json', - 
ann_file='annotations/v2_mscoco_val2014_annotations.json', - pipeline=val_pipeline) - -instruct_blip_vqav2_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -instruct_blip_vqav2_model = dict( - type='blip2-vicuna-instruct', - prompt_constructor=dict(type=InstructBlipVQAPromptConstructor), - post_processor=dict(type=InstructBlipVQAPostProcessor), - freeze_vit=True, - low_resource=False, - llm_model='/path/to/vicuna-7b/', - max_output_txt_len=10, -) - -# evaluation settings -instruct_blip_vqav2_evaluator = [dict(type='mmpretrain.VQAAcc')] - -instruct_blip_load_from = '/path/to/instruct_blip_vicuna7b_trimmed.pth' diff --git a/configs/multimodal/instructblip/instructblip_vsr.py b/configs/multimodal/instructblip/instructblip_vsr.py deleted file mode 100644 index 083527a2..00000000 --- a/configs/multimodal/instructblip/instructblip_vsr.py +++ /dev/null @@ -1,51 +0,0 @@ -from opencompass.multimodal.models.instructblip import ( - InstructBlipVSRPromptConstructor, - InstructBlipVSRPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.VSR', - data_root='data/vsr/', - data_prefix='images/', - ann_file='annotations/test.json', - pipeline=val_pipeline) - -instruct_blip_vsr_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -instruct_blip_vsr_model = dict( - type='blip2-vicuna-instruct', - prompt_constructor=dict(type=InstructBlipVSRPromptConstructor), - post_processor=dict(type=InstructBlipVSRPostProcessor), - freeze_vit=True, - low_resource=False, - llm_model='/path/to/vicuna-7b/', - max_output_txt_len=10, -) - -# evaluation settings -instruct_blip_vsr_evaluator = [dict(type='mmpretrain.GQAAcc')] - -instruct_blip_load_from = '/path/to/instruct_blip_vicuna7b_trimmed.pth' diff --git a/configs/multimodal/llama_adapter_v2_multimodal/README.md b/configs/multimodal/llama_adapter_v2_multimodal/README.md deleted file mode 100644 index 781cd877..00000000 --- a/configs/multimodal/llama_adapter_v2_multimodal/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# Llama Adapter V2 - -### Prepare the environment - -```sh -cd opencompass/multimodal/models/llama_adapter_v2_multimodal -git clone https://github.com/OpenGVLab/LLaMA-Adapter.git -``` - -### Start evaluation - -#### Slurm - -```sh -cd $root -python run.py configs/multimodal/tasks.py --mm-eval --slurm -p $PARTITION -``` - -#### PyTorch - -```sh -cd $root -python run.py configs/multimodal/tasks.py --mm-eval -``` \ No newline at end of file diff --git a/configs/multimodal/llama_adapter_v2_multimodal/llama_adapter_v2_mm_7b_mmbench.py b/configs/multimodal/llama_adapter_v2_multimodal/llama_adapter_v2_mm_7b_mmbench.py deleted file mode 100644 index 2149e178..00000000 --- a/configs/multimodal/llama_adapter_v2_multimodal/llama_adapter_v2_mm_7b_mmbench.py +++ 
/dev/null @@ -1,48 +0,0 @@ -from opencompass.multimodal.models.llama_adapter_v2_multimodal import ( - LlamaAadapterMMBenchPostProcessor, LlamaAadapterMMBenchPromptConstructor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'answer', 'options', 'category', 'l2-category', - 'index', 'context', 'options_dict' - ]) -] - -dataset = dict(type='opencompass.MMBenchDataset', - data_file='data/mmbench/mmbench_test_20230712.tsv', - pipeline=val_pipeline) - -llama_adapter_mmbench_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -llama_adapter_mmbench_model = dict( - type='LLaMA-adapter-v2', - llama_dir= # noqa - '/llama_adapter_v2_multimodal', - prompt_constructor=dict(type=LlamaAadapterMMBenchPromptConstructor), - post_processor=dict(type=LlamaAadapterMMBenchPostProcessor) -) - -# evaluation settings -llama_adapter_mmbench_evaluator = [ - dict( - type='opencompass.DumpResults', - save_path='work_dirs/llama-adapter-v2-multimodal-mmagibench-v0.1.0.xlsx' - ) -] - -llama_adapter_mmbench_load_from = None # noqa - diff --git a/configs/multimodal/llava/README.md b/configs/multimodal/llava/README.md deleted file mode 100644 index 19cde425..00000000 --- a/configs/multimodal/llava/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# LLaVA - -### Prepare the environment - -```sh -cd opencompass/multimodal/models/llava -git clone https://github.com/haotian-liu/LLaVA.git -``` - -Then prepare the environment according to the [install instruction](https://github.com/haotian-liu/LLaVA/tree/main#install) diff --git a/configs/multimodal/llava/llava_7b_coco_caption.py b/configs/multimodal/llava/llava_7b_coco_caption.py deleted file mode 100644 index e0793494..00000000 --- a/configs/multimodal/llava/llava_7b_coco_caption.py +++ /dev/null @@ -1,50 +0,0 @@ -from opencompass.multimodal.models.llava import LLaVABasePromptConstructor, LLaVABasePostProcessor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict( - type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711), - ), - dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id']), -] - - -dataset = dict(type='mmpretrain.COCOCaption', - data_root='data/coco', - data_prefix=dict(img_path='images'), - ann_file='annotations/coco_karpathy_val.json', - pipeline=val_pipeline) - -llava_coco_caption_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False), -) - -# model settings -llava_coco_caption_model = dict( - type='llava', - model_path='/path/to/llava', - is_caption_task=True, - prompt_constructor=dict(type=LLaVABasePromptConstructor), - post_processor=dict(type=LLaVABasePostProcessor) -) # noqa - -# evaluation settings -llava_coco_caption_evaluator = [ - dict( - type='mmpretrain.COCOCaption', - 
ann_file='data/coco/annotations/coco_karpathy_val_gt.json', - ) # noqa -] - diff --git a/configs/multimodal/llava/llava_7b_flickr30k.py b/configs/multimodal/llava/llava_7b_flickr30k.py deleted file mode 100644 index cdb151b3..00000000 --- a/configs/multimodal/llava/llava_7b_flickr30k.py +++ /dev/null @@ -1,52 +0,0 @@ -from opencompass.multimodal.models.llava import LLaVABasePromptConstructor, LLaVABasePostProcessor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict( - type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711), - ), - dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id']), -] - - -dataset = dict(type='mmpretrain.Flickr30kCaption', - data_root='data/flickr30k', - ann_file='annotations/dataset_flickr30k.json', - data_prefix='images', - split='val', - pipeline=val_pipeline) - -llava_flickr30k_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False), -) - -# model settings -llava_flickr30k_model = dict( - type='llava', - model_path='/path/to/llava', - is_caption_task=True, - prompt_constructor=dict(type=LLaVABasePromptConstructor), - post_processor=dict(type=LLaVABasePostProcessor) -) # noqa - -# evaluation settings -llava_flickr30k_evaluator = [ - dict( - type='mmpretrain.COCOCaption', - ann_file='data/flickr30k/annotations/flickr30k_val_gt.json', - ) # noqa -] - - diff --git a/configs/multimodal/llava/llava_7b_gqa.py b/configs/multimodal/llava/llava_7b_gqa.py deleted file mode 100644 index fe80ac22..00000000 --- a/configs/multimodal/llava/llava_7b_gqa.py +++ /dev/null @@ -1,49 +0,0 @@ -from opencompass.multimodal.models.llava import LLaVAVQAPromptConstructor, LLaVABasePostProcessor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict( - type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711), - ), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - - -dataset = dict(type='mmpretrain.GQA', - data_root='data/gqa', - data_prefix='images', - ann_file='annotations/testdev_balanced_questions.json', - pipeline=val_pipeline) - -llava_gqa_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False), -) - -# model settings -llava_gqa_model = dict( - type='llava', - model_path='/path/to/llava', - prompt_constructor=dict(type=LLaVAVQAPromptConstructor), - post_processor=dict(type=LLaVABasePostProcessor) -) # noqa - -# evaluation settings -llava_gqa_evaluator = [dict(type='mmpretrain.GQAAcc')] - - diff --git a/configs/multimodal/llava/llava_7b_mmbench.py b/configs/multimodal/llava/llava_7b_mmbench.py deleted file mode 100644 index 0ef8eba9..00000000 --- a/configs/multimodal/llava/llava_7b_mmbench.py +++ /dev/null @@ -1,47 +0,0 @@ -from opencompass.multimodal.models.llava import 
LLaVAMMBenchPromptConstructor, LLaVABasePostProcessor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict( - type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711), - ), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'category', 'l2-category', 'context', 'index', - 'options_dict', 'options', 'split' - ], - ), -] - -dataset = dict(type='opencompass.MMBenchDataset', - data_file='data/mmbench/mmbench_test_20230712.tsv', - pipeline=val_pipeline) - -llava_mmbench_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False), -) - -# model settings -llava_mmbench_model = dict( - type='llava', - model_path='/path/to/llava', - prompt_constructor=dict(type=LLaVAMMBenchPromptConstructor), - post_processor=dict(type=LLaVABasePostProcessor) -) # noqa - -# evaluation settings -llava_mmbench_evaluator = [ - dict(type='opencompass.DumpResults', - save_path='work_dirs/llava-7b-mmbench.xlsx') -] diff --git a/configs/multimodal/llava/llava_7b_ocr_vqa.py b/configs/multimodal/llava/llava_7b_ocr_vqa.py deleted file mode 100644 index 9926128f..00000000 --- a/configs/multimodal/llava/llava_7b_ocr_vqa.py +++ /dev/null @@ -1,49 +0,0 @@ -from opencompass.multimodal.models.llava import LLaVAVQAPromptConstructor, LLaVABasePostProcessor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict( - type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711), - ), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.OCRVQA', - data_root='data/ocrvqa', - ann_file='annotations/dataset.json', - split='test', - data_prefix='images', - pipeline=val_pipeline) - -llava_ocrvqa_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False), -) - -# model settings -llava_ocrvqa_model = dict( - type='llava', - model_path='/path/to/llava', - prompt_constructor=dict(type=LLaVAVQAPromptConstructor), - post_processor=dict(type=LLaVABasePostProcessor) -) # noqa - -# evaluation settings -llava_ocrvqa_evaluator = [dict(type='mmpretrain.VQAAcc')] - - diff --git a/configs/multimodal/llava/llava_7b_ok_vqa.py b/configs/multimodal/llava/llava_7b_ok_vqa.py deleted file mode 100644 index f2d79cee..00000000 --- a/configs/multimodal/llava/llava_7b_ok_vqa.py +++ /dev/null @@ -1,51 +0,0 @@ -from opencompass.multimodal.models.llava import LLaVAVQAPromptConstructor, LLaVABasePostProcessor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict( - type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711), - ), - dict( - 
type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.COCOVQA', - data_root='data/okvqa', - question_file='annotations/OpenEnded_mscoco_val2014_questions.json', - ann_file='annotations/mscoco_val2014_annotations.json', - pipeline=val_pipeline, - data_prefix='images/val2014', -) - -llava_okvqa_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False), -) - -# model settings -llava_okvqa_model = dict( - type='llava', - model_path='/path/to/llava', - prompt_constructor=dict(type=LLaVAVQAPromptConstructor), - post_processor=dict(type=LLaVABasePostProcessor) -) # noqa - -# evaluation settings -llava_okvqa_evaluator = [dict(type='mmpretrain.VQAAcc')] - - diff --git a/configs/multimodal/llava/llava_7b_scienceqa.py b/configs/multimodal/llava/llava_7b_scienceqa.py deleted file mode 100644 index 4e7654be..00000000 --- a/configs/multimodal/llava/llava_7b_scienceqa.py +++ /dev/null @@ -1,50 +0,0 @@ -from opencompass.multimodal.models.llava import LLaVAScienceQAPromptConstructor, LLaVABasePostProcessor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict( - type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711), - ), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'gt_answer', 'choices', 'hint', 'lecture', 'solution', 'has_image' - ]) -] - -dataset = dict(type='mmpretrain.ScienceQA', - data_root='./data/scienceqa', - split='val', - split_file='pid_splits.json', - ann_file='problems.json', - image_only=True, - data_prefix=dict(img_path='val'), - pipeline=val_pipeline) - -llava_scienceqa_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False), -) - -# model settings -llava_scienceqa_model = dict( - type='llava', - model_path='/path/to/llava', - prompt_constructor=dict(type=LLaVAScienceQAPromptConstructor), - post_processor=dict(type=LLaVABasePostProcessor) -) # noqa - -# evaluation settings -llava_scienceqa_evaluator = [dict(type='mmpretrain.ScienceQAMetric')] - - diff --git a/configs/multimodal/llava/llava_7b_textvqa.py b/configs/multimodal/llava/llava_7b_textvqa.py deleted file mode 100644 index 52dbb030..00000000 --- a/configs/multimodal/llava/llava_7b_textvqa.py +++ /dev/null @@ -1,50 +0,0 @@ -from opencompass.multimodal.models.llava import LLaVAVQAPromptConstructor, LLaVABasePostProcessor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict( - type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711), - ), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.TextVQA', - data_root='data/textvqa', - 
ann_file='annotations/TextVQA_0.5.1_val.json',
-    pipeline=val_pipeline,
-    data_prefix='images/train_images',
-)
-
-llava_textvqa_dataloader = dict(
-    batch_size=1,
-    num_workers=4,
-    dataset=dataset,
-    collate_fn=dict(type='pseudo_collate'),
-    sampler=dict(type='DefaultSampler', shuffle=False),
-)
-
-# model settings
-llava_textvqa_model = dict(
-    type='llava',
-    model_path='/path/to/llava',
-    prompt_constructor=dict(type=LLaVAVQAPromptConstructor),
-    post_processor=dict(type=LLaVABasePostProcessor)
-) # noqa
-
-# evaluation settings
-llava_textvqa_evaluator = [dict(type='mmpretrain.VQAAcc')]
-
-
diff --git a/configs/multimodal/llava/llava_7b_vizwiz.py b/configs/multimodal/llava/llava_7b_vizwiz.py
deleted file mode 100644
index 5a26176b..00000000
--- a/configs/multimodal/llava/llava_7b_vizwiz.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from opencompass.multimodal.models.llava import LLaVAVQAPromptConstructor, LLaVABasePostProcessor
-
-# dataloader settings
-val_pipeline = [
-    dict(type='mmpretrain.LoadImageFromFile'),
-    dict(type='mmpretrain.ToPIL', to_rgb=True),
-    dict(type='mmpretrain.torchvision/Resize',
-         size=(224, 224),
-         interpolation=3),
-    dict(type='mmpretrain.torchvision/ToTensor'),
-    dict(
-        type='mmpretrain.torchvision/Normalize',
-        mean=(0.48145466, 0.4578275, 0.40821073),
-        std=(0.26862954, 0.26130258, 0.27577711),
-    ),
-    dict(
-        type='mmpretrain.PackInputs',
-        algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
-        meta_keys=['question_id', 'image_id'],
-    )
-]
-
-dataset = dict(type='mmpretrain.VizWiz',
-               data_root='data/vizwiz/',
-               data_prefix='Images/val',
-               ann_file='Annotations/val.json',
-               pipeline=val_pipeline)
-
-llava_vizwiz_dataloader = dict(
-    batch_size=1,
-    num_workers=4,
-    dataset=dataset,
-    collate_fn=dict(type='pseudo_collate'),
-    sampler=dict(type='DefaultSampler', shuffle=False),
-)
-
-# model settings
-llava_vizwiz_model = dict(
-    type='llava',
-    model_path='/path/to/llava',
-    prompt_constructor=dict(type=LLaVAVQAPromptConstructor),
-    post_processor=dict(type=LLaVABasePostProcessor)
-) # noqa
-
-# evaluation settings
-llava_vizwiz_evaluator = [dict(type='mmpretrain.VQAAcc')]
-
-
diff --git a/configs/multimodal/llava/llava_7b_vqav2.py b/configs/multimodal/llava/llava_7b_vqav2.py
deleted file mode 100644
index 22a322c5..00000000
--- a/configs/multimodal/llava/llava_7b_vqav2.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from opencompass.multimodal.models.llava import LLaVAVQAPromptConstructor, LLaVABasePostProcessor
-
-# dataloader settings
-val_pipeline = [
-    dict(type='mmpretrain.LoadImageFromFile'),
-    dict(type='mmpretrain.ToPIL', to_rgb=True),
-    dict(type='mmpretrain.torchvision/Resize',
-         size=(224, 224),
-         interpolation=3),
-    dict(type='mmpretrain.torchvision/ToTensor'),
-    dict(
-        type='mmpretrain.torchvision/Normalize',
-        mean=(0.48145466, 0.4578275, 0.40821073),
-        std=(0.26862954, 0.26130258, 0.27577711),
-    ),
-    dict(
-        type='mmpretrain.PackInputs',
-        algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
-        meta_keys=['question_id', 'image_id'],
-    )
-]
-
-dataset = dict(
-    type='mmpretrain.COCOVQA',
-    data_root='data/coco',
-    data_prefix='images/val2014',
-    question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json',
-    ann_file='annotations/v2_mscoco_val2014_annotations.json',
-    pipeline=val_pipeline)
-
-llava_vqav2_dataloader = dict(
-    batch_size=1,
-    num_workers=4,
-    dataset=dataset,
-    collate_fn=dict(type='pseudo_collate'),
-    sampler=dict(type='DefaultSampler', shuffle=False),
-)
-
-# model settings
-llava_vqav2_model = dict(
-    type='llava',
-    model_path='/path/to/llava',
-    prompt_constructor=dict(type=LLaVAVQAPromptConstructor),
-    post_processor=dict(type=LLaVABasePostProcessor)
-) # noqa
-
-# evaluation settings
-llava_vqav2_evaluator = [dict(type='mmpretrain.VQAAcc')]
-
-
diff --git a/configs/multimodal/llava/llava_7b_vsr.py b/configs/multimodal/llava/llava_7b_vsr.py
deleted file mode 100644
index 7985d143..00000000
--- a/configs/multimodal/llava/llava_7b_vsr.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from opencompass.multimodal.models.llava import LLaVAVQAPromptConstructor, LLaVAVSRPostProcessor
-
-# dataloader settings
-val_pipeline = [
-    dict(type='mmpretrain.LoadImageFromFile'),
-    dict(type='mmpretrain.ToPIL', to_rgb=True),
-    dict(type='mmpretrain.torchvision/Resize',
-         size=(224, 224),
-         interpolation=3),
-    dict(type='mmpretrain.torchvision/ToTensor'),
-    dict(
-        type='mmpretrain.torchvision/Normalize',
-        mean=(0.48145466, 0.4578275, 0.40821073),
-        std=(0.26862954, 0.26130258, 0.27577711),
-    ),
-    dict(
-        type='mmpretrain.PackInputs',
-        algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'],
-        meta_keys=['question_id', 'image_id'],
-    )
-]
-
-dataset = dict(type='mmpretrain.VSR',
-               data_root='data/vsr/',
-               data_prefix='images/',
-               ann_file='annotations/test.json',
-               pipeline=val_pipeline)
-
-llava_vsr_dataloader = dict(
-    batch_size=1,
-    num_workers=4,
-    dataset=dataset,
-    collate_fn=dict(type='pseudo_collate'),
-    sampler=dict(type='DefaultSampler', shuffle=False),
-)
-
-# model settings
-llava_vsr_model = dict(
-    type='llava',
-    model_path='/path/to/llava',
-    prompt_constructor=dict(type=LLaVAVQAPromptConstructor),
-    post_processor=dict(type=LLaVAVSRPostProcessor)
-) # noqa
-
-# evaluation settings
-llava_vsr_evaluator = [dict(type='mmpretrain.GQAAcc')]
-
-
diff --git a/configs/multimodal/minigpt_4/README.md b/configs/multimodal/minigpt_4/README.md
deleted file mode 100644
index 5012b4d3..00000000
--- a/configs/multimodal/minigpt_4/README.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# MiniGPT-4
-
-### Prepare the environment
-
-```sh
-cd opencompass/multimodal/models/minigpt_4
-git clone https://github.com/Vision-CAIR/MiniGPT-4.git
-```
-
-Then prepare the environment according to this [doc](https://github.com/Vision-CAIR/MiniGPT-4)
-
-### Start evaluation
-
-#### Slurm
-
-```sh
-cd $root
-python run.py configs/multimodal/tasks.py --mm-eval --slurm -p $PARTITION
-```
-
-#### PyTorch
-
-```sh
-cd $root
-python run.py configs/multimodal/tasks.py --mm-eval
-```
diff --git a/configs/multimodal/minigpt_4/minigpt_4_7b_coco_caption.py b/configs/multimodal/minigpt_4/minigpt_4_7b_coco_caption.py
deleted file mode 100644
index f8738c31..00000000
--- a/configs/multimodal/minigpt_4/minigpt_4_7b_coco_caption.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from opencompass.multimodal.models.minigpt_4 import (
-    MiniGPT4COCOCaotionPromptConstructor,
-    MiniGPT4COCOCaptionPostProcessor,
-)
-
-# dataloader settings
-val_pipeline = [
-    dict(type='mmpretrain.LoadImageFromFile'),
-    dict(type='mmpretrain.ToPIL', to_rgb=True),
-    dict(type='mmpretrain.torchvision/Resize',
-         size=(384, 384),
-         interpolation=3),
-    dict(type='mmpretrain.torchvision/ToTensor'),
-    dict(type='mmpretrain.torchvision/Normalize',
-         mean=(0.48145466, 0.4578275, 0.40821073),
-         std=(0.26862954, 0.26130258, 0.27577711)),
-    dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id'])
-]
-
-dataset = dict(type='mmpretrain.COCOCaption',
-               data_root='data/coco',
-               data_prefix=dict(img_path='images'),
-               ann_file='annotations/coco_karpathy_val.json',
-               pipeline=val_pipeline)
-
-minigpt_4_coco_caption_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -minigpt_4_coco_caption_model = dict( - type='minigpt-4', - low_resource=False, - img_size=384, - llama_model='/path/to/vicuna_weights_7b/', - is_caption_task=True, - prompt_constructor=dict(type=MiniGPT4COCOCaotionPromptConstructor, - image_prompt='###Human: ', - reply_prompt='###Assistant:'), - post_processor=dict(type=MiniGPT4COCOCaptionPostProcessor)) - -# evaluation settings -minigpt_4_coco_caption_evaluator = [ - dict( - type='mmpretrain.COCOCaption', - ann_file='data/coco/annotations/coco_karpathy_val_gt.json', - ) # noqa -] - -minigpt_4_coco_caption_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa diff --git a/configs/multimodal/minigpt_4/minigpt_4_7b_flickr30k.py b/configs/multimodal/minigpt_4/minigpt_4_7b_flickr30k.py deleted file mode 100644 index a4df7d22..00000000 --- a/configs/multimodal/minigpt_4/minigpt_4_7b_flickr30k.py +++ /dev/null @@ -1,54 +0,0 @@ -from opencompass.multimodal.models.minigpt_4 import ( - MiniGPT4COCOCaotionPromptConstructor, - MiniGPT4COCOCaptionPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(384, 384), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id']) -] - -dataset = dict(type='mmpretrain.Flickr30kCaption', - data_root='data/flickr30k', - ann_file='annotations/dataset_flickr30k.json', - data_prefix='images', - split='val', - pipeline=val_pipeline) - -minigpt_4_flickr30k_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -minigpt_4_flickr30k_model = dict( - type='minigpt-4', - low_resource=False, - img_size=384, - llama_model='/path/to/vicuna_weights_7b/', - is_caption_task=True, - prompt_constructor=dict(type=MiniGPT4COCOCaotionPromptConstructor, - image_prompt='###Human: ', - reply_prompt='###Assistant:'), - post_processor=dict(type=MiniGPT4COCOCaptionPostProcessor)) - -# evaluation settings -minigpt_4_flickr30k_evaluator = [ - dict( - type='mmpretrain.COCOCaption', - ann_file='data/flickr30k/annotations/flickr30k_val_gt.json', - ) # noqa -] - -minigpt_4_flickr30k_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa diff --git a/configs/multimodal/minigpt_4/minigpt_4_7b_gqa.py b/configs/multimodal/minigpt_4/minigpt_4_7b_gqa.py deleted file mode 100644 index 5cc0c96b..00000000 --- a/configs/multimodal/minigpt_4/minigpt_4_7b_gqa.py +++ /dev/null @@ -1,52 +0,0 @@ -from opencompass.multimodal.models.minigpt_4 import ( - MiniGPT4VQAPromptConstructor, - MiniGPT4VQAPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - 
algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.GQA', - data_root='data/gqa', - data_prefix='images', - ann_file='annotations/testdev_balanced_questions.json', - pipeline=val_pipeline) - -minigpt_4_gqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -minigpt_4_gqa_model = dict(type='minigpt-4', - low_resource=False, - img_size=224, - max_length=10, - llama_model='/path/to/vicuna_weights_7b/', - prompt_constructor=dict( - type=MiniGPT4VQAPromptConstructor, - image_prompt='###Human: ', - reply_prompt='###Assistant:'), - post_processor=dict(type=MiniGPT4VQAPostProcessor)) - -# evaluation settings -minigpt_4_gqa_evaluator = [dict(type='mmpretrain.GQAAcc')] - -minigpt_4_gqa_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa diff --git a/configs/multimodal/minigpt_4/minigpt_4_7b_mmbench.py b/configs/multimodal/minigpt_4/minigpt_4_7b_mmbench.py deleted file mode 100644 index 034bd385..00000000 --- a/configs/multimodal/minigpt_4/minigpt_4_7b_mmbench.py +++ /dev/null @@ -1,47 +0,0 @@ -from opencompass.multimodal.models.minigpt_4 import ( - MiniGPT4MMBenchPromptConstructor, MiniGPT4MMBenchPostProcessor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'category', 'l2-category', 'context', 'index', - 'options_dict', 'options', 'split' - ]) -] - -dataset = dict(type='opencompass.MMBenchDataset', - data_file='data/mmbench/mmbench_test_20230712.tsv', - pipeline=val_pipeline) - -minigpt_4_mmbench_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -minigpt_4_mmbench_model = dict( - type='minigpt-4', - low_resource=False, - llama_model='/path/to/vicuna-7b/', - prompt_constructor=dict(type=MiniGPT4MMBenchPromptConstructor, - image_prompt='###Human: ', - reply_prompt='###Assistant:'), - post_processor=dict(type=MiniGPT4MMBenchPostProcessor)) - -# evaluation settings -minigpt_4_mmbench_evaluator = [ - dict(type='opencompass.DumpResults', - save_path='work_dirs/minigpt-4-7b-mmbench.xlsx') -] - -minigpt_4_mmbench_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa diff --git a/configs/multimodal/minigpt_4/minigpt_4_7b_mme.py b/configs/multimodal/minigpt_4/minigpt_4_7b_mme.py deleted file mode 100644 index 2824a003..00000000 --- a/configs/multimodal/minigpt_4/minigpt_4_7b_mme.py +++ /dev/null @@ -1,43 +0,0 @@ -from opencompass.multimodal.models.minigpt_4 import (MiniGPT4MMEPostProcessor, MiniGPT4MMEPromptConstructor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'answer', 'task' - ]) -] - 
-dataset = dict(type='opencompass.MMEDataset', - data_dir='/path/to/MME', - pipeline=val_pipeline) - -minigpt_4_mme_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -minigpt_4_model = dict( - type='minigpt-4', - low_resource=False, - llama_model='/path/to/vicuna/', - prompt_constructor=dict(type=MiniGPT4MMEPromptConstructor), - post_processor=dict(type=MiniGPT4MMEPostProcessor)) - -# evaluation settings -minigpt_4_mme_evaluator = [ - dict(type='opencompass.MMEMetric') -] - -minigpt_4_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa diff --git a/configs/multimodal/minigpt_4/minigpt_4_7b_ocr_vqa.py b/configs/multimodal/minigpt_4/minigpt_4_7b_ocr_vqa.py deleted file mode 100644 index 4c7f520a..00000000 --- a/configs/multimodal/minigpt_4/minigpt_4_7b_ocr_vqa.py +++ /dev/null @@ -1,53 +0,0 @@ -from opencompass.multimodal.models.minigpt_4 import ( - MiniGPT4VQAPromptConstructor, - MiniGPT4VQAPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.OCRVQA', - data_root='data/ocrvqa', - ann_file='annotations/dataset.json', - split='test', - data_prefix='images', - pipeline=val_pipeline) - -minigpt_4_ocr_vqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -minigpt_4_ocr_vqa_model = dict( - type='minigpt-4', - low_resource=False, - img_size=224, - max_length=10, - llama_model='/path/to/vicuna_weights_7b/', - prompt_constructor=dict(type=MiniGPT4VQAPromptConstructor, - image_prompt='###Human: ', - reply_prompt='###Assistant:'), - post_processor=dict(type=MiniGPT4VQAPostProcessor)) - -# evaluation settings -minigpt_4_ocr_vqa_evaluator = [dict(type='mmpretrain.VQAAcc')] - -minigpt_4_ocr_vqa_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa diff --git a/configs/multimodal/minigpt_4/minigpt_4_7b_ok_vqa.py b/configs/multimodal/minigpt_4/minigpt_4_7b_ok_vqa.py deleted file mode 100644 index a8df7c24..00000000 --- a/configs/multimodal/minigpt_4/minigpt_4_7b_ok_vqa.py +++ /dev/null @@ -1,55 +0,0 @@ -from opencompass.multimodal.models.minigpt_4 import ( - MiniGPT4VQAPromptConstructor, - MiniGPT4VQAPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.COCOVQA', - data_root='data/okvqa', - 
question_file='annotations/OpenEnded_mscoco_val2014_questions.json', - ann_file='annotations/mscoco_val2014_annotations.json', - pipeline=val_pipeline, - data_prefix='images/val2014', -) - -minigpt_4_ok_vqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -minigpt_4_ok_vqa_model = dict( - type='minigpt-4', - low_resource=False, - img_size=224, - max_length=10, - llama_model='/path/to/vicuna_weights_7b/', - prompt_constructor=dict(type=MiniGPT4VQAPromptConstructor, - image_prompt='###Human: ', - reply_prompt='###Assistant:'), - post_processor=dict(type=MiniGPT4VQAPostProcessor)) - -# evaluation settings -minigpt_4_ok_vqa_evaluator = [dict(type='mmpretrain.VQAAcc')] - -minigpt_4_ok_vqa_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa diff --git a/configs/multimodal/minigpt_4/minigpt_4_7b_scienceqa.py b/configs/multimodal/minigpt_4/minigpt_4_7b_scienceqa.py deleted file mode 100644 index a61246ad..00000000 --- a/configs/multimodal/minigpt_4/minigpt_4_7b_scienceqa.py +++ /dev/null @@ -1,52 +0,0 @@ -from opencompass.multimodal.models import (MiniGPT4ScienceQAPromptConstructor, - MiniGPT4ScienceQAPostProcessor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'gt_answer', 'choices', 'hint', 'lecture', 'solution', 'has_image' - ]) -] - -dataset = dict(type='mmpretrain.ScienceQA', - data_root='./data/scienceqa', - split='val', - split_file='pid_splits.json', - ann_file='problems.json', - image_only=True, - data_prefix=dict(img_path='val'), - pipeline=val_pipeline) - -minigpt_4_scienceqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -minigpt_4_scienceqa_model = dict( - type='minigpt-4', - low_resource=False, - img_size=224, - max_length=10, - llama_model='/path/to/vicuna_weights_7b/', - prompt_constructor=dict(type=MiniGPT4ScienceQAPromptConstructor, - image_prompt='###Human: ', - reply_prompt='###Assistant:'), - post_processor=dict(type=MiniGPT4ScienceQAPostProcessor)) - -# evaluation settings -minigpt_4_scienceqa_evaluator = [dict(type='mmpretrain.ScienceQAMetric')] - -minigpt_4_scienceqa_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa diff --git a/configs/multimodal/minigpt_4/minigpt_4_7b_seedbench.py b/configs/multimodal/minigpt_4/minigpt_4_7b_seedbench.py deleted file mode 100644 index 051bcb73..00000000 --- a/configs/multimodal/minigpt_4/minigpt_4_7b_seedbench.py +++ /dev/null @@ -1,63 +0,0 @@ -from opencompass.multimodal.models.minigpt_4 import MiniGPT4SEEDBenchPromptConstructor # noqa - -# dataloader settings -image_pipeline = [ - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'answer', 'choices', 
'data_type', 'question_type_id', - 'index', 'data_path', 'question_id' - ]) -] -video_pipeline = [ - dict(type='mmaction.Resize', scale=(224, 224), interpolation='bicubic'), - dict(type='mmaction.CenterCrop', crop_size=224), - dict(type='Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'answer', 'choices', 'data_type', 'question_type_id', - 'index', 'data_path', 'question_id' - ]) -] - -dataset = dict( - type='opencompass.SEEDBenchDataset', - ann_file='data/seedbench/SEED-Bench.json', - cc3m_path='data/seedbench/SEED-Bench-image', - sthv2_path='data/seedbench/sthv2/videos', - epic_kitchens_path='data/seedbench/3h91syskeag572hl6tvuovwv4d/videos/test', - breakfast_path='data/seedbench/BreakfastII_15fps_qvga_sync', - image_pipeline=image_pipeline, - video_pipeline=video_pipeline, - only_image=True) - -minigpt_4_seedbench_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -minigpt_4_seedbench_model = dict( - type='minigpt-4', - low_resource=False, - llama_model='/path/to/vicuna/', - prompt_constructor=dict(type=MiniGPT4SEEDBenchPromptConstructor, - image_prompt='###Human: ', - reply_prompt='###Assistant:'), - post_processor=None, - mode='loss') - -# evaluation settings -minigpt_4_seedbench_evaluator = [dict(type='opencompass.SEEDBenchAcc')] - -minigpt_4_load_from = '/path/to/prerained_minigpt4_7b.pth' diff --git a/configs/multimodal/minigpt_4/minigpt_4_7b_textvqa.py b/configs/multimodal/minigpt_4/minigpt_4_7b_textvqa.py deleted file mode 100644 index 07913f73..00000000 --- a/configs/multimodal/minigpt_4/minigpt_4_7b_textvqa.py +++ /dev/null @@ -1,55 +0,0 @@ -from opencompass.multimodal.models.minigpt_4 import ( - MiniGPT4VQAPromptConstructor, - MiniGPT4VQAPostProcessor, -) - - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.TextVQA', - data_root='data/textvqa', - ann_file='annotations/TextVQA_0.5.1_val.json', - pipeline=val_pipeline, - data_prefix='images/train_images', -) - -minigpt_4_textvqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -minigpt_4_textvqa_model = dict( - type='minigpt-4', - low_resource=False, - img_size=224, - max_length=10, - llama_model='/path/to/vicuna_weights_7b/', - prompt_constructor=dict(type=MiniGPT4VQAPromptConstructor, - image_prompt='###Human: ', - reply_prompt='###Assistant:'), - post_processor=dict(type=MiniGPT4VQAPostProcessor)) - -# evaluation settings -minigpt_4_textvqa_evaluator = [dict(type='mmpretrain.VQAAcc')] - -minigpt_4_textvqa_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa diff --git a/configs/multimodal/minigpt_4/minigpt_4_7b_vizwiz.py b/configs/multimodal/minigpt_4/minigpt_4_7b_vizwiz.py deleted file mode 100644 index 
718f7013..00000000 --- a/configs/multimodal/minigpt_4/minigpt_4_7b_vizwiz.py +++ /dev/null @@ -1,52 +0,0 @@ -from opencompass.multimodal.models.minigpt_4 import ( - MiniGPT4VQAPromptConstructor, - MiniGPT4VQAPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.VizWiz', - data_root='data/vizwiz/', - data_prefix='Images/val', - ann_file='Annotations/val.json', - pipeline=val_pipeline) - -minigpt_4_vizwiz_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -minigpt_4_vizwiz_model = dict( - type='minigpt-4', - low_resource=False, - img_size=224, - max_length=10, - llama_model='/path/to/vicuna_weights_7b/', - prompt_constructor=dict(type=MiniGPT4VQAPromptConstructor, - image_prompt='###Human: ', - reply_prompt='###Assistant:'), - post_processor=dict(type=MiniGPT4VQAPostProcessor)) - -# evaluation settings -minigpt_4_vizwiz_evaluator = [dict(type='mmpretrain.VQAAcc')] - -minigpt_4_vizwiz_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa diff --git a/configs/multimodal/minigpt_4/minigpt_4_7b_vqav2.py b/configs/multimodal/minigpt_4/minigpt_4_7b_vqav2.py deleted file mode 100644 index 37f0b6cc..00000000 --- a/configs/multimodal/minigpt_4/minigpt_4_7b_vqav2.py +++ /dev/null @@ -1,55 +0,0 @@ -from opencompass.multimodal.models.minigpt_4 import ( - MiniGPT4VQAPromptConstructor, - MiniGPT4VQAPostProcessor, -) - - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.COCOVQA', - data_root='data/coco', - data_prefix='images/val2014', - question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json', - ann_file='annotations/v2_mscoco_val2014_annotations.json', - pipeline=val_pipeline) - -minigpt_4_vqav2_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -minigpt_4_vqav2_model = dict( - type='minigpt-4', - low_resource=False, - img_size=224, - max_length=10, - llama_model='/path/to/vicuna_weights_7b/', - prompt_constructor=dict(type=MiniGPT4VQAPromptConstructor, - image_prompt='###Human: ', - reply_prompt='###Assistant:'), - post_processor=dict(type=MiniGPT4VQAPostProcessor)) - -# evaluation settings -minigpt_4_vqav2_evaluator = [dict(type='mmpretrain.VQAAcc')] - -minigpt_4_vqav2_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa diff --git 
a/configs/multimodal/minigpt_4/minigpt_4_7b_vsr.py b/configs/multimodal/minigpt_4/minigpt_4_7b_vsr.py deleted file mode 100644 index f19a7949..00000000 --- a/configs/multimodal/minigpt_4/minigpt_4_7b_vsr.py +++ /dev/null @@ -1,52 +0,0 @@ -from opencompass.multimodal.models.minigpt_4 import ( - MiniGPT4VSRPromptConstructor, - MiniGPT4VSRPostProcessor, -) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.VSR', - data_root='data/vsr/', - data_prefix='images/', - ann_file='annotations/test.json', - pipeline=val_pipeline) - -minigpt_4_vsr_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -minigpt_4_vsr_model = dict( - type='minigpt-4', - low_resource=False, - img_size=224, - max_length=10, - llama_model='/path/to/vicuna_weights_7b/', - prompt_constructor=dict(type=MiniGPT4VSRPromptConstructor, - image_prompt='###Human: ', - reply_prompt='###Assistant:'), - post_processor=dict(type=MiniGPT4VSRPostProcessor)) - -# evaluation settings -minigpt_4_vsr_evaluator = [dict(type='mmpretrain.GQAAcc')] - -minigpt_4_vsr_load_from = '/path/to/prerained_minigpt4_7b.pth' # noqa diff --git a/configs/multimodal/mplug_owl/README.md b/configs/multimodal/mplug_owl/README.md deleted file mode 100644 index 7425f94b..00000000 --- a/configs/multimodal/mplug_owl/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# MplugOwl - -### Prepare the environment - -```sh -cd opencompass/multimodal/models/mplug_owl -git clone https://github.com/X-PLUG/mPLUG-Owl.git -``` - -### Start evaluation - -#### Slurm - -```sh -cd $root -python run.py configs/multimodal/tasks.py --mm-eval --slurm -p $PARTITION -``` - -#### PyTorch - -```sh -cd $root -python run.py configs/multimodal/tasks.py --mm-eval -``` \ No newline at end of file diff --git a/configs/multimodal/mplug_owl/mplug_owl_7b_mmbench.py b/configs/multimodal/mplug_owl/mplug_owl_7b_mmbench.py deleted file mode 100644 index 322c041f..00000000 --- a/configs/multimodal/mplug_owl/mplug_owl_7b_mmbench.py +++ /dev/null @@ -1,48 +0,0 @@ -from opencompass.multimodal.models.mplug_owl import ( - MplugOwlMMBenchPostProcessor, MplugOwlMMBenchPromptConstructor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict( - type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711), - ), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'answer', 'category', 'l2-category', 'context', - 'index', 'options_dict', 'options' - ], - ), -] - -dataset = dict(type='opencompass.MMBenchDataset', - data_file='data/mmbench/mmbench_test_20230712.tsv', - pipeline=val_pipeline) - -mplug_owl_mmbench_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', 
shuffle=False), -) - -# model settings -mplug_owl_mmbench_model = dict( - type='mplug_owl-7b', - model_path='/mplug-owl-llama-7b-ft', - prompt_constructor=dict(type=MplugOwlMMBenchPromptConstructor), - post_processor=dict(type=MplugOwlMMBenchPostProcessor) -) # noqa - -# evaluation settings -mplug_owl_mmbench_evaluator = [ - dict(type='opencompass.DumpResults', - save_path='work_dirs/mplug_owl-7b-mmagibench-v0.1.0.xlsx') -] diff --git a/configs/multimodal/openflamingo/README.md b/configs/multimodal/openflamingo/README.md deleted file mode 100644 index c8b62736..00000000 --- a/configs/multimodal/openflamingo/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# OpenFlamingo - -### Prepare the environment - -Install [MMPretrain](https://github.com/open-mmlab/mmpretrain) according to this [doc](https://mmpretrain.readthedocs.io/en/latest/get_started.html#installation) - -### Start evaluation - -#### Slurm - -```sh -cd $root -python run.py configs/multimodal/tasks.py --mm-eval --slurm -p $PARTITION -``` - -#### PyTorch - -```sh -cd $root -python run.py configs/multimodal/tasks.py --mm-eval -``` \ No newline at end of file diff --git a/configs/multimodal/openflamingo/openflamingo_coco_caption.py b/configs/multimodal/openflamingo/openflamingo_coco_caption.py deleted file mode 100644 index dad7b1b2..00000000 --- a/configs/multimodal/openflamingo/openflamingo_coco_caption.py +++ /dev/null @@ -1,75 +0,0 @@ -from opencompass.multimodal.models.openflamingo import OpenFlamingoCaptionPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='mmpretrain.ResizeEdge', - scale=224, - interpolation='bicubic', - backend='pillow'), - dict(type='CenterCrop', crop_size=(224, 224)), - dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id']) -] - -dataset = dict(type='mmpretrain.COCOCaption', - data_root='data/coco', - data_prefix=dict(img_path='images'), - ann_file='annotations/coco_karpathy_val.json', - pipeline=val_pipeline) - -openflamingo_coco_caption_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - sampler=dict(type='DefaultSampler', shuffle=False), - collate_fn=dict(type='default_collate'), - persistent_workers=True, -) - -# model settings -openflamingo_coco_caption_model = dict( - type='openflamingo', - data_preprocessor=dict( - type='mmpretrain.MultiModalDataPreprocessor', - mean=[122.770938, 116.7460125, 104.09373615], - std=[68.5005327, 66.6321579, 70.32316305], - to_rgb=True, - ), - tokenizer=dict(type='mmpretrain.LlamaTokenizer', - name_or_path='decapoda-research/llama-7b-hf'), - vision_encoder=dict( - type='mmpretrain.VisionTransformer', - arch='l', - patch_size=14, - pre_norm=True, - norm_cfg=dict(type='LN', eps=1e-5), - layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), - final_norm=False, - out_type='raw', - pretrained= # noqa: E251 - '/path/to/vision/encoder', # noqa - ), - lang_encoder=dict( - base=dict(type='mmpretrain.AutoModelForCausalLM', - name_or_path= - 'decapoda-research/llama-7b-hf', - local_files_only=True), - adapter=dict(type='mmpretrain.FlamingoLMAdapter', - vis_hidden_size=1024, - cross_attn_every_n_layers=4, - use_media_placement_augmentation=False), - ), - task='caption', - generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0), - prompt_constructor=dict(type=OpenFlamingoCaptionPromptConstructor) -) - -# evaluation settings -openflamingo_coco_caption_evaluator = [ - dict( - type='mmpretrain.COCOCaption', - ann_file='data/coco/annotations/coco_karpathy_val_gt.json', - ) # noqa -] - 
-openflamingo_load_from = '/path/to/pretrained/weights' # noqa diff --git a/configs/multimodal/openflamingo/openflamingo_flickr30k.py b/configs/multimodal/openflamingo/openflamingo_flickr30k.py deleted file mode 100644 index e388f6c2..00000000 --- a/configs/multimodal/openflamingo/openflamingo_flickr30k.py +++ /dev/null @@ -1,76 +0,0 @@ -from opencompass.multimodal.models.openflamingo import OpenFlamingoCaptionPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='mmpretrain.ResizeEdge', - scale=224, - interpolation='bicubic', - backend='pillow'), - dict(type='CenterCrop', crop_size=(224, 224)), - dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id']) -] - -dataset = dict(type='mmpretrain.Flickr30kCaption', - data_root='data/flickr30k', - ann_file='annotations/dataset_flickr30k.json', - data_prefix='images', - split='val', - pipeline=val_pipeline) - -openflamingo_flickr30k_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - sampler=dict(type='DefaultSampler', shuffle=False), - collate_fn=dict(type='default_collate'), - persistent_workers=True, -) - -# model settings -openflamingo_flickr30k_model = dict( - type='openflamingo', - data_preprocessor=dict( - type='mmpretrain.MultiModalDataPreprocessor', - mean=[122.770938, 116.7460125, 104.09373615], - std=[68.5005327, 66.6321579, 70.32316305], - to_rgb=True, - ), - tokenizer=dict(type='mmpretrain.LlamaTokenizer', - name_or_path='decapoda-research/llama-7b-hf'), - vision_encoder=dict( - type='mmpretrain.VisionTransformer', - arch='l', - patch_size=14, - pre_norm=True, - norm_cfg=dict(type='LN', eps=1e-5), - layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), - final_norm=False, - out_type='raw', - pretrained= # noqa: E251 - '/path/to/vision/encoder', # noqa - ), - lang_encoder=dict( - base=dict(type='mmpretrain.AutoModelForCausalLM', - name_or_path= - 'decapoda-research/llama-7b-hf', - local_files_only=True), - adapter=dict(type='mmpretrain.FlamingoLMAdapter', - vis_hidden_size=1024, - cross_attn_every_n_layers=4, - use_media_placement_augmentation=False), - ), - task='caption', - generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0), - prompt_constructor=dict(type=OpenFlamingoCaptionPromptConstructor) -) - -# evaluation settings -openflamingo_flickr30k_evaluator = [ - dict( - type='mmpretrain.COCOCaption', - ann_file='data/flickr30k/annotations/flickr30k_val_gt.json', - ) # noqa -] - -openflamingo_load_from = '/path/to/pretrained/weights' # noqa diff --git a/configs/multimodal/openflamingo/openflamingo_gqa.py b/configs/multimodal/openflamingo/openflamingo_gqa.py deleted file mode 100644 index c4c33303..00000000 --- a/configs/multimodal/openflamingo/openflamingo_gqa.py +++ /dev/null @@ -1,75 +0,0 @@ -from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='mmpretrain.ResizeEdge', - scale=224, - interpolation='bicubic', - backend='pillow'), - dict(type='CenterCrop', crop_size=(224, 224)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.GQA', - data_root='data/gqa', - data_prefix='images', - ann_file='annotations/testdev_balanced_questions.json', - pipeline=val_pipeline) - -openflamingo_gqa_dataloader = dict( - batch_size=8, - num_workers=4, - dataset=dataset, - 
sampler=dict(type='DefaultSampler', shuffle=False), - collate_fn=dict(type='default_collate'), - persistent_workers=True, -) - -# model settings -openflamingo_gqa_model = dict( - type='openflamingo', - data_preprocessor=dict( - type='mmpretrain.MultiModalDataPreprocessor', - mean=[122.770938, 116.7460125, 104.09373615], - std=[68.5005327, 66.6321579, 70.32316305], - to_rgb=True, - ), - tokenizer=dict(type='mmpretrain.LlamaTokenizer', - name_or_path='decapoda-research/llama-7b-hf'), - vision_encoder=dict( - type='mmpretrain.VisionTransformer', - arch='l', - patch_size=14, - pre_norm=True, - norm_cfg=dict(type='LN', eps=1e-5), - layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), - final_norm=False, - out_type='raw', - pretrained= # noqa: E251 - '/path/to/vision/encoder', # noqa - ), - lang_encoder=dict( - base=dict(type='mmpretrain.AutoModelForCausalLM', - name_or_path= - 'decapoda-research/llama-7b-hf', - local_files_only=True), - adapter=dict(type='mmpretrain.FlamingoLMAdapter', - vis_hidden_size=1024, - cross_attn_every_n_layers=4, - use_media_placement_augmentation=False), - ), - task='vqa', - generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0), - prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor) -) - -# evaluation settings -openflamingo_gqa_evaluator = [dict(type='mmpretrain.GQAAcc')] - - -openflamingo_load_from = '/path/to/pretrained/weights' # noqa diff --git a/configs/multimodal/openflamingo/openflamingo_mmbench.py b/configs/multimodal/openflamingo/openflamingo_mmbench.py deleted file mode 100644 index f01e5a78..00000000 --- a/configs/multimodal/openflamingo/openflamingo_mmbench.py +++ /dev/null @@ -1,77 +0,0 @@ -from opencompass.multimodal.models.openflamingo import OpenFlamingoMMBenchPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.PILToNumpy'), - dict(type='mmpretrain.ResizeEdge', - scale=224, - interpolation='bicubic', - backend='pillow'), - dict(type='CenterCrop', crop_size=(224, 224)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'options', 'category', 'l2-category', 'index', - 'context', 'options_dict' - ]) -] - -dataset = dict(type='opencompass.MMBenchDataset', - data_file='data/mmbench/mmbench_test_20230712.tsv', - pipeline=val_pipeline) - -openflamingo_mmbench_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - sampler=dict(type='DefaultSampler', shuffle=False), - collate_fn=dict(type='default_collate'), - persistent_workers=True, -) - -# model settings -openflamingo_mmbench_model = dict( - type='openflamingo', - data_preprocessor=dict( - type='mmpretrain.MultiModalDataPreprocessor', - mean=[122.770938, 116.7460125, 104.09373615], - std=[68.5005327, 66.6321579, 70.32316305], - to_rgb=True, - ), - tokenizer=dict(type='mmpretrain.LlamaTokenizer', - name_or_path='decapoda-research/llama-7b-hf'), - vision_encoder=dict( - type='mmpretrain.VisionTransformer', - arch='l', - patch_size=14, - pre_norm=True, - norm_cfg=dict(type='LN', eps=1e-5), - layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), - final_norm=False, - out_type='raw', - pretrained= # noqa: E251 - '/path/to/vision/encoder', # noqa - ), - lang_encoder=dict( - base=dict(type='mmpretrain.AutoModelForCausalLM', - name_or_path= - 'decapoda-research/llama-7b-hf', - local_files_only=True), - adapter=dict(type='mmpretrain.FlamingoLMAdapter', - vis_hidden_size=1024, - cross_attn_every_n_layers=4, - use_media_placement_augmentation=False), - ), - task='vqa', - generation_cfg=dict(num_beams=3, 
max_new_tokens=20, length_penalty=-2.0), - prompt_constructor=dict(type=OpenFlamingoMMBenchPromptConstructor) -) - -# evaluation settings -openflamingo_mmbench_evaluator = [ - dict( - type='opencompass.DumpResults', - save_path= # noqa: E251 - 'work_dirs/9b-flamingo/9b-flamingo-mmbench.xlsx') -] - -openflamingo_load_from = '/path/to/pretrained/weights' # noqa diff --git a/configs/multimodal/openflamingo/openflamingo_ocr_vqa.py b/configs/multimodal/openflamingo/openflamingo_ocr_vqa.py deleted file mode 100644 index 10298830..00000000 --- a/configs/multimodal/openflamingo/openflamingo_ocr_vqa.py +++ /dev/null @@ -1,75 +0,0 @@ -# dataloader settings -val_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='mmpretrain.ResizeEdge', - scale=224, - interpolation='bicubic', - backend='pillow'), - dict(type='CenterCrop', crop_size=(224, 224)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.OCRVQA', - data_root='data/ocrvqa', - ann_file='annotations/dataset.json', - split='test', - data_prefix='images', - pipeline=val_pipeline) - -openflamingo_ocrvqa_dataloader = dict( - batch_size=8, - num_workers=4, - dataset=dataset, - sampler=dict(type='DefaultSampler', shuffle=False), - collate_fn=dict(type='default_collate'), - persistent_workers=True, -) - -from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor - -# model settings -openflamingo_ocrvqa_model = dict( - type='openflamingo', - data_preprocessor=dict( - type='mmpretrain.MultiModalDataPreprocessor', - mean=[122.770938, 116.7460125, 104.09373615], - std=[68.5005327, 66.6321579, 70.32316305], - to_rgb=True, - ), - tokenizer=dict(type='mmpretrain.LlamaTokenizer', - name_or_path='decapoda-research/llama-7b-hf'), - vision_encoder=dict( - type='mmpretrain.VisionTransformer', - arch='l', - patch_size=14, - pre_norm=True, - norm_cfg=dict(type='LN', eps=1e-5), - layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), - final_norm=False, - out_type='raw', - pretrained= # noqa: E251 - '/path/to/vision/encoder', # noqa - ), - lang_encoder=dict( - base=dict(type='mmpretrain.AutoModelForCausalLM', - name_or_path= - 'decapoda-research/llama-7b-hf', - local_files_only=True), - adapter=dict(type='mmpretrain.FlamingoLMAdapter', - vis_hidden_size=1024, - cross_attn_every_n_layers=4, - use_media_placement_augmentation=False), - ), - task='vqa', - generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0), - prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor) -) - -# evaluation settings -openflamingo_ocrvqa_evaluator = [dict(type='mmpretrain.VQAAcc')] - -openflamingo_load_from = '/path/to/pretrained/weights' # noqa diff --git a/configs/multimodal/openflamingo/openflamingo_ok_vqa.py b/configs/multimodal/openflamingo/openflamingo_ok_vqa.py deleted file mode 100644 index 733d1457..00000000 --- a/configs/multimodal/openflamingo/openflamingo_ok_vqa.py +++ /dev/null @@ -1,77 +0,0 @@ -from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='mmpretrain.ResizeEdge', - scale=224, - interpolation='bicubic', - backend='pillow'), - dict(type='CenterCrop', crop_size=(224, 224)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - 
type='mmpretrain.COCOVQA', - data_root='data/okvqa', - question_file='annotations/OpenEnded_mscoco_val2014_questions.json', - ann_file='annotations/mscoco_val2014_annotations.json', - pipeline=val_pipeline, - data_prefix='images/val2014', -) - -openflamingo_okvqa_dataloader = dict( - batch_size=8, - num_workers=4, - dataset=dataset, - sampler=dict(type='DefaultSampler', shuffle=False), - collate_fn=dict(type='default_collate'), - persistent_workers=True, -) - -# model settings -openflamingo_okvqa_model = dict( - type='openflamingo', - data_preprocessor=dict( - type='mmpretrain.MultiModalDataPreprocessor', - mean=[122.770938, 116.7460125, 104.09373615], - std=[68.5005327, 66.6321579, 70.32316305], - to_rgb=True, - ), - tokenizer=dict(type='mmpretrain.LlamaTokenizer', - name_or_path='decapoda-research/llama-7b-hf'), - vision_encoder=dict( - type='mmpretrain.VisionTransformer', - arch='l', - patch_size=14, - pre_norm=True, - norm_cfg=dict(type='LN', eps=1e-5), - layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), - final_norm=False, - out_type='raw', - pretrained= # noqa: E251 - '/path/to/vision/encoder', # noqa - ), - lang_encoder=dict( - base=dict(type='mmpretrain.AutoModelForCausalLM', - name_or_path= - 'decapoda-research/llama-7b-hf', - local_files_only=True), - adapter=dict(type='mmpretrain.FlamingoLMAdapter', - vis_hidden_size=1024, - cross_attn_every_n_layers=4, - use_media_placement_augmentation=False), - ), - task='vqa', - generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0), - prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor) -) - -# evaluation settings -openflamingo_okvqa_evaluator = [dict(type='mmpretrain.VQAAcc')] - -openflamingo_load_from = '/path/to/pretrained/weights' # noqa diff --git a/configs/multimodal/openflamingo/openflamingo_scienceqa.py b/configs/multimodal/openflamingo/openflamingo_scienceqa.py deleted file mode 100644 index 292b9146..00000000 --- a/configs/multimodal/openflamingo/openflamingo_scienceqa.py +++ /dev/null @@ -1,76 +0,0 @@ -from opencompass.multimodal.models.openflamingo import OpenFlamingoScienceQAPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='mmpretrain.ResizeEdge', - scale=224, - interpolation='bicubic', - backend='pillow'), - dict(type='CenterCrop', crop_size=(224, 224)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'gt_answer', 'choices', 'hint', 'lecture', 'solution' - ]) -] - -dataset = dict(type='mmpretrain.ScienceQA', - data_root='./data/scienceqa', - split='val', - split_file='pid_splits.json', - ann_file='problems.json', - image_only=True, - data_prefix=dict(img_path='val'), - pipeline=val_pipeline) - -openflamingo_scienceqa_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - sampler=dict(type='DefaultSampler', shuffle=False), - collate_fn=dict(type='default_collate'), - persistent_workers=True, -) - -# model settings -openflamingo_scienceqa_model = dict( - type='openflamingo', - data_preprocessor=dict( - type='mmpretrain.MultiModalDataPreprocessor', - mean=[122.770938, 116.7460125, 104.09373615], - std=[68.5005327, 66.6321579, 70.32316305], - to_rgb=True, - ), - tokenizer=dict(type='mmpretrain.LlamaTokenizer', - name_or_path='decapoda-research/llama-7b-hf'), - vision_encoder=dict( - type='mmpretrain.VisionTransformer', - arch='l', - patch_size=14, - pre_norm=True, - norm_cfg=dict(type='LN', eps=1e-5), - layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), - final_norm=False, - 
out_type='raw', - pretrained= # noqa: E251 - '/path/to/vision/encoder', # noqa - ), - lang_encoder=dict( - base=dict(type='mmpretrain.AutoModelForCausalLM', - name_or_path= - 'decapoda-research/llama-7b-hf', - local_files_only=True), - adapter=dict(type='mmpretrain.FlamingoLMAdapter', - vis_hidden_size=1024, - cross_attn_every_n_layers=4, - use_media_placement_augmentation=False), - ), - task='vqa', - generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0), - prompt_constructor=dict(type=OpenFlamingoScienceQAPromptConstructor) -) - -# evaluation settings -openflamingo_scienceqa_evaluator = [dict(type='mmpretrain.ScienceQAMetric')] - -openflamingo_load_from = '/path/to/pretrained/weights' # noqa diff --git a/configs/multimodal/openflamingo/openflamingo_textvqa.py b/configs/multimodal/openflamingo/openflamingo_textvqa.py deleted file mode 100644 index 67f0b343..00000000 --- a/configs/multimodal/openflamingo/openflamingo_textvqa.py +++ /dev/null @@ -1,76 +0,0 @@ -from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='mmpretrain.ResizeEdge', - scale=224, - interpolation='bicubic', - backend='pillow'), - dict(type='CenterCrop', crop_size=(224, 224)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.TextVQA', - data_root='data/textvqa', - ann_file='annotations/TextVQA_0.5.1_val.json', - pipeline=val_pipeline, - data_prefix='images/train_images', -) - -openflamingo_textvqa_dataloader = dict( - batch_size=8, - num_workers=4, - dataset=dataset, - sampler=dict(type='DefaultSampler', shuffle=False), - collate_fn=dict(type='default_collate'), - persistent_workers=True, -) - -# model settings -openflamingo_textvqa_model = dict( - type='openflamingo', - data_preprocessor=dict( - type='mmpretrain.MultiModalDataPreprocessor', - mean=[122.770938, 116.7460125, 104.09373615], - std=[68.5005327, 66.6321579, 70.32316305], - to_rgb=True, - ), - tokenizer=dict(type='mmpretrain.LlamaTokenizer', - name_or_path='decapoda-research/llama-7b-hf'), - vision_encoder=dict( - type='mmpretrain.VisionTransformer', - arch='l', - patch_size=14, - pre_norm=True, - norm_cfg=dict(type='LN', eps=1e-5), - layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), - final_norm=False, - out_type='raw', - pretrained= # noqa: E251 - '/path/to/vision/encoder', # noqa - ), - lang_encoder=dict( - base=dict(type='mmpretrain.AutoModelForCausalLM', - name_or_path= - 'decapoda-research/llama-7b-hf', - local_files_only=True), - adapter=dict(type='mmpretrain.FlamingoLMAdapter', - vis_hidden_size=1024, - cross_attn_every_n_layers=4, - use_media_placement_augmentation=False), - ), - task='vqa', - generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0), - prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor) -) - -# evaluation settings -openflamingo_textvqa_evaluator = [dict(type='mmpretrain.VQAAcc')] - -openflamingo_load_from = '/path/to/pretrained/weights' # noqa diff --git a/configs/multimodal/openflamingo/openflamingo_vizwiz.py b/configs/multimodal/openflamingo/openflamingo_vizwiz.py deleted file mode 100644 index e9b5262a..00000000 --- a/configs/multimodal/openflamingo/openflamingo_vizwiz.py +++ /dev/null @@ -1,74 +0,0 @@ -from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor -# dataloader settings 
-val_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='mmpretrain.ResizeEdge', - scale=224, - interpolation='bicubic', - backend='pillow'), - dict(type='CenterCrop', crop_size=(224, 224)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.VizWiz', - data_root='data/vizwiz/', - data_prefix='Images/val', - ann_file='Annotations/val.json', - pipeline=val_pipeline) - -openflamingo_vizwiz_dataloader = dict( - batch_size=8, - num_workers=4, - dataset=dataset, - sampler=dict(type='DefaultSampler', shuffle=False), - collate_fn=dict(type='default_collate'), - persistent_workers=True, -) - -# model settings -openflamingo_vizwiz_model = dict( - type='openflamingo', - data_preprocessor=dict( - type='mmpretrain.MultiModalDataPreprocessor', - mean=[122.770938, 116.7460125, 104.09373615], - std=[68.5005327, 66.6321579, 70.32316305], - to_rgb=True, - ), - tokenizer=dict(type='mmpretrain.LlamaTokenizer', - name_or_path='decapoda-research/llama-7b-hf'), - vision_encoder=dict( - type='mmpretrain.VisionTransformer', - arch='l', - patch_size=14, - pre_norm=True, - norm_cfg=dict(type='LN', eps=1e-5), - layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), - final_norm=False, - out_type='raw', - pretrained= # noqa: E251 - '/path/to/vision/encoder', # noqa - ), - lang_encoder=dict( - base=dict(type='mmpretrain.AutoModelForCausalLM', - name_or_path= - 'decapoda-research/llama-7b-hf', - local_files_only=True), - adapter=dict(type='mmpretrain.FlamingoLMAdapter', - vis_hidden_size=1024, - cross_attn_every_n_layers=4, - use_media_placement_augmentation=False), - ), - task='vqa', - generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0), - prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor) -) - -# evaluation settings -openflamingo_vizwiz_evaluator = [dict(type='mmpretrain.VQAAcc')] - - -openflamingo_load_from = '/path/to/pretrained/weights' # noqa diff --git a/configs/multimodal/openflamingo/openflamingo_vqav2.py b/configs/multimodal/openflamingo/openflamingo_vqav2.py deleted file mode 100644 index 52d4dbf5..00000000 --- a/configs/multimodal/openflamingo/openflamingo_vqav2.py +++ /dev/null @@ -1,75 +0,0 @@ -from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor -# dataloader settings -val_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='mmpretrain.ResizeEdge', - scale=224, - interpolation='bicubic', - backend='pillow'), - dict(type='CenterCrop', crop_size=(224, 224)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.COCOVQA', - data_root='data/coco', - data_prefix='images/val2014', - question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json', - ann_file='annotations/v2_mscoco_val2014_annotations.json', - pipeline=val_pipeline) - -openflamingo_vqav2_dataloader = dict( - batch_size=8, - num_workers=4, - dataset=dataset, - sampler=dict(type='DefaultSampler', shuffle=False), - collate_fn=dict(type='default_collate'), - persistent_workers=True, -) - -# model settings -openflamingo_vqav2_model = dict( - type='openflamingo', - data_preprocessor=dict( - type='mmpretrain.MultiModalDataPreprocessor', - mean=[122.770938, 116.7460125, 104.09373615], - std=[68.5005327, 66.6321579, 70.32316305], - to_rgb=True, - ), - 
tokenizer=dict(type='mmpretrain.LlamaTokenizer', - name_or_path='decapoda-research/llama-7b-hf'), - vision_encoder=dict( - type='mmpretrain.VisionTransformer', - arch='l', - patch_size=14, - pre_norm=True, - norm_cfg=dict(type='LN', eps=1e-5), - layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), - final_norm=False, - out_type='raw', - pretrained= # noqa: E251 - '/path/to/vision/encoder', # noqa - ), - lang_encoder=dict( - base=dict(type='mmpretrain.AutoModelForCausalLM', - name_or_path= - 'decapoda-research/llama-7b-hf', - local_files_only=True), - adapter=dict(type='mmpretrain.FlamingoLMAdapter', - vis_hidden_size=1024, - cross_attn_every_n_layers=4, - use_media_placement_augmentation=False), - ), - task='vqa', - generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0), - prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor) -) - -# evaluation settings -openflamingo_vqav2_evaluator = [dict(type='mmpretrain.VQAAcc')] - -openflamingo_load_from = '/path/to/pretrained/weights' # noqa diff --git a/configs/multimodal/openflamingo/openflamingo_vsr.py b/configs/multimodal/openflamingo/openflamingo_vsr.py deleted file mode 100644 index 0130962d..00000000 --- a/configs/multimodal/openflamingo/openflamingo_vsr.py +++ /dev/null @@ -1,75 +0,0 @@ -from opencompass.multimodal.models.openflamingo import OpenFlamingoVQAPromptConstructor, OpenFlamingoVSRPostProcessor -# dataloader settings -val_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='mmpretrain.ResizeEdge', - scale=224, - interpolation='bicubic', - backend='pillow'), - dict(type='CenterCrop', crop_size=(224, 224)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.VSR', - data_root='data/vsr/', - data_prefix='images/', - ann_file='annotations/test.json', - pipeline=val_pipeline) - -openflamingo_vsr_dataloader = dict( - batch_size=8, - num_workers=4, - dataset=dataset, - sampler=dict(type='DefaultSampler', shuffle=False), - collate_fn=dict(type='default_collate'), - persistent_workers=True, -) - -# model settings -openflamingo_vsr_model = dict( - type='openflamingo', - data_preprocessor=dict( - type='mmpretrain.MultiModalDataPreprocessor', - mean=[122.770938, 116.7460125, 104.09373615], - std=[68.5005327, 66.6321579, 70.32316305], - to_rgb=True, - ), - tokenizer=dict(type='mmpretrain.LlamaTokenizer', - name_or_path='decapoda-research/llama-7b-hf'), - vision_encoder=dict( - type='mmpretrain.VisionTransformer', - arch='l', - patch_size=14, - pre_norm=True, - norm_cfg=dict(type='LN', eps=1e-5), - layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), - final_norm=False, - out_type='raw', - pretrained= # noqa: E251 - '/path/to/vision/encoder', # noqa - ), - lang_encoder=dict( - base=dict(type='mmpretrain.AutoModelForCausalLM', - name_or_path= - 'decapoda-research/llama-7b-hf', - local_files_only=True), - adapter=dict(type='mmpretrain.FlamingoLMAdapter', - vis_hidden_size=1024, - cross_attn_every_n_layers=4, - use_media_placement_augmentation=False), - ), - task='vqa', - generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0), - prompt_constructor=dict(type=OpenFlamingoVQAPromptConstructor, shot_prompt=('The cat is behind the laptop. Short Answer:yes<|endofchunk|>' # noqa: E501 - 'The cow is ahead of the person. 
Short Answer:no<|endofchunk|>')), - post_processor=dict(type=OpenFlamingoVSRPostProcessor) -) - -# evaluation settings -openflamingo_vsr_evaluator = [dict(type='mmpretrain.GQAAcc')] - -openflamingo_load_from = '/path/to/pretrained/weights' # noqa diff --git a/configs/multimodal/otter/README.md b/configs/multimodal/otter/README.md deleted file mode 100644 index b747415a..00000000 --- a/configs/multimodal/otter/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# OTTER: Multi-modal In-context Instruction Tuning. - -### Prepare the environment - -```sh -pip install otter_ai -``` - -### Start evaluation - -#### Slurm - -```sh -cd $root -python run.py configs/multimodal/tasks.py --mm-eval --slurm -p $PARTITION -``` - -#### PyTorch - - -```sh -cd $root -python run.py configs/multimodal/tasks.py --mm-eval -``` \ No newline at end of file diff --git a/configs/multimodal/otter/otter_9b_mmbench.py b/configs/multimodal/otter/otter_9b_mmbench.py deleted file mode 100644 index 2b783018..00000000 --- a/configs/multimodal/otter/otter_9b_mmbench.py +++ /dev/null @@ -1,43 +0,0 @@ -# dataloader settings -from opencompass.multimodal.models.otter import ( - OTTERMMBenchPromptConstructor, OTTERMMBenchPostProcessor) - -val_pipeline = [ - dict(type="mmpretrain.torchvision/Resize", size=(224, 224), interpolation=3), - dict(type="mmpretrain.torchvision/ToTensor"), - dict( - type="mmpretrain.torchvision/Normalize", - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711), - ), - dict( - type="mmpretrain.PackInputs", - algorithm_keys=["question", "answer", "options", "category", "l2-category", "context", "index", "options_dict"], - ), -] - -dataset = dict( - type="opencompass.MMBenchDataset", data_file="/path/to/mmbench/mmbench_test_20230712.tsv", pipeline=val_pipeline -) - -otter_9b_mmbench_dataloader = dict( - batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type="pseudo_collate"), - sampler=dict(type="DefaultSampler", shuffle=False), -) - -# model settings -otter_9b_mmbench_model = dict( - type="otter-9b", - model_path="/path/to/OTTER-Image-MPT7B/", # noqa - load_bit="bf16", - prompt_constructor=dict(type=OTTERMMBenchPromptConstructor, - model_label='GPT', - user_label='User'), - post_processor=dict(type=OTTERMMBenchPostProcessor) -) - -# evaluation settings -otter_9b_mmbench_evaluator = [dict(type="opencompass.DumpResults", save_path="work_dirs/otter-9b-mmbench.xlsx")] diff --git a/configs/multimodal/qwen/qwenvl_base_7b_mmbench.py b/configs/multimodal/qwen/qwenvl_base_7b_mmbench.py deleted file mode 100644 index 23cfb8e6..00000000 --- a/configs/multimodal/qwen/qwenvl_base_7b_mmbench.py +++ /dev/null @@ -1,41 +0,0 @@ -from opencompass.multimodal.models.qwen import QwenVLMMBenchPromptConstructor, QwenVLBasePostProcessor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.torchvision/Resize', - size=(448, 448), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'options', 'category', 'l2-category', 'context', - 'index', 'options_dict' - ]) -] - -dataset = dict(type='opencompass.MMBenchDataset', - data_file='data/mmbench/mmbench_test_20230712.tsv', - pipeline=val_pipeline) - -qwen_mmbench_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', 
shuffle=False)) - -# model settings -qwen_model = dict( - type='qwen-vl-base', - pretrained_path='Qwen/Qwen-VL', # or Huggingface repo id - prompt_constructor=dict(type=QwenMMBenchPromptConstructor), - post_processor=dict(type=QwenVLBasePostProcessor) -) - -# evaluation settings -qwen_mmbench_evaluator = [ - dict(type='opencompass.DumpResults', - save_path='work_dirs/qwenvl-base-7b-mmbench.xlsx') -] diff --git a/configs/multimodal/qwen/qwenvl_chat_7b_coco_caption.py b/configs/multimodal/qwen/qwenvl_chat_7b_coco_caption.py deleted file mode 100644 index 96b22e84..00000000 --- a/configs/multimodal/qwen/qwenvl_chat_7b_coco_caption.py +++ /dev/null @@ -1,44 +0,0 @@ -from opencompass.multimodal.models.qwen import QwenVLChatPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(448, 448), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=['image_id']) -] - -dataset = dict(type='mmpretrain.COCOCaption', - data_root='data/coco', - data_prefix=dict(img_path='images'), - ann_file='annotations/coco_karpathy_val.json', - pipeline=val_pipeline) - -qwen_coco_caption_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -qwen_coco_caption_model = dict( - type='qwen-vl-chat', - pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id - prompt_constructor=dict(type=QwenVLChatPromptConstructor, prompt='Describe the image.'), - is_caption_task=True, -) - -# evaluation settings -qwen_coco_caption_evaluator = [ - dict( - type='mmpretrain.COCOCaption', - ann_file='data/coco/annotations/coco_karpathy_val_gt.json', - ) # noqa -] diff --git a/configs/multimodal/qwen/qwenvl_chat_7b_flickr30k.py b/configs/multimodal/qwen/qwenvl_chat_7b_flickr30k.py deleted file mode 100644 index c286d064..00000000 --- a/configs/multimodal/qwen/qwenvl_chat_7b_flickr30k.py +++ /dev/null @@ -1,44 +0,0 @@ -from opencompass.multimodal.models.qwen import QwenVLChatPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(448, 448), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id']) -] - -dataset = dict(type='mmpretrain.Flickr30kCaption', - data_root='data/flickr30k', - ann_file='annotations/dataset_flickr30k.json', - data_prefix='images', - split='val', - pipeline=val_pipeline) - -qwen_flickr30k_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -qwen_flickr30k_model = dict( - type='qwen-vl-chat', - pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id - prompt_constructor=dict(type=QwenVLChatPromptConstructor, prompt='Describe the image.'), - is_caption_task=True, -) - -# evaluation settings -qwen_flickr30k_evaluator = [ - dict( - 
type='mmpretrain.COCOCaption', - ann_file='data/flickr30k/annotations/flickr30k_val_gt.json', - ) # noqa -] diff --git a/configs/multimodal/qwen/qwenvl_chat_7b_gqa.py b/configs/multimodal/qwen/qwenvl_chat_7b_gqa.py deleted file mode 100644 index 8ad5e0f2..00000000 --- a/configs/multimodal/qwen/qwenvl_chat_7b_gqa.py +++ /dev/null @@ -1,41 +0,0 @@ -from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(448, 448), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.GQA', - data_root='data/gqa', - data_prefix='images', - ann_file='annotations/testdev_balanced_questions.json', - pipeline=val_pipeline) - -qwen_gqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -qwen_gqa_model = dict( - type='qwen-vl-chat', - pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id - prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor) -) - -# evaluation settings -qwen_gqa_evaluator = [dict(type='mmpretrain.GQAAcc')] diff --git a/configs/multimodal/qwen/qwenvl_chat_7b_mmbench.py b/configs/multimodal/qwen/qwenvl_chat_7b_mmbench.py deleted file mode 100644 index de665e4c..00000000 --- a/configs/multimodal/qwen/qwenvl_chat_7b_mmbench.py +++ /dev/null @@ -1,40 +0,0 @@ -from opencompass.multimodal.models.qwen import QwenVLMMBenchPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.torchvision/Resize', - size=(448, 448), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'options', 'category', 'l2-category', 'context', - 'index', 'options_dict' - ]) -] - -dataset = dict(type='opencompass.MMBenchDataset', - data_file='data/mmbench/mmbench_test_20230712.tsv', - pipeline=val_pipeline) - -qwen_mmbench_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -qwen_model = dict( - type='qwen-vl-chat', - pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id - prompt_constructor=dict(type=QwenVLMMBenchPromptConstructor) -) - -# evaluation settings -qwen_mmbench_evaluator = [ - dict(type='opencompass.DumpResults', - save_path='work_dirs/qwenvl-chat-7b-mmbench.xlsx') -] diff --git a/configs/multimodal/qwen/qwenvl_chat_7b_mmbench_cn.py b/configs/multimodal/qwen/qwenvl_chat_7b_mmbench_cn.py deleted file mode 100644 index 18e811f8..00000000 --- a/configs/multimodal/qwen/qwenvl_chat_7b_mmbench_cn.py +++ /dev/null @@ -1,41 +0,0 @@ -from opencompass.multimodal.models.qwen import QwenVLMMBenchPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.torchvision/Resize', - size=(448, 448), - interpolation=3), - 
dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'options', 'category', 'l2-category', 'context', - 'index', 'options_dict' - ]) -] - -dataset = dict(type='opencompass.MMBenchDataset', - data_file='/mnt/petrelfs/share_data/yuanyike/cnbench_v010_rolling.tsv', - pipeline=val_pipeline, - sys_prompt='请从以下选项中选择一个正确选项。') - -qwen_mmbench_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -qwen_model = dict( - type='qwen-vl-chat', - pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id - prompt_constructor=dict(type=QwenVLMMBenchPromptConstructor) -) - -# evaluation settings -qwen_mmbench_evaluator = [ - dict(type='opencompass.DumpResults', - save_path='work_dirs/qwenvl-chat-7b-cnbench-v010.xlsx') -] diff --git a/configs/multimodal/qwen/qwenvl_chat_7b_ocr_vqa.py b/configs/multimodal/qwen/qwenvl_chat_7b_ocr_vqa.py deleted file mode 100644 index 3ae7c32e..00000000 --- a/configs/multimodal/qwen/qwenvl_chat_7b_ocr_vqa.py +++ /dev/null @@ -1,42 +0,0 @@ -from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(448, 448), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.OCRVQA', - data_root='data/ocrvqa', - ann_file='annotations/dataset.json', - split='test', - data_prefix='images', - pipeline=val_pipeline) - -qwen_ocrvqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -qwen_ocrvqa_model = dict( - type='qwen-vl-chat', - pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id - prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor) -) - -# evaluation settings -qwen_ocrvqa_evaluator = [dict(type='mmpretrain.VQAAcc')] diff --git a/configs/multimodal/qwen/qwenvl_chat_7b_ok_vqa.py b/configs/multimodal/qwen/qwenvl_chat_7b_ok_vqa.py deleted file mode 100644 index a1261e89..00000000 --- a/configs/multimodal/qwen/qwenvl_chat_7b_ok_vqa.py +++ /dev/null @@ -1,44 +0,0 @@ -from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(448, 448), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.COCOVQA', - data_root='data/okvqa', - 
question_file='annotations/OpenEnded_mscoco_val2014_questions.json', - ann_file='annotations/mscoco_val2014_annotations.json', - pipeline=val_pipeline, - data_prefix='images/val2014', -) - -qwen_okvqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -qwen_okvqa_model = dict( - type='qwen-vl-chat', - pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id - prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor) -) - -# evaluation settings -qwen_okvqa_evaluator = [dict(type='mmpretrain.VQAAcc')] diff --git a/configs/multimodal/qwen/qwenvl_chat_7b_scienceqa.py b/configs/multimodal/qwen/qwenvl_chat_7b_scienceqa.py deleted file mode 100644 index 49ac6849..00000000 --- a/configs/multimodal/qwen/qwenvl_chat_7b_scienceqa.py +++ /dev/null @@ -1,43 +0,0 @@ -from opencompass.multimodal.models.qwen import QwenVLChatScienceQAPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(448, 448), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'gt_answer', 'choices', 'hint', 'lecture', 'solution' - ]) -] - -dataset = dict(type='mmpretrain.ScienceQA', - data_root='./data/scienceqa', - split='val', - split_file='pid_splits.json', - ann_file='problems.json', - image_only=True, - data_prefix=dict(img_path='val'), - pipeline=val_pipeline) - -qwen_scienceqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -qwen_scienceqa_model = dict( - type='qwen-vl-chat', - pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id - prompt_constructor=dict(type=QwenVLChatScienceQAPromptConstructor) -) - -# evaluation settings -qwen_scienceqa_evaluator = [dict(type='mmpretrain.ScienceQAMetric')] diff --git a/configs/multimodal/qwen/qwenvl_chat_7b_textvqa.py b/configs/multimodal/qwen/qwenvl_chat_7b_textvqa.py deleted file mode 100644 index fec8a1d4..00000000 --- a/configs/multimodal/qwen/qwenvl_chat_7b_textvqa.py +++ /dev/null @@ -1,43 +0,0 @@ -from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(448, 448), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.TextVQA', - data_root='data/textvqa', - ann_file='annotations/TextVQA_0.5.1_val.json', - pipeline=val_pipeline, - data_prefix='images/train_images', -) - -qwen_textvqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -qwen_textvqa_model = dict( - 
type='qwen-vl-chat', - pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id - prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor) -) - -# evaluation settings -qwen_textvqa_evaluator = [dict(type='mmpretrain.VQAAcc')] diff --git a/configs/multimodal/qwen/qwenvl_chat_7b_vizwiz.py b/configs/multimodal/qwen/qwenvl_chat_7b_vizwiz.py deleted file mode 100644 index 513a360e..00000000 --- a/configs/multimodal/qwen/qwenvl_chat_7b_vizwiz.py +++ /dev/null @@ -1,41 +0,0 @@ -from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(448, 448), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.VizWiz', - data_root='data/vizwiz/', - data_prefix='Images/val', - ann_file='Annotations/val.json', - pipeline=val_pipeline) - -qwen_vizwiz_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -qwen_vizwiz_model = dict( - type='qwen-vl-chat', - pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id - prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor) -) - -# evaluation settings -qwen_vizwiz_evaluator = [dict(type='mmpretrain.VQAAcc')] diff --git a/configs/multimodal/qwen/qwenvl_chat_7b_vqav2.py b/configs/multimodal/qwen/qwenvl_chat_7b_vqav2.py deleted file mode 100644 index 5c855652..00000000 --- a/configs/multimodal/qwen/qwenvl_chat_7b_vqav2.py +++ /dev/null @@ -1,43 +0,0 @@ -from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(448, 448), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.COCOVQA', - data_root='data/coco', - data_prefix='images/val2014', - question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json', - ann_file='annotations/v2_mscoco_val2014_annotations.json', - pipeline=val_pipeline) - -qwen_vqav2_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -qwen_vqav2_model = dict( - type='qwen-vl-chat', - pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id - prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor) -) - -# evaluation settings -qwen_vqav2_evaluator = [dict(type='mmpretrain.VQAAcc')] diff --git a/configs/multimodal/qwen/qwenvl_chat_7b_vsr.py b/configs/multimodal/qwen/qwenvl_chat_7b_vsr.py deleted file mode 100644 index 331a48f4..00000000 --- a/configs/multimodal/qwen/qwenvl_chat_7b_vsr.py 
+++ /dev/null @@ -1,42 +0,0 @@ -from opencompass.multimodal.models.qwen import QwenVLChatVQAPromptConstructor, QwenVLChatVSRPostProcessor - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(448, 448), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.VSR', - data_root='data/vsr/', - data_prefix='images/', - ann_file='annotations/test.json', - pipeline=val_pipeline) - -qwen_vsr_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -qwen_vsr_model = dict( - type='qwen-vl-chat', - pretrained_path='Qwen/Qwen-VL-Chat', # or Huggingface repo id - prompt_constructor=dict(type=QwenVLChatVQAPromptConstructor), - post_processor=dict(type=QwenVLChatVSRPostProcessor) -) - -# evaluation settings -qwen_vsr_evaluator = [dict(type='mmpretrain.GQAAcc')] diff --git a/configs/multimodal/tasks.py b/configs/multimodal/tasks.py deleted file mode 100644 index 56dee084..00000000 --- a/configs/multimodal/tasks.py +++ /dev/null @@ -1,16 +0,0 @@ -from mmengine.config import read_base - -with read_base(): - from .minigpt_4.minigpt_4_7b_mmbench import (minigpt_4_mmbench_dataloader, - minigpt_4_mmbench_evaluator, - minigpt_4_mmbench_load_from, - minigpt_4_mmbench_model) - -models = [minigpt_4_mmbench_model] -datasets = [minigpt_4_mmbench_dataloader] -evaluators = [minigpt_4_mmbench_evaluator] -load_froms = [minigpt_4_mmbench_load_from] - -num_gpus = 8 -num_procs = 8 -launcher = 'pytorch' diff --git a/configs/multimodal/visualglm/visualglm_6b_coco_caption.py b/configs/multimodal/visualglm/visualglm_6b_coco_caption.py deleted file mode 100644 index c2ffa6a9..00000000 --- a/configs/multimodal/visualglm/visualglm_6b_coco_caption.py +++ /dev/null @@ -1,45 +0,0 @@ -from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMBasePromptConstructor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id']) -] - - -dataset = dict(type='mmpretrain.COCOCaption', - data_root='data/coco', - data_prefix=dict(img_path='images'), - ann_file='annotations/coco_karpathy_val.json', - pipeline=val_pipeline) - -visualglm_coco_caption_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -visualglm_coco_caption_model = dict( - type='visualglm', - pretrained_path='/path/to/visualglm', # or Huggingface repo id - is_caption_task=True, - prompt_constructor=dict(type=VisualGLMBasePromptConstructor, system_prompt='Describe the image.'), - post_processor=dict(type=VisualGLMBasePostProcessor) -) 
- -# evaluation settings -visualglm_coco_caption_evaluator = [ - dict( - type='mmpretrain.COCOCaption', - ann_file='data/coco/annotations/coco_karpathy_val_gt.json', - ) # noqa -] diff --git a/configs/multimodal/visualglm/visualglm_6b_flickr30k.py b/configs/multimodal/visualglm/visualglm_6b_flickr30k.py deleted file mode 100644 index 9860ba78..00000000 --- a/configs/multimodal/visualglm/visualglm_6b_flickr30k.py +++ /dev/null @@ -1,46 +0,0 @@ -from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMBasePromptConstructor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', algorithm_keys=['image_id']) -] - - -dataset = dict(type='mmpretrain.Flickr30kCaption', - data_root='data/flickr30k', - ann_file='annotations/dataset_flickr30k.json', - data_prefix='images', - split='val', - pipeline=val_pipeline) - -visualglm_flickr30k_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -visualglm_flickr30k_model = dict( - type='visualglm', - pretrained_path='/path/to/visualglm', # or Huggingface repo id - is_caption_task=True, - prompt_constructor=dict(type=VisualGLMBasePromptConstructor, system_prompt='Describe the image.'), - post_processor=dict(type=VisualGLMBasePostProcessor) -) - -# evaluation settings -visualglm_flickr30k_evaluator = [ - dict( - type='mmpretrain.COCOCaption', - ann_file='data/flickr30k/annotations/flickr30k_val_gt.json', - ) # noqa -] diff --git a/configs/multimodal/visualglm/visualglm_6b_gqa.py b/configs/multimodal/visualglm/visualglm_6b_gqa.py deleted file mode 100644 index c812afbb..00000000 --- a/configs/multimodal/visualglm/visualglm_6b_gqa.py +++ /dev/null @@ -1,42 +0,0 @@ -from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMVQAPromptConstructor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.GQA', - data_root='data/gqa', - data_prefix='images', - ann_file='annotations/testdev_balanced_questions.json', - pipeline=val_pipeline) - -visualglm_gqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -visualglm_gqa_model = dict( - type='visualglm', - pretrained_path='/path/to/visualglm', # or Huggingface repo id - prompt_constructor=dict(type=VisualGLMVQAPromptConstructor), - post_processor=dict(type=VisualGLMBasePostProcessor) -) - -# evaluation settings -visualglm_gqa_evaluator = [dict(type='mmpretrain.GQAAcc')] diff --git 
a/configs/multimodal/visualglm/visualglm_6b_mmbench.py b/configs/multimodal/visualglm/visualglm_6b_mmbench.py deleted file mode 100644 index 8821fe4a..00000000 --- a/configs/multimodal/visualglm/visualglm_6b_mmbench.py +++ /dev/null @@ -1,42 +0,0 @@ -from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMMMBenchPromptConstructor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'options', 'category', 'l2-category', 'context', - 'index', 'options_dict' - ]) -] - -dataset = dict(type='opencompass.MMBenchDataset', - data_file='data/mmbench/mmbench_test_20230712.tsv', - pipeline=val_pipeline) - -visualglm_mmbench_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -visualglm_mmbench_model = dict( - type='visualglm', - pretrained_path='/path/to/visualglm', # or Huggingface repo id - prompt_constructor=dict(type=VisualGLMMMBenchPromptConstructor), - post_processor=dict(type=VisualGLMBasePostProcessor), - gen_kwargs=dict(max_new_tokens=50,num_beams=5,do_sample=False,repetition_penalty=1.0,length_penalty=-1.0) -) - -# evaluation settings -visualglm_mmbench_evaluator = [ - dict(type='opencompass.DumpResults', - save_path='work_dirs/visualglm-6b-mmbench.xlsx') -] diff --git a/configs/multimodal/visualglm/visualglm_6b_ocr_vqa.py b/configs/multimodal/visualglm/visualglm_6b_ocr_vqa.py deleted file mode 100644 index 5b991cfa..00000000 --- a/configs/multimodal/visualglm/visualglm_6b_ocr_vqa.py +++ /dev/null @@ -1,43 +0,0 @@ -from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMVQAPromptConstructor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.OCRVQA', - data_root='data/ocrvqa', - ann_file='annotations/dataset.json', - split='test', - data_prefix='images', - pipeline=val_pipeline) - -visualglm_ocrvqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -visualglm_ocrvqa_model = dict( - type='visualglm', - pretrained_path='/path/to/visualglm', # or Huggingface repo id - prompt_constructor=dict(type=VisualGLMVQAPromptConstructor), - post_processor=dict(type=VisualGLMBasePostProcessor) -) - -# evaluation settings -visualglm_ocrvqa_evaluator = [dict(type='mmpretrain.VQAAcc')] diff --git a/configs/multimodal/visualglm/visualglm_6b_ok_vqa.py b/configs/multimodal/visualglm/visualglm_6b_ok_vqa.py deleted file mode 100644 index f3c7784b..00000000 --- a/configs/multimodal/visualglm/visualglm_6b_ok_vqa.py +++ /dev/null @@ 
-1,45 +0,0 @@ -from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMVQAPromptConstructor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.COCOVQA', - data_root='data/okvqa', - question_file='annotations/OpenEnded_mscoco_val2014_questions.json', - ann_file='annotations/mscoco_val2014_annotations.json', - pipeline=val_pipeline, - data_prefix='images/val2014', -) - -visualglm_okvqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -visualglm_okvqa_model = dict( - type='visualglm', - pretrained_path='/path/to/visualglm', # or Huggingface repo id - prompt_constructor=dict(type=VisualGLMVQAPromptConstructor), - post_processor=dict(type=VisualGLMBasePostProcessor) -) - -# evaluation settings -visualglm_okvqa_evaluator = [dict(type='mmpretrain.VQAAcc')] diff --git a/configs/multimodal/visualglm/visualglm_6b_scienceqa.py b/configs/multimodal/visualglm/visualglm_6b_scienceqa.py deleted file mode 100644 index 8ec2393c..00000000 --- a/configs/multimodal/visualglm/visualglm_6b_scienceqa.py +++ /dev/null @@ -1,44 +0,0 @@ -from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMScienceQAPromptConstructor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'gt_answer', 'choices', 'hint', 'lecture', 'solution', 'has_image' - ]) -] - -dataset = dict(type='mmpretrain.ScienceQA', - data_root='./data/scienceqa', - split='val', - split_file='pid_splits.json', - ann_file='problems.json', - image_only=True, - data_prefix=dict(img_path='val'), - pipeline=val_pipeline) - -visualglm_scienceqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -visualglm_scienceqa_model = dict( - type='visualglm', - pretrained_path='/path/to/visualglm', # or Huggingface repo id - prompt_constructor=dict(type=VisualGLMScienceQAPromptConstructor), - post_processor=dict(type=VisualGLMBasePostProcessor) -) - -# evaluation settings -visualglm_scienceqa_evaluator = [dict(type='mmpretrain.ScienceQAMetric')] \ No newline at end of file diff --git a/configs/multimodal/visualglm/visualglm_6b_textvqa.py b/configs/multimodal/visualglm/visualglm_6b_textvqa.py deleted file mode 100644 index a99ee625..00000000 --- a/configs/multimodal/visualglm/visualglm_6b_textvqa.py +++ /dev/null @@ -1,44 +0,0 @@ -from opencompass.multimodal.models.visualglm import 
(VisualGLMBasePostProcessor, VisualGLMVQAPromptConstructor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.TextVQA', - data_root='data/textvqa', - ann_file='annotations/TextVQA_0.5.1_val.json', - pipeline=val_pipeline, - data_prefix='images/train_images', -) - -visualglm_textvqa_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -visualglm_textvqa_model = dict( - type='visualglm', - pretrained_path='/path/to/visualglm', # or Huggingface repo id - prompt_constructor=dict(type=VisualGLMVQAPromptConstructor), - post_processor=dict(type=VisualGLMBasePostProcessor) -) - -# evaluation settings -visualglm_textvqa_evaluator = [dict(type='mmpretrain.VQAAcc')] diff --git a/configs/multimodal/visualglm/visualglm_6b_vizwiz.py b/configs/multimodal/visualglm/visualglm_6b_vizwiz.py deleted file mode 100644 index 1accb4ac..00000000 --- a/configs/multimodal/visualglm/visualglm_6b_vizwiz.py +++ /dev/null @@ -1,42 +0,0 @@ -from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMVQAPromptConstructor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict(type='mmpretrain.VizWiz', - data_root='data/vizwiz/', - data_prefix='Images/val', - ann_file='Annotations/val.json', - pipeline=val_pipeline) - -visualglm_vizwiz_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -visualglm_vizwiz_model = dict( - type='visualglm', - pretrained_path='/path/to/visualglm', # or Huggingface repo id - prompt_constructor=dict(type=VisualGLMVQAPromptConstructor), - post_processor=dict(type=VisualGLMBasePostProcessor) -) - -# evaluation settings -visualglm_vizwiz_evaluator = [dict(type='mmpretrain.VQAAcc')] diff --git a/configs/multimodal/visualglm/visualglm_6b_vqav2.py b/configs/multimodal/visualglm/visualglm_6b_vqav2.py deleted file mode 100644 index ff3083dd..00000000 --- a/configs/multimodal/visualglm/visualglm_6b_vqav2.py +++ /dev/null @@ -1,44 +0,0 @@ -from opencompass.multimodal.models.visualglm import (VisualGLMBasePostProcessor, VisualGLMVQAPromptConstructor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - 
dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - -dataset = dict( - type='mmpretrain.COCOVQA', - data_root='data/coco', - data_prefix='images/val2014', - question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json', - ann_file='annotations/v2_mscoco_val2014_annotations.json', - pipeline=val_pipeline) - -visualglm_vqav2_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -visualglm_vqav2_model = dict( - type='visualglm', - pretrained_path='/path/to/visualglm', # or Huggingface repo id - prompt_constructor=dict(type=VisualGLMVQAPromptConstructor), - post_processor=dict(type=VisualGLMBasePostProcessor) -) - -# evaluation settings -visualglm_vqav2_evaluator = [dict(type='mmpretrain.VQAAcc')] diff --git a/configs/multimodal/visualglm/visualglm_6b_vsr.py b/configs/multimodal/visualglm/visualglm_6b_vsr.py deleted file mode 100644 index f932975d..00000000 --- a/configs/multimodal/visualglm/visualglm_6b_vsr.py +++ /dev/null @@ -1,43 +0,0 @@ -from opencompass.multimodal.models.visualglm import (VisualGLMVSRPostProcessor, VisualGLMVQAPromptConstructor) - -# dataloader settings -val_pipeline = [ - dict(type='mmpretrain.LoadImageFromFile'), - dict(type='mmpretrain.ToPIL', to_rgb=True), - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict( - type='mmpretrain.PackInputs', - algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], - meta_keys=['question_id', 'image_id'], - ) -] - - -dataset = dict(type='mmpretrain.VSR', - data_root='data/vsr/', - data_prefix='images/', - ann_file='annotations/test.json', - pipeline=val_pipeline) - -visualglm_vsr_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', shuffle=False)) - -# model settings -visualglm_vsr_model = dict( - type='visualglm', - pretrained_path='/path/to/visualglm', # or Huggingface repo id - prompt_constructor=dict(type=VisualGLMVQAPromptConstructor), - post_processor=dict(type=VisualGLMVSRPostProcessor) -) - -# evaluation settings -visualglm_vsr_evaluator = [dict(type='mmpretrain.GQAAcc')] diff --git a/docs/en/MMBench.md b/docs/en/MMBench.md deleted file mode 100644 index 04213883..00000000 --- a/docs/en/MMBench.md +++ /dev/null @@ -1,132 +0,0 @@ -# Evaluation pipeline on MMBench - -## Intro to each data sample in MMBench - -MMBench is split into **dev** and **test** splits, and each data sample in each split contains the following fields: - -``` -img: the raw data of an image -question: the question -options: the concatenated options -category: the leaf category -l2-category: the l2-level category -options_dict: the dict containing all options -index: the unique identifier of the current question -context (optional): the context of the question. -answer: the target answer to the current question.
(only exists in the dev split, and is kept confidential for the test split on our evaluation server) -``` - -## Load MMBench - -We provide a code snippet as an example of loading MMBench: - -```python -import base64 -import io -import random - -import pandas as pd -from PIL import Image -from torch.utils.data import Dataset - -def decode_base64_to_image(base64_string): - image_data = base64.b64decode(base64_string) - image = Image.open(io.BytesIO(image_data)) - return image - -class MMBenchDataset(Dataset): - def __init__(self, - data_file, - sys_prompt='There are several options:'): - self.df = pd.read_csv(data_file, sep='\t') - self.sys_prompt = sys_prompt - - def __len__(self): - return len(self.df) - - def __getitem__(self, idx): - index = self.df.iloc[idx]['index'] - image = self.df.iloc[idx]['image'] - image = decode_base64_to_image(image) - question = self.df.iloc[idx]['question'] - answer = self.df.iloc[idx]['answer'] if 'answer' in self.df.iloc[0].keys() else None - category = self.df.iloc[idx]['category'] - l2_category = self.df.iloc[idx]['l2-category'] - - option_candidate = ['A', 'B', 'C', 'D', 'E'] - options = { - cand: self.load_from_df(idx, cand) - for cand in option_candidate - if self.load_from_df(idx, cand) is not None - } - options_prompt = f'{self.sys_prompt}\n' - for key, item in options.items(): - options_prompt += f'{key}. {item}\n' - - hint = self.load_from_df(idx, 'hint') - data = { - 'img': image, - 'question': question, - 'answer': answer, - 'options': options_prompt, - 'category': category, - 'l2-category': l2_category, - 'options_dict': options, - 'index': index, - 'context': hint, - } - return data - def load_from_df(self, idx, key): - if key in self.df.iloc[idx] and not pd.isna(self.df.iloc[idx][key]): - return self.df.iloc[idx][key] - else: - return None -``` - -## How to construct the inference prompt - -```python -if data_sample['context'] is not None: - prompt = data_sample['context'] + ' ' + data_sample['question'] + ' ' + data_sample['options'] -else: - prompt = data_sample['question'] + ' ' + data_sample['options'] -``` - -For example: -Question: Which category does this image belong to? -A. Oil Painting -B. Sketch -C. Digital art -D. Photo - -
- -
- -```python -prompt = """ -###Human: Question: Which category does this image belong to? -There are several options: A. Oil Painting, B. Sketch, C. Digital art, D. Photo -###Assistant: -""" -``` - -You can make custom modifications to the prompt. - -## How to save results - -You should dump your model's predictions into an Excel (.xlsx) file, and this file should contain the following fields: - -``` -question: the question -A: The first choice -B: The second choice -C: The third choice -D: The fourth choice -prediction: The prediction of your model for the current question -category: the leaf category -l2_category: the l2-level category -index: the question index -``` - -If there are any questions with fewer than four options, simply leave those fields blank. diff --git a/docs/en/advanced_guides/multimodal_eval.md b/docs/en/advanced_guides/multimodal_eval.md deleted file mode 100644 index 967d1537..00000000 --- a/docs/en/advanced_guides/multimodal_eval.md +++ /dev/null @@ -1,108 +0,0 @@ -# Multi-modality Evaluation - -We support several multi-modality datasets, such as [MMBench](https://opencompass.org.cn/MMBench) and [SEED-Bench](https://github.com/AILab-CVC/SEED-Bench), to evaluate multi-modality models. Before starting, please make sure you have downloaded the evaluation datasets following the official instructions. - -## Start Evaluation - -Before evaluation, you can modify `tasks.py` or create a similar new file to evaluate your own model. - -Generally, to run the evaluation, we use the commands below. - -### Slurm - -```sh -cd $root -python run.py configs/multimodal/tasks.py --mm-eval --slurm -p $PARTITION -``` - -### PyTorch - -```sh -cd $root -python run.py configs/multimodal/tasks.py --mm-eval -``` - -## Configuration File - -We adopt the new config format of [MMEngine](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta). - -### Task File - -Here is the example config `configs/multimodal/tasks.py`. - -```python -from mmengine.config import read_base - -with read_base(): - from .minigpt_4.minigpt_4_7b_mmbench import (minigpt_4_mmbench_dataloader, - minigpt_4_mmbench_evaluator, - minigpt_4_mmbench_load_from, - minigpt_4_mmbench_model) - -models = [minigpt_4_mmbench_model] -datasets = [minigpt_4_mmbench_dataloader] -evaluators = [minigpt_4_mmbench_evaluator] -load_froms = [minigpt_4_mmbench_load_from] - -# set the platform and resources -num_gpus = 8 -num_procs = 8 -launcher = 'pytorch' -``` - -### Details of Task - -Here is an example of MiniGPT-4 with MMBench, with some comments to help -users understand the meaning of the keys in the config. 
- -```python -from opencompass.multimodal.models.minigpt_4 import ( - MiniGPT4MMBenchPromptConstructor, MiniGPT4MMBenchPostProcessor) - -# dataloader settings -# Here we use Transforms in MMPreTrain to process images -val_pipeline = [ - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'category', 'l2-category', 'context', 'index', - 'options_dict', 'options', 'split' - ]) -] - -# The defined MMBench datasets to load evaluation data -dataset = dict(type='opencompass.MMBenchDataset', - data_file='data/mmbench/mmbench_test_20230712.tsv', - pipeline=val_pipeline) - -minigpt_4_mmbench_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -minigpt_4_mmbench_model = dict( - type='minigpt-4', # the multimodal algorithm to test, the type can be found in `opencompass/multimodal/models/minigpt_4.py`, `@MM_MODELS.register_module('minigpt-4')` - low_resource=False, - llama_model='/path/to/vicuna-7b/', # the model path of LLM - prompt_constructor=dict(type=MiniGPT4MMBenchPromptConstructor, # the PromptConstructor to construct the prompt - image_prompt='###Human: ', - reply_prompt='###Assistant:'), - post_processor=dict(type=MiniGPT4MMBenchPostProcessor)) # the PostProcessor to process the output into the required format - -# evaluation settings -minigpt_4_mmbench_evaluator = [ - dict(type='opencompass.DumpResults', # the evaluator will dump results to save_path, code can be found in `opencompass/metrics/dump_results.py` - save_path='work_dirs/minigpt-4-7b-mmbench.xlsx') -] - -minigpt_4_mmbench_load_from = '/path/to/prerained_minigpt4_7b.pth' # the model path of the linear layer between Q-Former and LLM in MiniGPT-4 -``` diff --git a/docs/en/index.rst b/docs/en/index.rst index f5e77fa3..e3d9ba7c 100644 --- a/docs/en/index.rst +++ b/docs/en/index.rst @@ -64,7 +64,6 @@ We always welcome *PRs* and *Issues* for the betterment of OpenCompass. advanced_guides/evaluation_lightllm.md advanced_guides/code_eval.md advanced_guides/code_eval_service.md - advanced_guides/multimodal_eval.md advanced_guides/prompt_attack.md advanced_guides/longeval.md advanced_guides/subjective_evaluation.md diff --git a/docs/en/user_guides/framework_overview.md b/docs/en/user_guides/framework_overview.md index 40f36a0b..82f38f17 100644 --- a/docs/en/user_guides/framework_overview.md +++ b/docs/en/user_guides/framework_overview.md @@ -2,7 +2,7 @@ ## Evaluation Targets -The primary evaluation targets of this algorithm library are large language models and multimodal large models. We introduce specific model types for evaluation using the large language model as an example. +The primary evaluation targets of this algorithm library are large language models. We introduce specific model types for evaluation using the large language model as an example. - base Model: Typically obtained through training on massive textual data in a self-supervised manner (e.g., OpenAI's GPT-3, Meta's LLaMA). These models usually have powerful text continuation capabilities. 
diff --git a/docs/zh_cn/advanced_guides/multimodal_eval.md b/docs/zh_cn/advanced_guides/multimodal_eval.md deleted file mode 100644 index 619875f6..00000000 --- a/docs/zh_cn/advanced_guides/multimodal_eval.md +++ /dev/null @@ -1,107 +0,0 @@ -# 多模态评测 - -我们支持了多个多模态数据集,例如 [MMBench](https://opencompass.org.cn/MMBench),[SEED-Bench](https://github.com/AILab-CVC/SEED-Bench),来对多模态模型进行评测。在开始评测之前,请确保您已经按照官方教程下载了评测数据集。 - -## 开始评测 - -在评测前,您需要先修改 `tasks.py` 或者创建一个类似的新文件 `tasks_your_model.py` 来对您的模型进行评测。 - -一般来说我们使用下列命令启动评测。 - -### Slurm - -```sh -cd $root -python run.py configs/multimodal/tasks.py --mm-eval --slurm -p $PARTITION -``` - -### PyTorch - -```sh -cd $root -python run.py configs/multimodal/tasks.py --mm-eval -``` - -## 配置文件 - -We adapt the new config format of [MMEngine](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta). - -### 任务文件 - -这是 `configs/multimodal/tasks.py` 的示例。 - -```python -from mmengine.config import read_base - -with read_base(): - from .minigpt_4.minigpt_4_7b_mmbench import (minigpt_4_mmbench_dataloader, - minigpt_4_mmbench_evaluator, - minigpt_4_mmbench_load_from, - minigpt_4_mmbench_model) - -models = [minigpt_4_mmbench_model] -datasets = [minigpt_4_mmbench_dataloader] -evaluators = [minigpt_4_mmbench_evaluator] -load_froms = [minigpt_4_mmbench_load_from] - -# set the platform and resources -num_gpus = 8 -num_procs = 8 -launcher = 'pytorch' -``` - -### 细节配置 - -这是使用 MMBench 对 MiniGPT-4 进行评测的示例,我们提供了部分注释方便用户理解配置文件的含义。 - -```python -from opencompass.multimodal.models.minigpt_4 import ( - MiniGPT4MMBenchPromptConstructor, MiniGPT4MMBenchPostProcessor) - -# dataloader settings -# 我们使用 MMPreTrain 中的 transforms 对图像数据进行处理 -val_pipeline = [ - dict(type='mmpretrain.torchvision/Resize', - size=(224, 224), - interpolation=3), - dict(type='mmpretrain.torchvision/ToTensor'), - dict(type='mmpretrain.torchvision/Normalize', - mean=(0.48145466, 0.4578275, 0.40821073), - std=(0.26862954, 0.26130258, 0.27577711)), - dict(type='mmpretrain.PackInputs', - algorithm_keys=[ - 'question', 'category', 'l2-category', 'context', 'index', - 'options_dict', 'options', 'split' - ]) -] - -# 定义 MMBench dataset 来读取对应的数据 -dataset = dict(type='opencompass.MMBenchDataset', - data_file='data/mmbench/mmbench_test_20230712.tsv', - pipeline=val_pipeline) - -minigpt_4_mmbench_dataloader = dict(batch_size=1, - num_workers=4, - dataset=dataset, - collate_fn=dict(type='pseudo_collate'), - sampler=dict(type='DefaultSampler', - shuffle=False)) - -# model settings -minigpt_4_mmbench_model = dict( - type='minigpt-4', # 被测试的多模模型,type 在 `opencompass/multimodal/models/minigpt_4.py` 的 `@MM_MODELS.register_module('minigpt-4')` 中有定义 - low_resource=False, - llama_model='/path/to/vicuna-7b/', # LLM 的模型路径 - prompt_constructor=dict(type=MiniGPT4MMBenchPromptConstructor, # 使用 PromptConstructor 来构建 LLM 的输入 prompt - image_prompt='###Human: ', - reply_prompt='###Assistant:'), - post_processor=dict(type=MiniGPT4MMBenchPostProcessor)) # 使用 PostProcessor 来处理模型输出,使其符合输出格式的要求 - -# evaluation settings -minigpt_4_mmbench_evaluator = [ - dict(type='opencompass.DumpResults', # evaluator 将结果保存在 save_path,代码在 `opencompass/metrics/dump_results.py` - save_path='work_dirs/minigpt-4-7b-mmbench.xlsx') -] - -minigpt_4_mmbench_load_from = '/path/to/prerained_minigpt4_7b.pth' # 线性层的模型路径(MiniGPT-4 中 Q-Former 和 LLM 之间的线性投影层) -``` diff --git a/docs/zh_cn/index.rst b/docs/zh_cn/index.rst index 0031fe64..6f2d9f21 100644 --- a/docs/zh_cn/index.rst +++ b/docs/zh_cn/index.rst @@ -64,7 +64,6 @@ 
OpenCompass 上手路线 advanced_guides/evaluation_lightllm.md advanced_guides/code_eval.md advanced_guides/code_eval_service.md - advanced_guides/multimodal_eval.md advanced_guides/prompt_attack.md advanced_guides/longeval.md advanced_guides/subjective_evaluation.md diff --git a/opencompass/cli/main.py b/opencompass/cli/main.py index 0e8f76d8..8d806fc4 100644 --- a/opencompass/cli/main.py +++ b/opencompass/cli/main.py @@ -6,13 +6,12 @@ from datetime import datetime from mmengine.config import Config, DictAction -from opencompass.partitioners import MultimodalNaivePartitioner from opencompass.registry import PARTITIONERS, RUNNERS, build_from_cfg from opencompass.runners import SlurmRunner from opencompass.summarizers import DefaultSummarizer from opencompass.utils import LarkReporter, get_logger -from opencompass.utils.run import (exec_mm_infer_runner, fill_eval_cfg, - fill_infer_cfg, get_config_from_arg) +from opencompass.utils.run import (fill_eval_cfg, fill_infer_cfg, + get_config_from_arg) def parse_args(): @@ -34,11 +33,6 @@ def parse_args(): help='Whether to force tasks to run on dlc. If ' 'True, `--aliyun-cfg` must be set. Defaults' ' to False') - # multi-modal support - parser.add_argument('--mm-eval', - help='Whether or not enable multimodal evaluation', - action='store_true', - default=False) # Add shortcut parameters (models, datasets and summarizer) parser.add_argument('--models', nargs='+', help='', default=None) parser.add_argument('--datasets', nargs='+', help='', default=None) @@ -278,13 +272,6 @@ def main(): 'also specified --slurm or --dlc. ' 'The "infer" configuration will be overridden by ' 'your runtime arguments.') - # Check whether run multimodal evaluation - if args.mm_eval: - partitioner = MultimodalNaivePartitioner( - osp.join(cfg['work_dir'], 'predictions/')) - tasks = partitioner(cfg) - exec_mm_infer_runner(tasks, args, cfg) - return if args.dlc or args.slurm or cfg.get('infer', None) is None: fill_infer_cfg(cfg, args) diff --git a/opencompass/multimodal/datasets/__init__.py b/opencompass/multimodal/datasets/__init__.py deleted file mode 100644 index 39dde918..00000000 --- a/opencompass/multimodal/datasets/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .mmbench import MMBenchDataset # noqa: F401, F403 -from .mme import MMEDataset # noqa: F401, F403 -from .seedbench import SEEDBenchDataset # noqa: F401, F403 - -__all__ = ['MMBenchDataset' - 'SEEDBenchDataset', 'MMEDataset'] diff --git a/opencompass/multimodal/datasets/mmbench.py b/opencompass/multimodal/datasets/mmbench.py deleted file mode 100644 index a5384c4d..00000000 --- a/opencompass/multimodal/datasets/mmbench.py +++ /dev/null @@ -1,84 +0,0 @@ -import base64 -import io -from typing import List, Optional - -import pandas as pd -from mmengine.dataset import Compose -from PIL import Image -from torch.utils.data import Dataset - -from opencompass.registry import DATASETS - - -def decode_base64_to_image(base64_string) -> Image: - """Convert raw data into Pillow image.""" - image_data = base64.b64decode(base64_string) - image = Image.open(io.BytesIO(image_data)) - return image - - -@DATASETS.register_module() -class MMBenchDataset(Dataset): - """Dataset to load MMBench dataset. - - Args: - data_file (str): The path of the dataset. - pipeline (dict): The data augmentation. - sys_prompt (str): The system prompt added to the head - of these options. 
Defaults to - There are several options: - """ - - def __init__(self, - data_file: str, - pipeline: List[dict], - sys_prompt: str = 'There are several options:') -> None: - self.df = pd.read_csv(data_file, sep='\t') - self.pipeline = Compose(pipeline) - self.sys_prompt = sys_prompt - - def __len__(self) -> None: - return len(self.df) - - def __getitem__(self, idx: int) -> dict: - # Mandatory Fields Begin - index = self.df.iloc[idx]['index'] - image = self.df.iloc[idx]['image'] - image = decode_base64_to_image(image) - question = self.df.iloc[idx]['question'] - - option_candidate = ['A', 'B', 'C', 'D', 'E'] - options = { - cand: self.load_from_df(idx, cand) - for cand in option_candidate - if self.load_from_df(idx, cand) is not None - } - options_prompt = f'{self.sys_prompt}\n' - for key, item in options.items(): - options_prompt += f'{key}. {item}\n' - # Mandatory Fields End - - # Optional Fields Begin - hint = self.load_from_df(idx, 'hint') - category = self.load_from_df(idx, 'category') - l2_catetory = self.load_from_df(idx, 'l2-category') - # Optional Fields End - - data = { - 'img': image, - 'question': question, - 'options': options_prompt, - 'category': category, - 'l2-category': l2_catetory, - 'options_dict': options, - 'index': index, - 'context': hint, - } - data = self.pipeline(data) - return data - - def load_from_df(self, idx: int, key: str) -> Optional[str]: - if key in self.df.iloc[idx] and not pd.isna(self.df.iloc[idx][key]): - return self.df.iloc[idx][key] - else: - return None diff --git a/opencompass/multimodal/datasets/mme.py b/opencompass/multimodal/datasets/mme.py deleted file mode 100644 index c5105175..00000000 --- a/opencompass/multimodal/datasets/mme.py +++ /dev/null @@ -1,74 +0,0 @@ -import os -from typing import List - -from mmengine.dataset import Compose -from torch.utils.data import Dataset - -from opencompass.registry import DATASETS - - -@DATASETS.register_module() -class MMEDataset(Dataset): - """Dataset to load MME dataset. - - Args: - data_dir (str): The path of the dataset. - pipeline (List[dict]): The data augmentation. 
- """ - tasks = [ - 'artwork', 'celebrity', 'code_reasoning', 'color', - 'commonsense_reasoning', 'count', 'existence', 'landmark', - 'numerical_calculation', 'OCR', 'position', 'posters', 'scene', - 'text_translation' - ] - sub_dir_name = ('images', 'questions_answers_YN') - - def __init__(self, data_dir: str, pipeline: List[dict]) -> None: - self.pipeline = Compose(pipeline) - self.load_data(data_dir) - - def load_data(self, data_dir: str): - self.data_list = [] - image_dir, question_dir = self.sub_dir_name - for task in self.tasks: - if os.path.exists(os.path.join(data_dir, task, question_dir)): - q_list = os.listdir(os.path.join(data_dir, task, question_dir)) - i_list = os.listdir(os.path.join(data_dir, task, image_dir)) - q_prefix = os.path.join(data_dir, task, question_dir) - i_prefix = os.path.join(data_dir, task, image_dir) - else: - fn_list = os.listdir(os.path.join(data_dir, task)) - q_list = [fn for fn in fn_list if '.txt' in fn] - i_list = [fn for fn in fn_list if fn not in q_list] - q_prefix = i_prefix = os.path.join(data_dir, task) - - q_list.sort() - i_list.sort() - assert len(q_list) == len(i_list) - for q_fn, i_fn in zip(q_list, i_list): - assert q_fn.split('.')[0] == i_fn.split('.')[0] - q_path = os.path.join(q_prefix, q_fn) - image_path = os.path.join(i_prefix, i_fn) - with open(q_path, 'r') as f: - q1, a1 = f.readline().strip().split('\t') - q2, a2 = f.readline().strip().split('\t') - self.data_list.append({ - 'img_path': image_path, - 'question': q1, - 'answer': a1, - 'task': task - }) - self.data_list.append({ - 'img_path': image_path, - 'question': q2, - 'answer': a2, - 'task': task - }) - - def __len__(self) -> None: - return len(self.data_list) - - def __getitem__(self, idx: int) -> dict: - data_sample = self.data_list[idx] - data_sample = self.pipeline(data_sample) - return data_sample diff --git a/opencompass/multimodal/datasets/seedbench.py b/opencompass/multimodal/datasets/seedbench.py deleted file mode 100644 index 068d2bca..00000000 --- a/opencompass/multimodal/datasets/seedbench.py +++ /dev/null @@ -1,174 +0,0 @@ -import importlib -import json -import os.path as osp -from typing import List - -import numpy as np -import torch -from decord import VideoReader, cpu -from mmengine.dataset import Compose -from PIL import Image -from torch.utils.data import Dataset - -from opencompass.registry import DATASETS - - -@DATASETS.register_module() -class SEEDBenchDataset(Dataset): - """Dataset to load SEED-Bench dataset. - - Args: - ann_file (str): The path of the annotation file. - cc3m_path (str): The data path of the image dimension(1-9). - sthv2_path (str): The data path of the dimension 10. - epic_kitchens_path (str): The data path of the dimension 11. - breakfast_path (str): The data path of the dimension 12. - image_pipeline (List[dict]): The data transforms for image. - video_pipeline (List[dict]): The data transforms for video. - only_image (bool): Whether run SEED-Bench only with image data. - Defaults to True. 
- """ - - def __init__( - self, - ann_file: str, - cc3m_path: str, - sthv2_path: str, - epic_kitchens_path: str, - breakfast_path: str, - image_pipeline: List[dict], - video_pipeline: List[dict], - only_image: bool = True, - ) -> None: - ann_file = json.load(open(ann_file, 'rb')) - if 'questions' in ann_file.keys(): - self.ann_file = ann_file['questions'] - self.cc3m_path = cc3m_path - self.sthv2_path = sthv2_path - self.epic_kitchens_path = epic_kitchens_path - self.breakfast_path = breakfast_path - self.image_pipeline = Compose(image_pipeline) - if only_image: - image_ann_file = [ - ann for ann in self.ann_file if ann['data_type'] == 'image' - ] - self.ann_file = image_ann_file - if not only_image: - raise NotImplementedError - self.video_pipeline = Compose(video_pipeline) - - def __len__(self) -> None: - return len(self.ann_file) - - def __getitem__(self, idx: str) -> dict: - item = self.ann_file[idx] - data = { - 'question': - item['question'], - 'answer': - item['answer'], - 'choices': [ - item['choice_a'], item['choice_b'], item['choice_c'], - item['choice_d'] - ], - 'data_type': - item['data_type'], - 'question_id': - item['question_id'], - 'question_type_id': - item['question_type_id'], - 'index': - idx, - } - - if item['data_type'] == 'image': - data_path = osp.join(self.cc3m_path, item['data_id']) - raw_image = Image.open(open(data_path, 'rb')).convert('RGB') - data['data_path'] = data_path - data['img'] = raw_image - data = self.image_pipeline(data) - elif item['data_type'] == 'video': - if item['question_type_id'] == 10: - data_path = osp.join(self.sthv2_path, item['data_id']) - data['data_path'] = data_path - elif item['question_type_id'] == 11: - data_path = osp.join(self.epic_kitchens_path, item['data_id']) - data['data_path'] = data_path - data['segment'] = item['segment'] - elif item['question_type_id'] == 12: - data_path = osp.join(self.breakfast_path, item['data_id']) - data['data_path'] = data_path - data['segment'] = item['segment'] - else: - raise ValueError('The question type id is not valid.') - - # preprocessing videos in evaluation dimension 10-12 - use_pyav = False - if 'segment' in data.keys(): - segment = data['segment'] - if isinstance(segment[0], int): - # using pyav for decoding videos in evaluation dimension 12 - use_pyav = True - start, end = segment[0], segment[1] - else: - start = 0.0 - end = 0.0 - - if use_pyav: - # using pyav for videos in evaluation dimension 12 - av = importlib.importmodule('av') - reader = av.open(data_path) - frames = [ - torch.from_numpy(f.to_rgb().to_ndarray()) - for f in reader.decode(video=0) - ] - video_len = len(frames) - start_frame, end_frame = start, end - end_frame = min(end_frame, video_len) - offset = self.get_index(end_frame - start_frame, 8) - frame_indices = offset + start_frame - buffer = torch.stack([frames[idx] for idx in frame_indices]) - buffer = buffer.numpy() - else: - # using decord for videos in evaluating dimension 10-11 - import io - - import mmengine.fileio as fileio - file_obj = io.BytesIO(fileio.get(data_path)) - vr = VideoReader(file_obj, num_threads=1, ctx=cpu(0)) - video_len = len(vr) - fps = vr.get_avg_fps() - if 'segment' in data.keys(): - # obtain start and end frame for the video segment - # in evaluation dimension 11 - start_frame = int(min(max(start * fps, 0), video_len - 1)) - end_frame = int(min(max(end * fps, 0), video_len - 1)) - tot_frames = int(end_frame - start_frame) - offset = self.get_index(tot_frames, 8) - frame_indices = offset + start_frame - else: - # sample frames of the video in 
evaluation dimension 10 - frame_indices = self.get_index(video_len - 1, 8) - vr.seek(0) - buffer = vr.get_batch(frame_indices) - buffer = buffer.asnumpy() - data['imgs'] = buffer - data = self.video_pipeline(data) - - else: - raise ValueError('The data type is not valid.') - - return data - - def get_index(self, num_frames, num_segments): - if num_segments > num_frames: - offsets = np.array([idx for idx in range(num_frames)]) - else: - # uniform sampling - seg_size = float(num_frames - 1) / num_segments - start = int(seg_size / 2) - offsets = np.array([ - start + int(np.round(seg_size * idx)) - for idx in range(num_segments) - ]) - return offsets diff --git a/opencompass/multimodal/models/__init__.py b/opencompass/multimodal/models/__init__.py deleted file mode 100644 index 1157c33d..00000000 --- a/opencompass/multimodal/models/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -import os.path as osp - -from opencompass.utils import satisfy_requirement - -if satisfy_requirement('salesforce-lavis'): - from .instructblip import * # noqa: F401, F403 - -if osp.exists('opencompass/multimodal/models/minigpt_4/MiniGPT-4'): - from .minigpt_4 import * # noqa: F401, F403 - -if osp.exists( - 'opencompass/multimodal/models/llama_adapter_v2_multimodal/LLaMA-Adapter' # noqa -): - from .llama_adapter_v2_multimodal import * # noqa: F401, F403 - -from .llava import * # noqa: F401, F403 - -if osp.exists('opencompass/multimodal/models/mplug_owl/mPLUG-Owl'): - from .mplug_owl import * # noqa: F401, F403 - -from .openflamingo import * # noqa: F401, F403 -from .otter import * # noqa: F401, F403 -from .qwen import * # noqa: F401, F403 -from .visualglm import * # noqa: F401, F403 diff --git a/opencompass/multimodal/models/instructblip/__init__.py b/opencompass/multimodal/models/instructblip/__init__.py deleted file mode 100644 index 6505ec42..00000000 --- a/opencompass/multimodal/models/instructblip/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .blip2_vicuna_instruct import InstructBlipInferencer -from .post_processor import (InstructBlipCOCOCaptionPostProcessor, - InstructBlipMMBenchPostProcessor, - InstructBlipScienceQAPostProcessor, - InstructBlipVQAPostProcessor, - InstructBlipVSRPostProcessor) -from .prompt_constructor import (InstructBlipCOCOCaotionPromptConstructor, - InstructBlipMMBenchPromptConstructor, - InstructBlipScienceQAPromptConstructor, - InstructBlipVQAPromptConstructor, - InstructBlipVSRPromptConstructor) - -__all__ = [ - 'InstructBlipInferencer', - 'InstructBlipMMBenchPromptConstructor', - 'InstructBlipMMBenchPostProcessor', - 'InstructBlipCOCOCaotionPromptConstructor', - 'InstructBlipCOCOCaptionPostProcessor', - 'InstructBlipVQAPromptConstructor', - 'InstructBlipVQAPostProcessor', - 'InstructBlipScienceQAPromptConstructor', - 'InstructBlipScienceQAPostProcessor', - 'InstructBlipVSRPromptConstructor', - 'InstructBlipVSRPostProcessor', -] diff --git a/opencompass/multimodal/models/instructblip/blip2_vicuna_instruct.py b/opencompass/multimodal/models/instructblip/blip2_vicuna_instruct.py deleted file mode 100644 index 0b91cf24..00000000 --- a/opencompass/multimodal/models/instructblip/blip2_vicuna_instruct.py +++ /dev/null @@ -1,248 +0,0 @@ -"""Requires Transformer 4.28 and above, implementation may change according the -Llama implementation.""" -import logging - -import mmengine -import torch -import torch.nn as nn -from lavis.models.blip2_models.blip2 import Blip2Base, disabled_train -from mmengine.device import get_device -from transformers import LlamaForCausalLM, LlamaTokenizer - -from 
opencompass.registry import MM_MODELS - - -@MM_MODELS.register_module('blip2-vicuna-instruct') -class InstructBlipInferencer(Blip2Base): - - def __init__( - self, - prompt_constructor: dict, - post_processor: dict, - vit_model: str = 'eva_clip_g', - img_size: int = 224, - drop_path_rate: float = 0, - use_grad_checkpoint: bool = False, - vit_precision: str = 'fp16', - freeze_vit: bool = True, - num_query_token: int = 32, - llm_model: str = '', - sys_prompt: str = '', - prompt: str = '', - max_txt_len: int = 128, - max_output_txt_len: int = 256, - qformer_text_input: bool = True, - low_resource: bool = False, - mode: str = 'generation', - is_caption_task=False, - ): - super().__init__() - self.mode = mode - self.prompt_constructor = mmengine.registry.build_from_cfg( - prompt_constructor, MM_MODELS) - self.post_processor = mmengine.registry.build_from_cfg( - post_processor, MM_MODELS) - - self.tokenizer = self.init_tokenizer(truncation_side='left') - - self.visual_encoder, self.ln_vision = self.init_vision_encoder( - vit_model, img_size, drop_path_rate, use_grad_checkpoint, - vit_precision) - if freeze_vit: - for name, param in self.visual_encoder.named_parameters(): - param.requires_grad = False - self.visual_encoder = self.visual_encoder.eval() - self.visual_encoder.train = disabled_train - logging.info('freeze vision encoder') - - self.Qformer, self.query_tokens = self.init_Qformer( - num_query_token, self.visual_encoder.num_features) - - if not qformer_text_input: - self.Qformer.bert.embeddings.word_embeddings = None - self.Qformer.bert.embeddings.position_embeddings = None - for layer in self.Qformer.bert.encoder.layer: - layer.output = None - layer.intermediate = None - else: - self.Qformer.resize_token_embeddings(len(self.tokenizer)) - self.Qformer.cls = None - - self.llm_tokenizer = LlamaTokenizer.from_pretrained( - llm_model, use_fast=False, truncation_side='left') - - if low_resource: - self.llm_model = LlamaForCausalLM.from_pretrained( - llm_model, - torch_dtype=torch.float16, - load_in_8bit=True, - device_map={'': 0}) - else: - self.llm_model = LlamaForCausalLM.from_pretrained( - llm_model, torch_dtype=torch.float16) - self.llm_tokenizer.add_special_tokens({'pad_token': '[PAD]'}) - self.llm_tokenizer.add_special_tokens({'bos_token': ''}) - self.llm_tokenizer.add_special_tokens({'eos_token': ''}) - self.llm_tokenizer.add_special_tokens({'unk_token': ''}) - - self.llm_model.resize_token_embeddings(len(self.llm_tokenizer)) - - for name, param in self.llm_model.named_parameters(): - param.requires_grad = False - - self.llm_proj = nn.Linear(self.Qformer.config.hidden_size, - self.llm_model.config.hidden_size) - - self.max_txt_len = max_txt_len - self.max_output_txt_len = max_output_txt_len - self.sys_prompt = sys_prompt - self.prompt = prompt - self.is_caption_task = is_caption_task - - self._lemmatizer = None - - self.qformer_text_input = qformer_text_input - - def forward(self, batch): - if self.mode == 'generation': - return self.generate(batch) - else: - raise RuntimeError(f'Invalid mode "{self.mode}".') - - def concat_text_input_output(self, input_ids, input_atts, output_ids, - output_atts): - input_part_targets_len = [] - llm_tokens = {'input_ids': [], 'attention_mask': []} - for i in range(input_ids.size(0)): - this_input_ones = input_atts[i].sum() - input_part_targets_len.append(this_input_ones) - llm_tokens['input_ids'].append( - torch.cat([ - input_ids[i][:this_input_ones], output_ids[i][1:], - input_ids[i][this_input_ones:] - ])) - llm_tokens['attention_mask'].append( - 
torch.cat([ - input_atts[i][:this_input_ones], output_atts[i][1:], - input_atts[i][this_input_ones:] - ])) - llm_tokens['input_ids'] = torch.stack(llm_tokens['input_ids']) - llm_tokens['attention_mask'] = torch.stack( - llm_tokens['attention_mask']) - return llm_tokens, input_part_targets_len - - def pack_inputs(self, batch): - images = [image.unsqueeze(0) for image in batch['inputs']] - data_samples = [data_sample for data_sample in batch['data_samples']] - images = torch.cat(images, dim=0).to(get_device()) - inputs = {'image': images, 'data_samples': data_samples} - return inputs - - @torch.no_grad() - def generate( - self, - batch, - use_nucleus_sampling=False, - num_beams=5, - max_length=256, - min_length=1, - top_p=0.9, - repetition_penalty=1.5, - length_penalty=1, - num_captions=1, - temperature=1, - ): - inputs = self.pack_inputs(batch) - inputs = self.prompt_constructor(inputs) - image = inputs['image'] - prompt = inputs['prompt'] - data_samples = inputs['data_samples'] - - self.llm_tokenizer.padding_side = 'left' - - bs = image.size(0) - - if isinstance(prompt, str): - prompt = [prompt] * bs - else: - assert len( - prompt - ) == bs, 'The number of prompts must be equal to the batch size.' - - query_tokens = self.query_tokens.expand(bs, -1, -1) - if self.qformer_text_input: - text_Qformer = self.tokenizer( - prompt, - padding='longest', - truncation=True, - max_length=self.max_txt_len, - return_tensors='pt', - ).to(image.device) - query_atts = torch.ones(query_tokens.size()[:-1], - dtype=torch.long).to(image.device) - Qformer_atts = torch.cat([query_atts, text_Qformer.attention_mask], - dim=1) - - with self.maybe_autocast(): - image_embeds = self.ln_vision(self.visual_encoder(image)) - image_atts = torch.ones(image_embeds.size()[:-1], - dtype=torch.long).to(image.device) - - if self.qformer_text_input: - query_output = self.Qformer.bert( - text_Qformer.input_ids, - attention_mask=Qformer_atts, - query_embeds=query_tokens, - encoder_hidden_states=image_embeds, - encoder_attention_mask=image_atts, - return_dict=True, - ) - else: - query_output = self.Qformer.bert( - query_embeds=query_tokens, - encoder_hidden_states=image_embeds, - encoder_attention_mask=image_atts, - return_dict=True, - ) - - inputs_llm = self.llm_proj( - query_output.last_hidden_state[:, :query_tokens.size(1), :]) - atts_llm = torch.ones(inputs_llm.size()[:-1], - dtype=torch.long).to(image.device) - - prompt = ['###Human: ' + p + '###Assistant:' for p in prompt] - prompt = [self.sys_prompt + p for p in prompt] - llm_tokens = self.llm_tokenizer(prompt, - padding='longest', - return_tensors='pt').to(image.device) - - with self.maybe_autocast(): - inputs_embeds = self.llm_model.get_input_embeddings()( - llm_tokens.input_ids) - inputs_embeds = torch.cat([inputs_llm, inputs_embeds], dim=1) - attention_mask = torch.cat([atts_llm, llm_tokens.attention_mask], - dim=1) - - outputs = self.llm_model.generate( - inputs_embeds=inputs_embeds, - attention_mask=attention_mask, - do_sample=use_nucleus_sampling, - top_p=top_p, - temperature=temperature, - num_beams=num_beams, - max_length=self.max_output_txt_len, - min_length=min_length, - repetition_penalty=repetition_penalty, - length_penalty=length_penalty, - num_return_sequences=num_captions, - ) - - for i, data_sample in enumerate(data_samples): - output_token = outputs[i] - output_text = self.post_processor(output_token, self.llm_tokenizer) - if self.is_caption_task: - data_sample.pred_caption = output_text - else: - data_sample.pred_answer = output_text - data_samples[i] = 
data_sample - return data_samples diff --git a/opencompass/multimodal/models/instructblip/post_processor.py b/opencompass/multimodal/models/instructblip/post_processor.py deleted file mode 100644 index b67949f7..00000000 --- a/opencompass/multimodal/models/instructblip/post_processor.py +++ /dev/null @@ -1,111 +0,0 @@ -import random -import re - -import torch - - -class InstructBlipMMBenchPostProcessor: - """"Post processor for MiniGPT-4 on MMBench.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - # convert output id 0 to 2 (eos_token_id) - output_token[output_token == 0] = 2 - output_text = tokenizer.decode(output_token, - add_special_tokens=False) # noqa - output_text = self._extract_key_words(output_text.strip()) - return output_text - - def _extract_key_words(self, output_text: str) -> str: - - output_text = output_text.split('###')[0] - output_text = output_text.split('Assistant:')[-1].strip() - output_text = output_text.strip('') - output_text = output_text.strip('') - output_text = output_text.strip() - pattern = re.compile(r'([A-Z]\.)') - res = pattern.findall(output_text) - if len(res) > 0: - output_text = res[0][:-1] - return output_text - - -class InstructBlipCOCOCaptionPostProcessor: - """"Post processor for InstructBlip on COCO Caption.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - - output_token[output_token == 0] = 2 - output_text = tokenizer.decode(output_token, - add_special_tokens=False) # noqa - output_text = output_text.split('###')[0] - output_text = output_text.split('Assistant:')[-1].strip() - output_text = output_text.strip('') - output_text = output_text.strip('') - output_text = output_text.strip() - return output_text - - -class InstructBlipVQAPostProcessor: - """"Post processor for InstructBlip on VQA.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - output_token[output_token == 0] = 2 - output_text = tokenizer.decode(output_token, - add_special_tokens=False) # noqa - output_text = output_text.split('###')[0] - output_text = output_text.split('Assistant:')[-1].strip() - output_text = output_text.strip('') - output_text = output_text.strip('') - output_text = output_text.strip() - return output_text - - -class InstructBlipScienceQAPostProcessor: - """"Post processor for InstructBlip on ScienceQA.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - - output_token[output_token == 0] = 2 - output_text = tokenizer.decode(output_token, - add_special_tokens=False) # noqa - output_text = output_text.split('###')[0] - output_text = output_text.split('Assistant:')[-1].strip() - output_text = output_text.strip('') - output_text = output_text.strip('') - output_text = output_text.strip() - pattern = re.compile(r'\(([A-Z])\)') - output_text = pattern.findall(output_text) - if len(output_text) == 0: - output_text = random.choice(['A', 'B', 'C', 'D']) - else: - output_text = output_text[0] - return output_text - - -class InstructBlipVSRPostProcessor: - """"Post processor for InstructBlip on VSR.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - - output_token[output_token == 0] = 2 - output_text = tokenizer.decode(output_token, add_special_tokens=False) - pattern = r'yes|no|Yes|No' - output_text = re.findall(pattern, output_text) - if len(output_text) > 0: - 
output_text = output_text[0].lower() - return output_text diff --git a/opencompass/multimodal/models/instructblip/prompt_constructor.py b/opencompass/multimodal/models/instructblip/prompt_constructor.py deleted file mode 100644 index 818b7a93..00000000 --- a/opencompass/multimodal/models/instructblip/prompt_constructor.py +++ /dev/null @@ -1,122 +0,0 @@ -from typing import List - -from mmpretrain.structures import DataSample - - -class InstructBlipMMBenchPromptConstructor: - """Prompt constructor for InstructBlip on MMBench. - - Args: - image_prompt (str): Image prompt. - reply_prompt (str): Reply prompt. - """ - - def __init__(self, image_prompt: str = '', reply_prompt: str = '') -> None: - self.image_prompt = image_prompt - self.reply_prompt = reply_prompt - - def __call__(self, inputs: dict) -> dict: - """Construct prompt. - - Args: - inputs (dict): Input data containing image and data_samples. - - Returns: - dict: A dict containing prompt, images and data_samples. - """ - data_samples = inputs['data_samples'] - prompt = self._process(data_samples) - inputs.update({'prompt': prompt}) - - return inputs - - def _process(self, data_samples: List[DataSample]) -> str: - """Process data sample to prompt. - - Args: - data_samples (List[DataSample]): A list of data_samples. - - Returns: - str: Prompt. - """ - assert len(data_samples) == 1, 'Only support batch size 1.' - questions = [ - data_sample.get('question') for data_sample in data_samples - ] - options = [data_sample.get('options') for data_sample in data_samples] - contexts = [data_sample.get('context') for data_sample in data_samples] - question = questions[0] - option = options[0] - context = contexts[0] - if context is not None: - prompt = self.image_prompt + ' ' + context + ' ' + question + ' ' + option + ' ' + self.reply_prompt # noqa - else: - prompt = self.image_prompt + ' ' + question + ' ' + option + ' ' + self.reply_prompt # noqa - return prompt - - -class InstructBlipCOCOCaotionPromptConstructor( - InstructBlipMMBenchPromptConstructor): - """Prompt constructor for InstructBlip on COCO Caption.""" - - def _process(self, data_samples: List[DataSample]) -> str: - assert len(data_samples) == 1, 'Only support batch size 1.' - prompt = self.image_prompt + ' ' + 'a photo of' + self.reply_prompt - return prompt - - -class InstructBlipVQAPromptConstructor(InstructBlipMMBenchPromptConstructor): - """Prompt constructor for InstructBlip on VQA.""" - - def _process(self, data_samples: List[DataSample]) -> str: - assert len(data_samples) == 1, 'Only support batch size 1.' - questions = [ - data_sample.get('question') for data_sample in data_samples - ] - question = questions[0] - prompt = self.image_prompt + ' ' + question + ' ' + 'Answer this question in a single word.' + ' ' + self.reply_prompt # noqa - return prompt - - -class InstructBlipScienceQAPromptConstructor( - InstructBlipMMBenchPromptConstructor): - """Prompt constructor for InstructBlip on ScienceQA.""" - - choice_mapping = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F'} - - def _process(self, data_samples: List[DataSample]) -> str: - assert len(data_samples) == 1, 'Only support batch size 1.' 
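Aside (illustrative, not part of the removed file): the ScienceQA constructor lines that follow label each choice with a letter before joining them into the prompt. The same formatting step as a standalone sketch; format_choices is an illustrative helper, not an OpenCompass API.

CHOICE_MAPPING = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F'}


def format_choices(choices: list) -> str:
    """Turn ['cat', 'dog'] into "Choices: (A) cat (B) dog" plus a trailing newline."""
    labelled = [f'({CHOICE_MAPPING[i]}) {c}' for i, c in enumerate(choices)]
    return 'Choices: ' + ' '.join(labelled) + '\n'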
- questions = [ - 'Question: ' + data_sample.get('question') + '\n' - for data_sample in data_samples - ] # noqa - choices = [data_sample.get('choices') for data_sample in data_samples] - choices = [[ - f'({self.choice_mapping[i]}) ' + item - for i, item in enumerate(choice) - ] for choice in choices] - choices = [ - 'Choices: ' + ' '.join(choice) + '\n' for choice in choices - ] # noqa - contexts = [ - 'Context: ' + data_sample.get('hint') + '\n' - for data_sample in data_samples - ] # noqa - question = questions[0] - choice = choices[0] - context = contexts[0] - prompt = self.image_prompt + ' ' + context + ' ' + question + ' ' + choice + self.reply_prompt + ' ' + 'The answer is' # noqa - return prompt - - -class InstructBlipVSRPromptConstructor(InstructBlipMMBenchPromptConstructor): - """Prompt constructor for InstructBlip on VSR.""" - - def _process(self, data_samples: List[DataSample]) -> str: - assert len(data_samples) == 1, 'Only support batch size 1.' - questions = [ - data_sample.get('question') for data_sample in data_samples - ] - question = questions[0] - prompt = self.image_prompt + ' ' + question + ' ' + 'Is the above description correct? Answer yes or no.' + ' ' + self.reply_prompt # noqa - return prompt diff --git a/opencompass/multimodal/models/llama_adapter_v2_multimodal/__init__.py b/opencompass/multimodal/models/llama_adapter_v2_multimodal/__init__.py deleted file mode 100644 index 34c55ee8..00000000 --- a/opencompass/multimodal/models/llama_adapter_v2_multimodal/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .llama_adapter import LLaMA_adapter_v2 -from .post_processor import LlamaAadapterMMBenchPostProcessor -from .prompt_constructor import LlamaAadapterMMBenchPromptConstructor # noqa - -__all__ = [ - 'LLaMA_adapter_v2', 'LlamaAadapterMMBenchPostProcessor', - 'LlamaAadapterMMBenchPromptConstructor' -] diff --git a/opencompass/multimodal/models/llama_adapter_v2_multimodal/llama_adapter.py b/opencompass/multimodal/models/llama_adapter_v2_multimodal/llama_adapter.py deleted file mode 100644 index 76bac324..00000000 --- a/opencompass/multimodal/models/llama_adapter_v2_multimodal/llama_adapter.py +++ /dev/null @@ -1,337 +0,0 @@ -import json -import os -import os.path as osp -import sys -from pathlib import Path - -import clip -import mmengine -import torch -import torch.nn as nn -from mmengine.device import get_device -from timm.models.vision_transformer import Block - -from opencompass.registry import MM_MODELS - - -def load_package(): - """Load required packages from llama_adapter_v2_multimodal7b.""" - current_file_path = os.path.abspath(__file__) - current_folder_path = os.path.dirname(current_file_path) - - sys.path.append(os.path.join(current_folder_path, 'LLaMA-Adapter')) # noqa - from llama_adapter_v2_multimodal7b.llama.llama import (ModelArgs, - Transformer) - from llama_adapter_v2_multimodal7b.llama.tokenizer import Tokenizer - from llama_adapter_v2_multimodal7b.llama.utils import sample_top_p - sys.path.pop(-1) - - return ModelArgs, Transformer, Tokenizer, sample_top_p - - -ModelArgs, Transformer, Tokenizer, sample_top_p = load_package() - - -class LLaMA_adapter(nn.Module): - - def __init__(self, - llama_ckpt_dir, - llama_tokenizer, - max_seq_len=512, - max_batch_size=1, - clip_model='ViT-L/14', - v_embed_dim=768, - v_depth=8, - v_num_heads=16, - v_mlp_ratio=4.0, - query_len=10, - query_layer=31, - w_bias=False, - w_lora=False, - lora_rank=16, - prompt_constructor=None, - post_processor=None): - super().__init__() - - self.device = get_device() - # load llama 
configs - with open(os.path.join(llama_ckpt_dir, 'params.json'), 'r') as f: - params = json.loads(f.read()) - model_args = ModelArgs(max_seq_len=max_seq_len, - max_batch_size=max_batch_size, - **params) - - # 1. clip and clip projector - self.clip, self.clip_transform = clip.load(clip_model) - - clip_dim = self.clip.visual.proj.shape[1] - self.clip_proj = nn.Linear(clip_dim, v_embed_dim) - self.clip_proj_norm = nn.LayerNorm(v_embed_dim) - - self.query_len = query_len - self.query_layer = query_layer - - # 2. visual query, blocks and projector - self.visual_query = nn.Embedding(query_len, v_embed_dim) - self.visual_blocks = nn.ModuleList([ - Block(v_embed_dim, v_num_heads, v_mlp_ratio, qkv_bias=True) - for _ in range(v_depth) - ]) - self.visual_proj = nn.Linear(v_embed_dim, model_args.dim) - self.visual_proj_norm = nn.LayerNorm(model_args.dim) - - # 3. adapter query - self.adapter_query = nn.Embedding(query_len * query_layer, - model_args.dim) - - # 4. tokenizer - self.tokenizer = Tokenizer(model_path=llama_tokenizer) - - # 5. llama - model_args.vocab_size = self.tokenizer.n_words - model_args.w_bias = w_bias - model_args.w_lora = w_lora - model_args.lora_rank = lora_rank - torch.set_default_tensor_type(torch.cuda.HalfTensor) - self.llama = Transformer(model_args) - torch.set_default_tensor_type(torch.FloatTensor) - - ckpts = sorted(Path(llama_ckpt_dir).glob('*.pth')) - for ckpt in ckpts: - ckpt = torch.load(ckpt, map_location='cpu') - self.llama.load_state_dict(ckpt, strict=False) - - self.prompt_constructor = mmengine.registry.build_from_cfg( - prompt_constructor, MM_MODELS) - if post_processor is not None: - self.post_processor = mmengine.registry.build_from_cfg( - post_processor, MM_MODELS) - - def clip_encode_image(self, x): - # modified from CLIP - x = self.clip.visual.conv1(x) # shape = [*, width, grid, grid] - # shape = [*, width, grid ** 2] - x = x.reshape(x.shape[0], x.shape[1], -1) - x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] - x = torch.cat([ - self.clip.visual.class_embedding.to(x.dtype) + torch.zeros( - x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x - ], - dim=1) # shape = [*, grid ** 2 + 1, width] - x = x + self.clip.visual.positional_embedding.to(x.dtype) - x = self.clip.visual.ln_pre(x) - - x = x.permute(1, 0, 2) # NLD -> LND - x = self.clip.visual.transformer(x) - x = x.permute(1, 0, 2) # LND -> NLD - - # preserve all spatial tokens - x = self.clip.visual.ln_post(x[:, :, :]) - - if self.clip.visual.proj is not None: - x = x @ self.clip.visual.proj - - return x - - def forward_visual(self, imgs): - clip_feats = self.clip_encode_image(imgs) - clip_feats = self.clip_proj_norm(self.clip_proj(clip_feats.float())) - - visual_query = self.visual_query.weight.unsqueeze(0).repeat( - len(imgs), 1, 1) - visual_query = torch.cat([visual_query, clip_feats], dim=1) - for block in self.visual_blocks: - visual_query = block(visual_query) - - visual_query = visual_query[:, :self.query_len, :] - visual_query = self.visual_proj(visual_query) - visual_query = self.visual_proj_norm(visual_query) - - return visual_query - - @torch.inference_mode() - def forward(self, visual_query, tokens, start_pos: int): - _bsz, seqlen = tokens.shape - h = self.llama.tok_embeddings(tokens) - freqs_cis = self.llama.freqs_cis.to(h.device) - freqs_cis = freqs_cis[start_pos:start_pos + seqlen] - mask = None - mask = torch.full((1, 1, seqlen, seqlen), - float('-inf'), - device=h.device) - mask = torch.triu(mask, diagonal=start_pos + 1).type_as(h) - - for layer in self.llama.layers[:-1 * 
self.query_layer]: - h = layer(h, start_pos, freqs_cis, mask) - - adapter = self.adapter_query.weight.reshape(self.query_layer, - self.query_len, - -1).unsqueeze(1) - adapter_index = 0 - for layer in self.llama.layers[-1 * self.query_layer:]: - dynamic_adapter = adapter[adapter_index].repeat(_bsz, 1, 1) - dynamic_adapter = dynamic_adapter + visual_query - h = layer(h, start_pos, freqs_cis, mask, dynamic_adapter) - adapter_index = adapter_index + 1 - - h = self.llama.norm(h) - output = self.llama.output(h[:, -1, :]) - - return output.float() - - def pack_inputs(self, batch): - images = [image.unsqueeze(0) for image in batch['inputs']] - data_samples = [data_sample for data_sample in batch['data_samples']] - images = torch.cat(images, dim=0).to(get_device()) - inputs = {'image': images, 'data_samples': data_samples} - return inputs - - @torch.inference_mode() - def generate(self, batch): - max_gen_len = 256 - temperature = 0.1 - top_p = 0.75 - inputs = self.pack_inputs(batch) - inputs = self.prompt_constructor(inputs) - image = inputs['image'] - prompts = inputs['prompt'] - data_samples = inputs['data_samples'] - - data_sample = data_samples[0] - - imgs = image - - # import pdb;pdb.set_trace() - bsz = len(imgs) - params = self.llama.params - - with torch.cuda.amp.autocast(): - visual_query = self.forward_visual(imgs) - - # import pdb;pdb.set_trace() - if isinstance(prompts[0], str): - prompts = [ - self.tokenizer.encode(x, bos=True, eos=False) for x in prompts - ] - - # import pdb;pdb.set_trace() - min_prompt_size = min([len(t) for t in prompts]) - max_prompt_size = max([len(t) for t in prompts]) - - total_len = min(params.max_seq_len, max_gen_len + max_prompt_size) - - tokens = torch.full((bsz, total_len), - self.tokenizer.pad_id).cuda().long() - - # import pdb;pdb.set_trace() - for k, t in enumerate(prompts): - if len(t) <= total_len: - tokens[k, :len(t)] = torch.tensor(t).cuda().long() - else: - tokens[k, :total_len] = torch.tensor( - t[:total_len]).cuda().long() - - input_text_mask = tokens != self.tokenizer.pad_id - start_pos = min_prompt_size - prev_pos = 0 - for cur_pos in range(start_pos, total_len): - with torch.cuda.amp.autocast(): - logits = self.forward(visual_query, - tokens[:, prev_pos:cur_pos], prev_pos) - if temperature > 0: - probs = torch.softmax(logits / temperature, dim=-1) - next_token = sample_top_p(probs, top_p) - else: - next_token = torch.argmax(logits, dim=-1) - next_token = next_token.reshape(-1) - - next_token = torch.where(input_text_mask[:, cur_pos], - tokens[:, cur_pos], next_token) - tokens[:, cur_pos] = next_token - # trick: early stop if bsz==1 - if bsz == 1 and next_token[0] == self.tokenizer.eos_id: - break - prev_pos = cur_pos - - decoded = [] - for i, t in enumerate(tokens.tolist()): - - # cut to max gen len - t = t[len(prompts[i]):len(prompts[i]) + max_gen_len] - # cut to eos tok if any - try: - t = t[:t.index(self.tokenizer.eos_id)] - except ValueError: - pass - decoded.append(self.tokenizer.decode(t)) - - output_text = self.post_processor(decoded[0]) - data_sample.pred_answer = output_text - return data_sample - - -@MM_MODELS.register_module('LLaMA-adapter-v2') -class LLaMA_adapter_v2(nn.Module): - - def __init__(self, - llama_dir, - prompt_constructor: dict, - post_processor: dict, - model_path: str = 'llama_adapter_v2_multimodal7b', - name: str = 'LORA-BIAS-7B', - mode: str = 'generation', - device='cuda' if torch.cuda.is_available() else 'cpu', - download_root='ckpts'): - super().__init__() - - assert name in ['LORA-BIAS-7B', 'BIAS-7B', 
'CAPTION-7B'] - # BIAS-7B or https://xxx/sha256_BIAS-7B.pth -> 7B - llama_type = name.split('.')[0].split('-')[-1] - llama_ckpt_dir = os.path.join(llama_dir, llama_type) - llama_tokenzier_path = os.path.join(llama_dir, 'tokenizer.model') - - # load llama_adapter weights and model_cfg - print(f'Loading LLaMA-Adapter from {llama_dir}') - - current_file_path = os.path.abspath(__file__) - current_folder_path = os.path.dirname(current_file_path) - model_path = osp.join(current_folder_path, 'LLaMA-Adapter', model_path) - ckpt_root = osp.join(model_path, download_root) - ckpt_map = { - 'LORA-BIAS-7B': - '1bcbffc43484332672092e0024a8699a6eb5f558161aebf98a7c6b1db67224d1_LORA-BIAS-7B.pth', # noqa: E501 - 'BIAS-7B': - '7fa55208379faf2dd862565284101b0e4a2a72114d6490a95e432cf9d9b6c813_BIAS-7B.pth', # noqa: E501 - 'CAPTION-7B': - '5088aeb63a89746b90bcfd5cb819e1c7411b2771b267c6d131ce73e250a8abf0_CAPTION-7B.pth' # noqa: E501 - } - ckpt = torch.load(osp.join(ckpt_root, ckpt_map[name]), - map_location='cpu') - - model_cfg = ckpt.get('config', {}) - - self.model = LLaMA_adapter( - llama_ckpt_dir, - llama_tokenzier_path, - max_seq_len=512, - max_batch_size=1, - clip_model='ViT-L/14', - v_embed_dim=768, - v_depth=8, - v_num_heads=16, - v_mlp_ratio=4.0, - query_len=10, - query_layer=31, - w_bias=model_cfg.get('w_bias', False), - w_lora=model_cfg.get('w_lora', False), - lora_rank=model_cfg.get('lora_rank', 16), - prompt_constructor=prompt_constructor, - post_processor=post_processor, - ) - - self.model.load_state_dict(ckpt['model'], strict=False) - self.mode = mode - - def forward(self, batch): - if self.mode == 'generation': - return self.model.generate(batch) diff --git a/opencompass/multimodal/models/llama_adapter_v2_multimodal/post_processor.py b/opencompass/multimodal/models/llama_adapter_v2_multimodal/post_processor.py deleted file mode 100644 index 60909a3d..00000000 --- a/opencompass/multimodal/models/llama_adapter_v2_multimodal/post_processor.py +++ /dev/null @@ -1,15 +0,0 @@ -import torch - - -class LlamaAadapterMMBenchPostProcessor: - """"Post processor for Llama Aadapter V2 on MMBench.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor) -> str: - - if len(output_token) >= 2: - if output_token[1] == '.': - output_token = output_token[2:].strip() - return output_token diff --git a/opencompass/multimodal/models/llama_adapter_v2_multimodal/prompt_constructor.py b/opencompass/multimodal/models/llama_adapter_v2_multimodal/prompt_constructor.py deleted file mode 100644 index 50b47b8f..00000000 --- a/opencompass/multimodal/models/llama_adapter_v2_multimodal/prompt_constructor.py +++ /dev/null @@ -1,58 +0,0 @@ -from typing import List - -from mmpretrain.structures import DataSample - - -class LlamaAadapterMMBenchPromptConstructor: - """Prompt constructor for Llama Adapter v2 on MMBench. - - Args: - image_prompt (str): Image prompt. Defaults to `''`. - reply_prompt (str): Reply prompt. Defaults to `''`. - """ - - def __init__(self, image_prompt: str = '', reply_prompt: str = '') -> None: - self.image_prompt = image_prompt - self.reply_prompt = reply_prompt - - def __call__(self, inputs: dict) -> dict: - """Construct prompt. - - Args: - inputs (dict): Input data containing image and data_samples. - - Returns: - dict: A dict containing prompt, images and data_samples. 
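Aside (illustrative, not part of the removed files): the adapter's generate() loop above draws the next token with sample_top_p, which is imported from the LLaMA-Adapter repo rather than defined here. A typical nucleus-sampling implementation of that step, shown as an assumption about its behaviour rather than that repo's exact code:

import torch


def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor:
    """Nucleus sampling: draw from the smallest prefix of sorted probs whose mass exceeds p."""
    sorted_probs, sorted_idx = torch.sort(probs, dim=-1, descending=True)
    cumulative = torch.cumsum(sorted_probs, dim=-1)
    # zero out tokens outside the nucleus, then renormalise and sample
    sorted_probs[cumulative - sorted_probs > p] = 0.0
    sorted_probs.div_(sorted_probs.sum(dim=-1, keepdim=True))
    next_token = torch.multinomial(sorted_probs, num_samples=1)
    return torch.gather(sorted_idx, -1, next_token)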
- """ - data_samples = inputs['data_samples'] - prompt = self._process(data_samples) - inputs.update({'prompt': prompt}) - - return inputs - - def _process(self, data_samples: List[DataSample]) -> str: - """Process data sample to prompt. - - Args: - data_samples (List[DataSample]): A list of data_samples. - - Returns: - str: Prompt. - """ - # import pdb;pdb.set_trace() - question = [ - data_sample.get('question') for data_sample in data_samples - ] - options = [data_sample.get('options') for data_sample in data_samples] - if data_samples[0].get('context') is not None: - context = [ - data_sample.get('context') for data_sample in data_samples - ] - else: - context = [''] * len(data_samples) - prompts = [] - for cur_context, cur_question, cur_options in zip( - context, question, options): - prompts.append(cur_context + ' ' + cur_question + ' ' + - cur_options) # noqa - return prompts diff --git a/opencompass/multimodal/models/llava/__init__.py b/opencompass/multimodal/models/llava/__init__.py deleted file mode 100644 index 4fc919fa..00000000 --- a/opencompass/multimodal/models/llava/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -from .llava import LLaVA -from .post_processor import LLaVABasePostProcessor, LLaVAVSRPostProcessor -from .prompt_constructor import (LLaVABasePromptConstructor, - LLaVAMMBenchPromptConstructor, - LLaVAScienceQAPromptConstructor, - LLaVAVQAPromptConstructor) - -__all__ = [ - 'LLaVA', 'LLaVABasePromptConstructor', 'LLaVAMMBenchPromptConstructor', - 'LLaVABasePostProcessor', 'LLaVAVQAPromptConstructor', - 'LLaVAScienceQAPromptConstructor', 'LLaVAVSRPostProcessor' -] diff --git a/opencompass/multimodal/models/llava/llava.py b/opencompass/multimodal/models/llava/llava.py deleted file mode 100644 index 54835a78..00000000 --- a/opencompass/multimodal/models/llava/llava.py +++ /dev/null @@ -1,156 +0,0 @@ -import importlib -import os -import sys - -import mmengine -import torch -import torch.nn as nn -from mmengine.device import get_device -from transformers import StoppingCriteria - -from opencompass.registry import MM_MODELS - -IMAGE_TOKEN_INDEX = -200 - - -def load_package(): - """Load required packages from LLaVA.""" - current_file_path = os.path.abspath(__file__) - current_folder_path = os.path.dirname(current_file_path) - - sys.path.append(os.path.join(current_folder_path, 'LLaVA')) # noqa - return - - -class KeywordsStoppingCriteria(StoppingCriteria): - """Keyword stopping criteria implemented for llava.""" - - def __init__(self, keywords, tokenizer, input_ids): - self.keywords = keywords - self.tokenizer = tokenizer - self.start_len = None - self.input_ids = input_ids - - def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, - **kwargs) -> bool: - if self.start_len is None: - self.start_len = self.input_ids.shape[1] - else: - outputs = self.tokenizer.batch_decode(output_ids[:, - self.start_len:], - skip_special_tokens=True)[0] - for keyword in self.keywords: - if keyword in outputs: - return True - return False - - -@MM_MODELS.register_module('llava') -class LLaVA(nn.Module): - """Inference code of LLaVA. Need to clone LLaVA official repo first. Please - check out the README in config. - - Args: - model_path (str): The path of llava checkpoint. - prompt_constructor (dict): The config of prompt constructor. - post_processor (dict): The config of post processor. - is_caption_task (bool): Whether the task is caption task. - Defaults to False. 
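Aside (illustrative, not part of the removed files): KeywordsStoppingCriteria above decodes the tokens generated so far and stops once a keyword such as the conversation separator appears. A minimal sketch of the same idea against the Hugging Face StoppingCriteria interface, assuming a causal LM and its tokenizer; StopOnKeyword is an illustrative class, not the repo's.

import torch
from transformers import StoppingCriteria, StoppingCriteriaList


class StopOnKeyword(StoppingCriteria):
    """Stop generation once a keyword shows up in the text generated after the prompt."""

    def __init__(self, keyword: str, tokenizer, prompt_len: int) -> None:
        self.keyword = keyword
        self.tokenizer = tokenizer
        self.prompt_len = prompt_len

    def __call__(self, input_ids: torch.LongTensor,
                 scores: torch.FloatTensor, **kwargs) -> bool:
        new_text = self.tokenizer.decode(input_ids[0, self.prompt_len:],
                                         skip_special_tokens=True)
        return self.keyword in new_text


# usage with a Hugging Face model (model, tokenizer and input_ids assumed to exist):
# criteria = StoppingCriteriaList([StopOnKeyword('###', tokenizer, input_ids.shape[1])])
# output_ids = model.generate(input_ids, stopping_criteria=criteria)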
- """ - - def __init__( - self, - model_path: str, - prompt_constructor: dict, - post_processor: dict, - is_caption_task: bool = False, - ) -> None: - super().__init__() - self.dtype = torch.float16 - self.is_caption_task = is_caption_task - - # load LLaVA modules - load_package() - mm_utils = importlib.import_module('llava.mm_utils') - builder = importlib.import_module('llava.model.builder') - - # load pretrained LLaVA - # Note: When encounters with device related errors, - # try setting `low_cpu_mem_usage` in `load_pretrained_model` as False - model_name = mm_utils.get_model_name_from_path(model_path) - tokenizer, model, _, _ = builder.load_pretrained_model( - model_path, None, model_name) - vision_tower = model.get_vision_tower() - vision_tower.to(device=get_device(), dtype=self.dtype) - model.to(device=get_device(), dtype=self.dtype) - - # load prompt constructor and post processor - if 'v1' in model_path.lower(): - conv_mode = 'llava_v1' - elif 'mpt' in model_path.lower(): - conv_mode = 'mpt_multimodal' - else: - conv_mode = 'multimodal' - mm_use_im_start_end = getattr(model.config, 'mm_use_im_start_end', - False) - prompt_constructor.update({ - 'conv_mode': conv_mode, - 'mm_use_im_start_end': mm_use_im_start_end - }) - self.prompt_constructor = mmengine.registry.build_from_cfg( - prompt_constructor, MM_MODELS) - self.post_processor = mmengine.registry.build_from_cfg( - post_processor, MM_MODELS) - self.model = model - self.tokenizer = tokenizer - - def generate(self, batch): - - prompt, stop_str = self.prompt_constructor(batch) - keywords = [stop_str] - data_sample = batch['data_samples'][0] - - image = batch['inputs'][0].unsqueeze(0) - if image is not None: - images = image.to(get_device()) - else: - images = None - - mm_utils = importlib.import_module('llava.mm_utils') - input_ids = mm_utils.tokenizer_image_token( - prompt, self.tokenizer, IMAGE_TOKEN_INDEX, - return_tensors='pt').unsqueeze(0).to(get_device()) - - stopping_criteria = KeywordsStoppingCriteria(keywords, self.tokenizer, - input_ids) - - with torch.inference_mode(): - output_ids = self.model.generate( - input_ids, - images=images.half(), - do_sample=True, - temperature=0.2, - max_new_tokens=1024, - stopping_criteria=[stopping_criteria], - ) - - input_token_len = input_ids.shape[1] - n_diff_input_output = (input_ids != - output_ids[:, :input_token_len]).sum().item() - if n_diff_input_output > 0: - print( - f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids' # noqa - ) - outputs = self.tokenizer.batch_decode(output_ids[:, input_token_len:], - skip_special_tokens=True)[0] - - output_text = self.post_processor(outputs, stop_str) - - if self.is_caption_task: - data_sample.pred_caption = output_text - else: - data_sample.pred_answer = output_text - return data_sample - - def forward(self, batch): - return self.generate(batch) diff --git a/opencompass/multimodal/models/llava/post_processor.py b/opencompass/multimodal/models/llava/post_processor.py deleted file mode 100644 index 51066182..00000000 --- a/opencompass/multimodal/models/llava/post_processor.py +++ /dev/null @@ -1,28 +0,0 @@ -class LLaVABasePostProcessor: - """Base post processor for LLaVA on MMBench.""" - - def __init__(self) -> None: - pass - - def __call__(self, outputs: str, stop_str: str) -> str: - outputs = outputs.strip() - if outputs.endswith(stop_str): - outputs = outputs[:-len(stop_str)] - output_text = outputs.strip() - return output_text - - -class LLaVAVSRPostProcessor(LLaVABasePostProcessor): - """VSR post processor for 
LLaVA on MMBench.""" - - def __init__(self) -> None: - super().__init__() - - def __call__(self, outputs: str, stop_str: str) -> str: - output_text = super().__call__(outputs, stop_str) - if 'yes' in output_text.lower(): - return 'yes' - elif 'no' in output_text.lower(): - return 'no' - else: - return 'unknown' diff --git a/opencompass/multimodal/models/llava/prompt_constructor.py b/opencompass/multimodal/models/llava/prompt_constructor.py deleted file mode 100644 index c25496a4..00000000 --- a/opencompass/multimodal/models/llava/prompt_constructor.py +++ /dev/null @@ -1,139 +0,0 @@ -import importlib - -DEFAULT_IMAGE_TOKEN = '' -DEFAULT_IMAGE_PATCH_TOKEN = '' -DEFAULT_IM_START_TOKEN = '' -DEFAULT_IM_END_TOKEN = '' - - -class LLaVABasePromptConstructor: - """Base prompt constructor for LLaVA. - - Args: - conv_mode (str): Version control args for different version of LLaVA. - mm_use_im_start_end (bool): - Config arg. Use start and end token when build prompt or not. - reply_prompt (str): Reply prompt added at the end. (Default: '') - """ - - def __init__(self, - conv_mode: str, - mm_use_im_start_end: bool, - reply_prompt: str = '') -> None: - conversation = importlib.import_module('llava.conversation') - self.conv_templates = conversation.conv_templates - self.conv_mode = conv_mode - self.mm_use_im_start_end = mm_use_im_start_end - self.SeparatorStyle = conversation.SeparatorStyle - self.reply_prompt = reply_prompt - - def __call__(self, inputs: dict) -> tuple: - """Construct prompt. - - Args: - inputs (dict): Input data containing images and data_samples. - - Returns: - tuple: A tuple containing prompt, images and data_samples. - """ - data_samples = inputs['data_samples'] - assert len(data_samples) == 1 - prompt = self._build_prompt(data_samples[0]) - if self.mm_use_im_start_end: - prompt = (DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + - DEFAULT_IM_END_TOKEN + '\n' + prompt) - else: - prompt = DEFAULT_IMAGE_TOKEN + '\n' + prompt # noqa - - conv = self.conv_templates[self.conv_mode].copy() - conv.append_message(conv.roles[0], prompt) - conv.append_message(conv.roles[1], None) - output_prompt = conv.get_prompt() - - stop_str = conv.sep if conv.sep_style != self.SeparatorStyle.TWO else conv.sep2 # noqa - - return output_prompt, stop_str - - def _build_prompt(self, data_sample): - return self.reply_prompt - - -class LLaVAMMBenchPromptConstructor(LLaVABasePromptConstructor): - """MMBench prompt constructor for LLaVA. - - Args: - conv_mode (str): Version control args for different version of LLaVA. - mm_use_im_start_end (bool): - Config arg. Use start and end token when build prompt or not. - reply_prompt (str): Reply prompt added at the end. (Default: '') - """ - - def __init__(self, - conv_mode: str, - mm_use_im_start_end: bool, - reply_prompt: str = '') -> None: - super().__init__(conv_mode, mm_use_im_start_end, reply_prompt) - - def _build_prompt(self, data_sample): - question = data_sample.get('question') - options = data_sample.get('options') - context = data_sample.get('context') - if context is not None: - prompt = context + ' ' + question + ' ' + options - else: - prompt = question + ' ' + options - prompt += self.reply_prompt - return prompt - - -class LLaVAVQAPromptConstructor(LLaVABasePromptConstructor): - """VQA prompt constructor for LLaVA. - - Args: - conv_mode (str): Version control args for different version of LLaVA. - mm_use_im_start_end (bool): - Config arg. Use start and end token when build prompt or not. - reply_prompt (str): Reply prompt added at the end. 
(Default: '') - """ - - def __init__(self, - conv_mode: str, - mm_use_im_start_end: bool, - reply_prompt: str = '') -> None: - super().__init__(conv_mode, mm_use_im_start_end, reply_prompt) - - def _build_prompt(self, data_sample): - prompt = data_sample.get('question') - prompt += self.reply_prompt - return prompt - - -class LLaVAScienceQAPromptConstructor(LLaVABasePromptConstructor): - """ScienceQA prompt constructor for LLaVA. - - Args: - conv_mode (str): Version control args for different version of LLaVA. - mm_use_im_start_end (bool): - Config arg. Use start and end token when build prompt or not. - reply_prompt (str): Reply prompt added at the end. (Default: '') - """ - - choice_mapping = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F'} - - def __init__(self, - conv_mode: str, - mm_use_im_start_end: bool, - reply_prompt: str = '') -> None: - super().__init__(conv_mode, mm_use_im_start_end, reply_prompt) - - def _build_prompt(self, data_sample): - question = data_sample.get('question') - choices = data_sample.get('choices') - choices = [ - f'({self.choice_mapping[i]}) ' + item - for i, item in enumerate(choices) - ] - choices = 'Choices: ' + ' '.join(choices) + '\n' - context = 'Context: ' + data_sample.get('hint') + '\n' - prompt = context + question + choices + self.reply_prompt - return prompt diff --git a/opencompass/multimodal/models/minigpt_4/__init__.py b/opencompass/multimodal/models/minigpt_4/__init__.py deleted file mode 100644 index 20082111..00000000 --- a/opencompass/multimodal/models/minigpt_4/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from .minigpt_4 import MiniGPT4Inferencer -from .post_processor import (MiniGPT4COCOCaptionPostProcessor, - MiniGPT4MMBenchPostProcessor, - MiniGPT4MMEPostProcessor, - MiniGPT4ScienceQAPostProcessor, - MiniGPT4VQAPostProcessor, - MiniGPT4VSRPostProcessor) -from .prompt_constructor import MiniGPT4VSRPromptConstructor # noqa -from .prompt_constructor import (MiniGPT4COCOCaotionPromptConstructor, - MiniGPT4MMBenchPromptConstructor, - MiniGPT4MMEPromptConstructor, - MiniGPT4ScienceQAPromptConstructor, - MiniGPT4SEEDBenchPromptConstructor, - MiniGPT4VQAPromptConstructor) - -__all__ = [ - 'MiniGPT4Inferencer', 'MiniGPT4MMBenchPostProcessor', - 'MiniGPT4MMBenchPromptConstructor', 'MiniGPT4COCOCaotionPromptConstructor', - 'MiniGPT4COCOCaptionPostProcessor', 'MiniGPT4ScienceQAPromptConstructor', - 'MiniGPT4ScienceQAPostProcessor', 'MiniGPT4VQAPromptConstructor', - 'MiniGPT4VQAPostProcessor', 'MiniGPT4VSRPostProcessor', - 'MiniGPT4VSRPromptConstructor', 'MiniGPT4SEEDBenchPromptConstructor', - 'MiniGPT4MMEPostProcessor', 'MiniGPT4MMEPromptConstructor' -] diff --git a/opencompass/multimodal/models/minigpt_4/minigpt_4.py b/opencompass/multimodal/models/minigpt_4/minigpt_4.py deleted file mode 100644 index f722526c..00000000 --- a/opencompass/multimodal/models/minigpt_4/minigpt_4.py +++ /dev/null @@ -1,295 +0,0 @@ -import os -import sys - -import mmengine -import torch -import torch.nn as nn -from mmengine.device import get_device -from transformers import StoppingCriteriaList - -from opencompass.registry import MM_MODELS - -from .utils import StoppingCriteriaSub - - -class LayerNorm(nn.LayerNorm): - """Subclass torch's LayerNorm to handle fp16.""" - - def forward(self, x: torch.Tensor): - orig_type = x.dtype - ret = super().forward(x.type(torch.float32)) - return ret.type(orig_type) - - -def load_package(): - """Load required packages from MiniGPT-4.""" - current_file_path = os.path.abspath(__file__) - current_folder_path = 
os.path.dirname(current_file_path) - - sys.path.append(os.path.join(current_folder_path, 'MiniGPT-4')) # noqa - - try: - # the latest version of MiniGPT4 - from minigpt4.models.minigpt4 import MiniGPT4 - except ImportError: - # the old version of MiniGPT4 - from minigpt4.models.mini_gpt4 import MiniGPT4 - - sys.path.pop(-1) - - return MiniGPT4 - - -MiniGPT4 = load_package() - - -@MM_MODELS.register_module('minigpt-4') -class MiniGPT4Inferencer(MiniGPT4): - """Inference code of MiniGPT-4. - - Args: - llama_model (str): The path of vicuna path. - prompt_constructor (dict): The config of prompt constructor. - post_processor (dict): The config of post processor. - do_sample (bool): Whether use sampling. Defaults to False. - max_length (int): The max length of output. Defaults to 30. - img_size (int): The size of image. Defaults to 224. - low_resource (bool): Whether loaded in low precision. - Defaults to False. - is_caption_task (bool): Whether the task is caption task. - Defaults to False. - """ - - def __init__(self, - llama_model: str, - prompt_constructor: dict, - post_processor: dict, - do_sample: bool = False, - max_length: int = 30, - img_size: int = 224, - low_resource: bool = False, - is_caption_task: bool = False, - mode: str = 'generation', - n_segments: int = 1) -> None: - super().__init__(llama_model=llama_model, - low_resource=low_resource, - img_size=img_size) - self.mode = mode - self.n_segments = n_segments - - cur_device = get_device() - stop_words_ids = [ - torch.tensor([835]).to(cur_device), - torch.tensor([2277, 29937]).to(cur_device), - ] - self.stopping_criteria = StoppingCriteriaList( - [StoppingCriteriaSub(stops=stop_words_ids)]) - - self.prompt_constructor = mmengine.registry.build_from_cfg( - prompt_constructor, MM_MODELS) - if post_processor is not None: - self.post_processor = mmengine.registry.build_from_cfg( - post_processor, MM_MODELS) - self.do_sample = do_sample - self.max_length = max_length - self.is_caption_task = is_caption_task - - def forward(self, batch): - if self.mode == 'generation': - return self.generate(batch) - elif self.mode == 'loss': - return self.loss(batch) - else: - raise RuntimeError(f'Invalid mode "{self.mode}".') - - def encode_img(self, image): - device = image.device - - with self.maybe_autocast(): - if image.dim() == 5: - inputs_llama, atts_llama = [], [] - for j in range(image.size(2)): - this_frame = image[:, :, j, :, :] - frame_embeds = self.ln_vision( - self.visual_encoder(this_frame)) - frame_atts = torch.ones(frame_embeds.size()[:-1], - dtype=torch.long).to(image.device) - - query_tokens = self.query_tokens.expand( - frame_embeds.shape[0], -1, -1) - frame_query_output = self.Qformer.bert( - query_embeds=query_tokens, - encoder_hidden_states=frame_embeds, - encoder_attention_mask=frame_atts, - return_dict=True, - ) - - frame_inputs_llama = self.llama_proj( - frame_query_output.last_hidden_state[:, :query_tokens. 
- size(1), :]) - frame_atts_llama = torch.ones( - frame_inputs_llama.size()[:-1], - dtype=torch.long).to(image.device) - inputs_llama.append(frame_inputs_llama) - atts_llama.append(frame_atts_llama) - inputs_llama = torch.cat(inputs_llama, dim=1) - atts_llama = torch.cat(atts_llama, dim=1) - else: - image_embeds = self.ln_vision( - self.visual_encoder(image)).to(device) - image_atts = torch.ones(image_embeds.size()[:-1], - dtype=torch.long).to(device) - - query_tokens = self.query_tokens.expand( - image_embeds.shape[0], -1, -1) - query_output = self.Qformer.bert( - query_embeds=query_tokens, - encoder_hidden_states=image_embeds, - encoder_attention_mask=image_atts, - return_dict=True, - ) - - inputs_llama = self.llama_proj(query_output.last_hidden_state) - atts_llama = torch.ones(inputs_llama.size()[:-1], - dtype=torch.long).to(image.device) - return inputs_llama, atts_llama - - def pack_inputs(self, batch): - images = [image.unsqueeze(0) for image in batch['inputs']] - data_samples = [data_sample for data_sample in batch['data_samples']] - images = torch.cat(images, dim=0).to(get_device()) - inputs = {'image': images, 'data_samples': data_samples} - return inputs - - def generate(self, batch): - inputs = self.pack_inputs(batch) - inputs = self.prompt_constructor(inputs) - image = inputs['image'] - prompt = inputs['prompt'] - data_samples = inputs['data_samples'] - - # The main process of generation - img_embeds, _ = self.encode_img(image) - prompt_segs = prompt.split('') - prompt_seg_tokens = [ - self.llama_tokenizer(seg, - return_tensors='pt', - add_special_tokens=i == 0). - to(self.llama_model.model.embed_tokens.weight.device).input_ids - for i, seg in enumerate(prompt_segs) - ] - prompt_seg_embs = [ - self.llama_model.model.embed_tokens(seg) - for seg in prompt_seg_tokens - ] - prompt_seg_embs = [prompt_seg_embs[0], img_embeds, prompt_seg_embs[1]] - prompt_embs = torch.cat(prompt_seg_embs, dim=1) - - # generate output - outputs = self.llama_model.generate( - inputs_embeds=prompt_embs, - max_length=self.max_length, - num_beams=5, - do_sample=self.do_sample, - min_length=1, - top_p=0.9, - repetition_penalty=1.0, - length_penalty=-1.0, - temperature=1.0, - stopping_criteria=self.stopping_criteria, - num_return_sequences=1) - - for i, data_sample in enumerate(data_samples): - output_token = outputs[i] - output_text = self.post_processor(output_token, - self.llama_tokenizer) - if self.is_caption_task: - data_sample.pred_caption = output_text - else: - data_sample.pred_answer = output_text - data_samples[i] = data_sample - return data_samples - - def loss(self, batch): - inputs = self.pack_inputs(batch) - inputs = self.prompt_constructor(inputs) - image = inputs['image'] - batch_size = image.size(0) - prompt = inputs['prompt'] - data_samples = inputs['data_samples'] - choices = data_samples[0].choices - - with torch.no_grad(): - img_embeds, atts_img = self.encode_img(image) - img_embeds, atts_img = self.prompt_wrap(img_embeds, atts_img, - prompt) - - self.llama_tokenizer.padding_side = 'right' - - n_cands = len(choices) - losses = [] - for n in range(self.n_segments): - seg_len = n_cands // self.n_segments - if n == (self.n_segments - 1): - seg_len = n_cands - seg_len * (self.n_segments - 1) - - to_regress_tokens = self.llama_tokenizer( - choices, - return_tensors='pt', - padding='longest', - truncation=True, - max_length=self.max_txt_len, - add_special_tokens=False).to(image.device) - - targets = to_regress_tokens.input_ids.masked_fill( - to_regress_tokens.input_ids == - 
self.llama_tokenizer.pad_token_id, -100) - - empty_targets = ( - torch.ones([atts_img.shape[0], atts_img.shape[1] + 1], - dtype=torch.long).to(image.device).fill_( - -100) # plus one for bos - ) - empty_targets = empty_targets.repeat_interleave(seg_len, dim=0) - targets = torch.cat([empty_targets, targets], dim=1) - - bos = torch.ones([batch_size, 1], - dtype=to_regress_tokens.input_ids.dtype, - device=to_regress_tokens.input_ids.device - ) * self.llama_tokenizer.bos_token_id - bos_embeds = self.llama_model.model.embed_tokens(bos) - bos_embeds = bos_embeds.repeat_interleave(seg_len, dim=0) - img_embeds = img_embeds.repeat_interleave(seg_len, dim=0) - - atts_bos = atts_img[:, :1] - atts_bos = atts_bos.repeat_interleave(seg_len, dim=0) - atts_img = atts_img.repeat_interleave(seg_len, dim=0) - - to_regress_embeds = self.llama_model.model.embed_tokens( - to_regress_tokens.input_ids) - - inputs_embeds = torch.cat( - [bos_embeds, img_embeds, to_regress_embeds], dim=1) - attention_mask = torch.cat( - [atts_bos, atts_img, to_regress_tokens.attention_mask], - dim=1) - - with self.maybe_autocast(): - outputs = self.llama_model( - inputs_embeds=inputs_embeds, - attention_mask=attention_mask, - return_dict=True, - labels=targets, - reduction='none', - ) - loss = outputs.loss - loss = loss.view(targets.size(0), -1).sum(1) - loss = loss.reshape(batch_size, seg_len) - losses.append(loss) - # losses of 4 choices - losses = torch.cat(losses, dim=-1)[0] - - for i, data_sample in enumerate(data_samples): - data_sample.losses = losses - data_samples[i] = data_sample - return data_samples diff --git a/opencompass/multimodal/models/minigpt_4/post_processor.py b/opencompass/multimodal/models/minigpt_4/post_processor.py deleted file mode 100644 index b1f3428e..00000000 --- a/opencompass/multimodal/models/minigpt_4/post_processor.py +++ /dev/null @@ -1,142 +0,0 @@ -import random -import re - -import torch - - -class MiniGPT4MMBenchPostProcessor: - """"Post processor for MiniGPT-4 on MMBench.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - - if output_token[0] == 0: - output_token = output_token[1:] - if output_token[0] == 1: - output_token = output_token[1:] - output_text = tokenizer.decode(output_token, - add_special_tokens=False) # noqa - output_text = self._extract_key_words(output_text) - return output_text - - def _extract_key_words(self, output_text: str) -> str: - - output_text = output_text.split('###')[0] - output_text = output_text.split('Assistant:')[-1].strip() - output_text = output_text.strip('') - output_text = output_text.strip('') - output_text = output_text.strip() - pattern = re.compile(r'([A-Z]\.)') - res = pattern.findall(output_text) - if len(res) > 0: - output_text = res[0][:-1] - return output_text - - -class MiniGPT4COCOCaptionPostProcessor: - """"Post processor for MiniGPT-4 on COCO Caption.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - - if output_token[0] == 0: - output_token = output_token[1:] - if output_token[0] == 1: - output_token = output_token[1:] - output_text = tokenizer.decode(output_token, - add_special_tokens=False) # noqa - output_text = output_text.split('###')[0] - output_text = output_text.split('Assistant:')[-1].strip() - output_text = output_text.split('. 
')[0] - output_text = output_text.strip('') - output_text = output_text.strip() - return output_text - - -class MiniGPT4ScienceQAPostProcessor: - """"Post processor for MiniGPT-4 on ScienceQA.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - - if output_token[0] == 0: - output_token = output_token[1:] - if output_token[0] == 1: - output_token = output_token[1:] - output_text = tokenizer.decode(output_token, - add_special_tokens=False) # noqa - output_text = output_text.split('###')[0] - output_text = output_text.split('Assistant:')[-1].strip() - pattern = re.compile(r'\(([A-Z])\)') - output_text = pattern.findall(output_text) - if len(output_text) == 0: - output_text = random.choice(['A', 'B', 'C', 'D']) - else: - output_text = output_text[0] - return output_text - - -class MiniGPT4VQAPostProcessor: - """"Post processor for MiniGPT-4 on VQA.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - - if output_token[0] == 0: - output_token = output_token[1:] - if output_token[0] == 1: - output_token = output_token[1:] - output_text = tokenizer.decode(output_token, - add_special_tokens=False) # noqa - output_text = output_text.split('###')[0] - output_text = output_text.split('Assistant:')[-1].strip() - return output_text - - -class MiniGPT4VSRPostProcessor: - """"Post processor for MiniGPT-4 on VSR.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - - if output_token[0] == 0: - output_token = output_token[1:] - if output_token[0] == 1: - output_token = output_token[1:] - output_text = tokenizer.decode(output_token, add_special_tokens=False) - pattern = r'yes|no|Yes|No' - output_text = re.findall(pattern, output_text) - if len(output_text) > 0: - output_text = output_text[0].lower() - return output_text - - -class MiniGPT4MMEPostProcessor(MiniGPT4MMBenchPostProcessor): - """"Post processor for MiniGPT-4 on MME.""" - - def __init__(self) -> None: - super().__init__() - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - response = super().__call__(output_token, tokenizer) - # extract yes or no, copy from MME official evaluation script - prefix_pred_ans = response[:4].lower() - - if 'yes' in prefix_pred_ans: - pred_label = 'yes' - elif 'no' in prefix_pred_ans: - pred_label = 'no' - else: - pred_label = 'other' - - return pred_label diff --git a/opencompass/multimodal/models/minigpt_4/prompt_constructor.py b/opencompass/multimodal/models/minigpt_4/prompt_constructor.py deleted file mode 100644 index f6d8604d..00000000 --- a/opencompass/multimodal/models/minigpt_4/prompt_constructor.py +++ /dev/null @@ -1,187 +0,0 @@ -from typing import List - -from mmpretrain.structures import DataSample - - -class MiniGPT4MMBenchPromptConstructor: - """Prompt constructor for MiniGPT-4 on MMBench. - - Args: - image_prompt (str): Image prompt. Defaults to `''`. - reply_prompt (str): Reply prompt. Defaults to `''`. - """ - - def __init__(self, image_prompt: str = '', reply_prompt: str = '') -> None: - self.image_prompt = image_prompt - self.reply_prompt = reply_prompt - - def __call__(self, inputs: dict) -> dict: - """Construct prompt. - - Args: - inputs (dict): Input data containing image and data_samples. - - Returns: - dict: A dict containing prompt, images and data_samples. 
- """ - data_samples = inputs['data_samples'] - prompt = self._process(data_samples) - inputs.update({'prompt': prompt}) - - return inputs - - def _process(self, data_samples: List[DataSample]) -> str: - """Process data sample to prompt. - - Args: - data_samples (List[DataSample]): A list of data_samples. - - Returns: - str: Prompt. - """ - assert len(data_samples) == 1, 'Only support batch size 1.' - questions = [ - data_sample.get('question') for data_sample in data_samples - ] - options = [data_sample.get('options') for data_sample in data_samples] - contexts = [data_sample.get('context') for data_sample in data_samples] - question = questions[0] - option = options[0] - context = contexts[0] - if context is not None: - prompt = self.image_prompt + ' ' + context + ' ' + question + ' ' + option + ' ' + self.reply_prompt # noqa - else: - prompt = self.image_prompt + ' ' + question + ' ' + option + ' ' + self.reply_prompt # noqa - return prompt - - -class MiniGPT4COCOCaotionPromptConstructor(MiniGPT4MMBenchPromptConstructor): - """Prompt constructor for MiniGPT-4 on COCO Caption.""" - - def _process(self, data_samples: List[DataSample]) -> str: - assert len(data_samples) == 1, 'Only support batch size 1.' - prompt = self.image_prompt + ' ' + 'a photo of' + self.reply_prompt - return prompt - - -class MiniGPT4ScienceQAPromptConstructor(MiniGPT4MMBenchPromptConstructor): - """Prompt constructor for MiniGPT-4 on ScienceQA.""" - - choice_mapping = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F'} - - def _process(self, data_samples: List[DataSample]) -> str: - assert len(data_samples) == 1, 'Only support batch size 1.' - questions = [ - 'Question: ' + data_sample.get('question') + '\n' - for data_sample in data_samples - ] # noqa - choices = [data_sample.get('choices') for data_sample in data_samples] - choices = [[ - f'({self.choice_mapping[i]}) ' + item - for i, item in enumerate(choice) - ] for choice in choices] - choices = [ - 'Choices: ' + ' '.join(choice) + '\n' for choice in choices - ] # noqa - contexts = [ - 'Context: ' + data_sample.get('hint') + '\n' - for data_sample in data_samples - ] # noqa - question = questions[0] - choice = choices[0] - context = contexts[0] - prompt = self.image_prompt + ' ' + context + ' ' + question + ' ' + choice + self.reply_prompt + ' ' + 'The answer is' # noqa - return prompt - - -class MiniGPT4VQAPromptConstructor(MiniGPT4MMBenchPromptConstructor): - """Prompt constructor for MiniGPT-4 on VQA.""" - - def _process(self, data_samples: List[DataSample]) -> str: - assert len(data_samples) == 1, 'Only support batch size 1.' - questions = [ - data_sample.get('question') for data_sample in data_samples - ] - question = questions[0] - prompt = self.image_prompt + ' ' + question + ' ' + 'Answer this question in a single word.' + ' ' + self.reply_prompt # noqa - return prompt - - -class MiniGPT4VSRPromptConstructor(MiniGPT4MMBenchPromptConstructor): - """Prompt constructor for MiniGPT-4 on VSR.""" - - def _process(self, data_samples: List[DataSample]) -> str: - assert len(data_samples) == 1, 'Only support batch size 1.' - questions = [ - data_sample.get('question') for data_sample in data_samples - ] - question = questions[0] - prompt = self.image_prompt + ' ' + question + ' ' + 'Is the above description correct? Answer yes or no.' + ' ' + self.reply_prompt # noqa - return prompt - - -class MiniGPT4SEEDBenchPromptConstructor(MiniGPT4MMBenchPromptConstructor): - - def _process(self, data_samples: List[DataSample]) -> str: - """Process data sample to prompt. 
- - Args: - data_samples (List[DataSample]): A list of data_samples. - - Returns: - str: Prompt. - """ - assert len(data_samples) == 1, 'Only support batch size 1.' - questions = [ - data_sample.get('question') for data_sample in data_samples - ] - question = questions[0] - prompt = self.image_prompt + ' ' + question + ' ' + self.reply_prompt - return prompt - - -class MiniGPT4MMEPromptConstructor: - """Prompt constructor for MiniGPT-4 on MME. - - Args: - image_prompt (str): Image prompt. Defaults to `''`. - reply_prompt (str): Reply prompt. Defaults to `''`. - """ - - def __init__(self) -> None: - self.system_prompt = ( - 'Give the following image: ImageContent.' - 'You will be able to see the image once I provide it to you.' - 'Please answer my questions.') - self.sep = '###' - - def __call__(self, inputs: dict) -> dict: - """Construct prompt. - - Args: - inputs (dict): Input data containing image and data_samples. - - Returns: - dict: A dict containing prompt, images and data_samples. - """ - data_samples = inputs['data_samples'] - prompt = self._process(data_samples) - inputs.update({'prompt': prompt}) - - return inputs - - def _process(self, data_samples: List[DataSample]) -> str: - """Process data sample to prompt. - - Args: - data_samples (List[DataSample]): A list of data_samples. - - Returns: - str: Prompt. - """ - assert len(data_samples) == 1, 'Only support batch size 1.' - question = data_samples[0].get('question') - prompt = self.system_prompt + self.sep - prompt += 'Human: ' + question + ' ' + '' + ' ' + self.sep # noqa - prompt += 'Assistant: ' - return prompt diff --git a/opencompass/multimodal/models/minigpt_4/utils.py b/opencompass/multimodal/models/minigpt_4/utils.py deleted file mode 100644 index 777c1939..00000000 --- a/opencompass/multimodal/models/minigpt_4/utils.py +++ /dev/null @@ -1,56 +0,0 @@ -import os -import re - -import timm.models.hub as timm_hub -import torch -import torch.distributed as dist -from mmengine.dist import is_distributed, is_main_process -from transformers import StoppingCriteria - - -class StoppingCriteriaSub(StoppingCriteria): - - def __init__(self, stops=[], encounters=1): - super().__init__() - self.stops = stops - - def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor): - for stop in self.stops: - if torch.all((stop == input_ids[0][-len(stop):])).item(): - return True - - return False - - -def download_cached_file(url, check_hash=True, progress=False): - """Download a file from a URL and cache it locally. - - If the file already exists, it is not downloaded again. If distributed, - only the main process downloads the file, and the other processes wait for - the file to be downloaded. - """ - - def get_cached_file_path(): - # a hack to sync the file path across processes - parts = torch.hub.urlparse(url) - filename = os.path.basename(parts.path) - cached_file = os.path.join(timm_hub.get_cache_dir(), filename) - - return cached_file - - if is_main_process(): - timm_hub.download_cached_file(url, check_hash, progress) - - if is_distributed(): - dist.barrier() - - return get_cached_file_path() - - -def is_url(input_url): - """Check if an input string is a url. 
- - look for http(s):// and ignoring the case - """ - is_url = re.match(r'^(?:http)s?://', input_url, re.IGNORECASE) is not None - return is_url diff --git a/opencompass/multimodal/models/mplug_owl/__init__.py b/opencompass/multimodal/models/mplug_owl/__init__.py deleted file mode 100644 index d5ba4073..00000000 --- a/opencompass/multimodal/models/mplug_owl/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .mplug_owl_7b import MplugOwl -from .post_processor import MplugOwlMMBenchPostProcessor -from .prompt_constructor import MplugOwlMMBenchPromptConstructor # noqa - -__all__ = [ - 'MplugOwl', 'MplugOwlMMBenchPostProcessor', - 'MplugOwlMMBenchPromptConstructor' -] diff --git a/opencompass/multimodal/models/mplug_owl/mplug_owl_7b.py b/opencompass/multimodal/models/mplug_owl/mplug_owl_7b.py deleted file mode 100644 index fe564fb7..00000000 --- a/opencompass/multimodal/models/mplug_owl/mplug_owl_7b.py +++ /dev/null @@ -1,104 +0,0 @@ -import os -import sys - -import mmengine -import torch -import torch.nn as nn -from mmengine.device import get_device - -from opencompass.registry import MM_MODELS - - -def load_package(): - """Load required packages from llama_adapter_v2_multimodal7b.""" - current_file_path = os.path.abspath(__file__) - current_folder_path = os.path.dirname(current_file_path) - - sys.path.append(os.path.join(current_folder_path, 'mPLUG-Owl')) # noqa - from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration - from mplug_owl.processing_mplug_owl import (MplugOwlImageProcessor, - MplugOwlProcessor) - from mplug_owl.tokenization_mplug_owl import MplugOwlTokenizer - sys.path.pop(-1) - - return MplugOwlForConditionalGeneration, MplugOwlImageProcessor, MplugOwlProcessor, MplugOwlTokenizer # noqa - - -MplugOwlForConditionalGeneration, MplugOwlImageProcessor, MplugOwlProcessor, MplugOwlTokenizer = load_package( # noqa -) # noqa - - -@MM_MODELS.register_module('mplug_owl_7b') -class MplugOwl(nn.Module): - - def __init__(self, - prompt_constructor: dict, - post_processor: dict, - model_path='MAGAer13/mplug-owl-llama-7b', - mode: str = 'generation'): - super().__init__() - pretrained_ckpt = model_path - # import pdb;pdb.set_trace() - print(pretrained_ckpt) - self.model = MplugOwlForConditionalGeneration.from_pretrained( - pretrained_ckpt, - torch_dtype=torch.bfloat16, - ).cuda() - self.image_processor = MplugOwlImageProcessor.from_pretrained( - pretrained_ckpt) - self.tokenizer = MplugOwlTokenizer.from_pretrained(pretrained_ckpt) - self.processor = MplugOwlProcessor(self.image_processor, - self.tokenizer) - self.generate_kwargs = { - 'do_sample': False, - 'top_k': 5, - 'max_length': 20, - 'num_beams': 3, - } - - self.prompt_constructor = mmengine.registry.build_from_cfg( - prompt_constructor, MM_MODELS) - if post_processor is not None: - self.post_processor = mmengine.registry.build_from_cfg( - post_processor, MM_MODELS) - - self.mode = mode - - def forward(self, batch): - if self.mode == 'generation': - return self.generate(batch) - - def generate(self, batch): - images = [image.unsqueeze(0) for image in batch['inputs']] - data_samples = [data_sample for data_sample in batch['data_samples']] - images = torch.cat(images, dim=0).to(get_device()) - inputs = {'image': images, 'data_samples': data_samples} - inputs = self.prompt_constructor(inputs) - image = inputs['image'] - prompt = inputs['prompt'][0] - data_samples = inputs['data_samples'] - - data_sample = data_samples[0] - owl_template = """The following is a conversation - between a curious human and AI assistant. 
- The assistant gives helpful, detailed, and - polite answers to the user's questions. - Human: - Human: {text_input} - AI: """ - prompt = owl_template.format(text_input=prompt) - inputs = self.processor(text=[prompt], return_tensors='pt') - inputs['pixel_values'] = image - # inputs['pixel_values'] = torch.zeros_like(samples['image']) - inputs = { - k: v.bfloat16() if v.dtype == torch.float else v - for k, v in inputs.items() - } - inputs = {k: v.to(self.model.device) for k, v in inputs.items()} - with torch.no_grad(): - res = self.model.generate(**inputs, **self.generate_kwargs) - output_text = self.tokenizer.decode(res.tolist()[0], - skip_special_tokens=True) - output_text = self.post_processor(output_text) - data_sample.pred_answer = output_text - return data_sample diff --git a/opencompass/multimodal/models/mplug_owl/post_processor.py b/opencompass/multimodal/models/mplug_owl/post_processor.py deleted file mode 100644 index 6339e1e6..00000000 --- a/opencompass/multimodal/models/mplug_owl/post_processor.py +++ /dev/null @@ -1,17 +0,0 @@ -import re - -import torch - - -class MplugOwlMMBenchPostProcessor: - """"Post processor for MplugOwl on MMBench.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor) -> str: - pattern = re.compile(r'([A-Z]\.)') - res = pattern.findall(output_token) - if len(res) > 0: - output_token = res[0][:-1] - return output_token diff --git a/opencompass/multimodal/models/mplug_owl/prompt_constructor.py b/opencompass/multimodal/models/mplug_owl/prompt_constructor.py deleted file mode 100644 index 6e7bc17e..00000000 --- a/opencompass/multimodal/models/mplug_owl/prompt_constructor.py +++ /dev/null @@ -1,58 +0,0 @@ -from typing import List - -from mmpretrain.structures import DataSample - - -class MplugOwlMMBenchPromptConstructor: - """Prompt constructor for MplugOwl on MMBench. - - Args: - image_prompt (str): Image prompt. Defaults to `''`. - reply_prompt (str): Reply prompt. Defaults to `''`. - """ - - def __init__(self, image_prompt: str = '', reply_prompt: str = '') -> None: - self.image_prompt = image_prompt - self.reply_prompt = reply_prompt - - def __call__(self, inputs: dict) -> dict: - """Construct prompt. - - Args: - inputs (dict): Input data containing image and data_samples. - - Returns: - dict: A dict containing prompt, images and data_samples. - """ - data_samples = inputs['data_samples'] - prompt = self._process(data_samples) - inputs.update({'prompt': prompt}) - - return inputs - - def _process(self, data_samples: List[DataSample]) -> str: - """Process data sample to prompt. - - Args: - data_samples (List[DataSample]): A list of data_samples. - - Returns: - str: Prompt. 
- """ - question = [ - data_sample.get('question') for data_sample in data_samples - ] - options = [data_sample.get('options') for data_sample in data_samples] - if data_samples[0].get('context') is not None: - context = [ - data_sample.get('context') for data_sample in data_samples - ] - else: - context = [''] * len(data_samples) - prompts = [] - for cur_context, cur_question, cur_options in zip( - context, question, options): - prompts.append(cur_context + ' ' + cur_question + ' ' + - cur_options) # noqa - - return prompts diff --git a/opencompass/multimodal/models/openflamingo/__init__.py b/opencompass/multimodal/models/openflamingo/__init__.py deleted file mode 100644 index e83bb40f..00000000 --- a/opencompass/multimodal/models/openflamingo/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -from .openflamingo import OpenFlamingoInferencer -from .post_processor import OpenFlamingoVSRPostProcessor -from .prompt_constructor import (OpenFlamingoCaptionPromptConstructor, - OpenFlamingoMMBenchPromptConstructor, - OpenFlamingoScienceQAPromptConstructor, - OpenFlamingoVQAPromptConstructor) - -__all__ = [ - 'OpenFlamingoInferencer', 'OpenFlamingoMMBenchPromptConstructor', - 'OpenFlamingoCaptionPromptConstructor', 'OpenFlamingoVQAPromptConstructor', - 'OpenFlamingoScienceQAPromptConstructor', 'OpenFlamingoVSRPostProcessor' -] diff --git a/opencompass/multimodal/models/openflamingo/openflamingo.py b/opencompass/multimodal/models/openflamingo/openflamingo.py deleted file mode 100644 index d22bd8f3..00000000 --- a/opencompass/multimodal/models/openflamingo/openflamingo.py +++ /dev/null @@ -1,110 +0,0 @@ -import re -from typing import List, Optional, Union - -import mmengine -import torch -from mmpretrain.models.multimodal import Flamingo -from mmpretrain.structures import DataSample - -from opencompass.registry import MM_MODELS - - -@MM_MODELS.register_module('openflamingo') -class OpenFlamingoInferencer(Flamingo): - """Inference code of OpenFlamingo. - - Args: - prompt_constructor (optional, dict): The config of prompt constructor. - Defaults to None. - post_processor (optional, dict): The config of post processor. - Defaults to None. - mode (str): The mode of inference. Defaults to 'generation'. - """ - - def __init__(self, - prompt_constructor: dict, - post_processor: Optional[dict] = None, - mode: str = 'generation', - **kwargs): - super().__init__(**kwargs) - self.prompt_constructor = mmengine.registry.build_from_cfg( - prompt_constructor, MM_MODELS) - if post_processor is not None: - self.post_processor = mmengine.registry.build_from_cfg( - post_processor, MM_MODELS) - else: - self.post_processor = None - self.mode = mode - - def preprocess_text(self, data_samples: List[DataSample], - device: torch.device) -> List[DataSample]: - """Preprocess text in advance before fed into language model. - - Args: - data_samples (List[DataSample]): The annotation - data of every samples. Defaults to None. - device (torch.device): Device for text to put on. - - Returns: - List[DataSample]: Return list of data samples. - """ - prompts = self.prompt_constructor(data_samples) - - self.tokenizer.padding_side = 'left' - input_text = self.tokenizer( - prompts, - padding='longest', - truncation=True, - return_tensors='pt', - max_length=2000, - ).to(device) - return input_text - - def post_process( - self, outputs: torch.Tensor, - data_samples: Optional[List[DataSample]]) -> List[DataSample]: - """Perform post process for outputs for different task. - - Args: - outputs (torch.Tensor): The generated outputs. 
- data_samples (List[DataSample], optional): The annotation - data of every samples. - - Returns: - List[DataSample]: Return list of data samples. - """ - outputs = self.tokenizer.batch_decode(outputs, - skip_special_tokens=True) - - if data_samples is None: - data_samples = [DataSample() for _ in range(len(outputs))] - - for output, data_sample in zip(outputs, data_samples): - # remove text pattern - if self.task == 'caption': - data_sample.pred_caption = re.split('Output', output, - 1)[0].replace('"', '') - if self.post_processor: - data_sample.pred_caption = self.post_processor( - data_sample.pred_caption) - elif self.task == 'vqa': - data_sample.pred_answer = re.split('Question|Answer', output, - 1)[0] - if self.post_processor: - data_sample.pred_answer = self.post_processor( - data_sample.pred_answer) - - return data_samples - - def forward(self, batch: dict) -> Union[DataSample, List[DataSample]]: - - if self.mode == 'generation': - return self.generate(batch) - else: - raise RuntimeError(f'Unsupported mode: {self.mode}') - - def generate(self, batch: dict) -> Union[DataSample, List[DataSample]]: - batch = self.data_preprocessor(batch, False) - images = batch['images'] - data_samples = batch['data_samples'] - return self.predict(images, data_samples) diff --git a/opencompass/multimodal/models/openflamingo/post_processor.py b/opencompass/multimodal/models/openflamingo/post_processor.py deleted file mode 100644 index 096805f1..00000000 --- a/opencompass/multimodal/models/openflamingo/post_processor.py +++ /dev/null @@ -1,13 +0,0 @@ -class OpenFlamingoVSRPostProcessor: - """VSR post processor for Openflamingo.""" - - def __init__(self) -> None: - pass - - def __call__(self, raw_response: str) -> str: - if 'yes' in raw_response.lower(): - return 'yes' - elif 'no' in raw_response.lower(): - return 'no' - else: - return 'unknown' diff --git a/opencompass/multimodal/models/openflamingo/prompt_constructor.py b/opencompass/multimodal/models/openflamingo/prompt_constructor.py deleted file mode 100644 index de64be37..00000000 --- a/opencompass/multimodal/models/openflamingo/prompt_constructor.py +++ /dev/null @@ -1,130 +0,0 @@ -from typing import Optional - -from mmpretrain.structures import DataSample - - -class OpenFlamingoMMBenchPromptConstructor: - """MMBench prompt constructor for OpenFlamingo.""" - - def __init__(self) -> None: - pass - - def __call__(self, data_samples: DataSample) -> tuple: - """Construct prompt. - - Args: - data_samples (DataSample): Input data_samples. - - Returns: - Raw text input (str). - """ - assert len(data_samples) == 1 - sample = data_samples[0] - prompts = [] - question = sample.get('question') - option = sample.get('options') - - prompt = '' + question + ' ' + option + ' ' + 'Answer:' - if sample.get('context') is not None: - prompt = sample.get('context') + ' ' + prompt - - prompts.append(prompt) - - return prompts - - -class OpenFlamingoCaptionPromptConstructor: - """Caption prompt constructor for OpenFlamingo.""" - - def __init__(self, shot_prompt: Optional[str] = None) -> None: - if shot_prompt: - self.shot_prompt = shot_prompt - else: - self.shot_prompt = ( - 'Output:A child holding a flowered umbrella and petting a yak.<|endofchunk|>' # noqa - 'Output:The child is holding a brush close to his mouth.<|endofchunk|>' # noqa - ) # noqa - - def __call__(self, data_samples: DataSample) -> tuple: - """Construct prompt. - - Args: - data_samples (DataSample): Input data_samples. - - Returns: - Raw text input (str). 
- """ - assert len(data_samples) == 1 - prompts = [] - prompt = 'Output:' - prompts.append(self.shot_prompt + prompt) - return prompts - - -class OpenFlamingoVQAPromptConstructor: - """VQA prompt constructor for OpenFlamingo.""" - - def __init__(self, shot_prompt: Optional[str] = None) -> None: - if shot_prompt: - self.shot_prompt = shot_prompt - else: - self.shot_prompt = ( - 'Question:Is the sky dark? Short Answer:yes<|endofchunk|>' # noqa: E501 - 'Question:What is on the white wall? Short Answer:pipe<|endofchunk|>' # noqa: E501 - ) # noqa - - def __call__(self, data_samples: DataSample) -> tuple: - """Construct prompt. - - Args: - data_samples (DataSample): Input data_samples. - - Returns: - Raw text input (str). - """ - prompts = [] - for sample in data_samples: - question = sample.get('question') - prompt = 'Question:{} Short Answer:'.format(question) - prompts.append(self.shot_prompt + prompt) - return prompts - - -class OpenFlamingoScienceQAPromptConstructor: - """ScienceQA prompt constructor for OpenFlamingo.""" - choice_mapping = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F'} - - def __init__(self, shot_prompt: Optional[str] = None) -> None: - if shot_prompt: - self.shot_prompt = shot_prompt - else: - self.shot_prompt = ( - "Context:Question:Which of these states is farthest north? Choices:['(A) West Virginia' '(B) Louisiana' '(C) Arizona' '(D) Oklahoma'] Answer with a single character: A<|endofchunk|>" # noqa - 'Context:The diagrams below show two pure samples of gas in identical closed, rigid containers. Each colored ball represents one gas particle. Both samples have the same number of particles.' # noqa - "Question:Compare the average kinetic energies of the particles in each sample. Which sample has the higher temperature? Choices:'[(A) neither' '(B) sample A' '(C) sample B'] Answer with a single character: C<|endofchunk|>" # noqa - ) # noqa - - def __call__(self, data_samples: DataSample) -> tuple: - """Construct prompt. - - Args: - data_samples (DataSample): Input data_samples. - - Returns: - Raw text input (str). 
- """ - assert len(data_samples) == 1 - sample = data_samples[0] - question = sample.get('question') - choices = sample.get('choices') - choices = [ - f'({self.choice_mapping[i]}) ' + item - for i, item in enumerate(choices) - ] - hint = sample.get('hint') - prompts = [] - prompt = 'Context:{} Question:{} Choices:{}'.format( - hint, question, choices) - prompt += ' Answer with a single character:' - prompts.append(self.shot_prompt + prompt) - return prompts diff --git a/opencompass/multimodal/models/otter/__init__.py b/opencompass/multimodal/models/otter/__init__.py deleted file mode 100644 index e60056df..00000000 --- a/opencompass/multimodal/models/otter/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from typing import TYPE_CHECKING - -from transformers.utils import (OptionalDependencyNotAvailable, - is_torch_available) - -if TYPE_CHECKING: - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - -from .otter import Otter -from .post_processor import OTTERMMBenchPostProcessor -from .prompt_constructor import OTTERMMBenchPromptConstructor - -__all__ = [ - 'Otter', 'OTTERMMBenchPromptConstructor', 'OTTERMMBenchPostProcessor' -] diff --git a/opencompass/multimodal/models/otter/otter.py b/opencompass/multimodal/models/otter/otter.py deleted file mode 100644 index ced2ba09..00000000 --- a/opencompass/multimodal/models/otter/otter.py +++ /dev/null @@ -1,79 +0,0 @@ -import importlib - -import mmengine -import torch -import torch.nn as nn -from mmengine.device import get_device - -from opencompass.registry import MM_MODELS - - -@MM_MODELS.register_module('otter-9b') -class Otter(nn.Module): - """Inference code of OTTER. - - Model details: - OTTER: a multi-modal model based on OpenFlamingo - (open-sourced version of DeepMind's Flamingo) - https://github.com/Luodian/Otter - Args: - model_path (str): The path of OTTER model - in Huggingface model hub format. - load_bit (str): The bit of OTTER model, can be "fp32" or "bf16". - mode (str): The mode of inference. Defaults to 'generation'. 
- """ - - def __init__(self, - model_path, - load_bit, - prompt_constructor, - post_processor, - mode='generation') -> None: - super().__init__() - torch_dtype = torch.bfloat16 if load_bit == 'bf16' else torch.float32 - otter_ai = importlib.import_module('otter_ai') - self.model = otter_ai.OtterForConditionalGeneration.from_pretrained( - model_path, torch_dtype=torch_dtype, device_map=get_device()) - self.tokenizer = self.model.text_tokenizer - self.tokenizer.padding_side = 'left' - self.model_dtype = next(self.model.parameters()).dtype - self.prompt_constructor = mmengine.registry.build_from_cfg( - prompt_constructor, MM_MODELS) - if post_processor is not None: - self.post_processor = mmengine.registry.build_from_cfg( - post_processor, MM_MODELS) - self.mode = mode - - def forward(self, batch): - if self.mode == 'generation': - return self.generate(batch) - elif self.mode == 'loss': - return self.loss(batch) - else: - raise RuntimeError(f'Invalid mode "{self.mode}".') - - def generate(self, batch): - inputs = self.prompt_constructor(batch) - image = inputs['image'] - prompt = inputs['prompt'] - data_samples = inputs['data_samples'] - vision_x = image.unsqueeze(1).unsqueeze(0).to(dtype=self.model_dtype) - lang_x = self.model.text_tokenizer([prompt], return_tensors='pt') - bad_words_id = self.model.text_tokenizer(['User:', 'GPT:']).input_ids - generated_text = self.model.generate( - vision_x=vision_x.to(self.model.device), - lang_x=lang_x['input_ids'].to(self.model.device), - attention_mask=lang_x['attention_mask'].to(self.model.device), - do_sample=False, - max_new_tokens=512, - num_beams=3, - bad_words_ids=bad_words_id, - no_repeat_ngram_size=3, - ) - for i, data_sample in enumerate(data_samples): - output_text = self.post_processor(generated_text[i], - self.model.text_tokenizer) - data_sample.pred_answer = output_text - data_samples[i] = data_sample - - return data_samples diff --git a/opencompass/multimodal/models/otter/post_processor.py b/opencompass/multimodal/models/otter/post_processor.py deleted file mode 100644 index e1f0064f..00000000 --- a/opencompass/multimodal/models/otter/post_processor.py +++ /dev/null @@ -1,139 +0,0 @@ -import random -import re - -import torch - - -class OTTERMMBenchPostProcessor: - """"Post processor for OTTER on MMBench.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - - if output_token[0] == 0: - output_token = output_token[1:] - if output_token[0] == 1: - output_token = output_token[1:] - output_text = tokenizer.decode(output_token, - add_special_tokens=False) # noqa - output_text = self._extract_key_words(output_text) - return output_text - - def _extract_key_words(self, output_text: str) -> str: - output_text = (output_text.split('')[-1].lstrip().rstrip(). - split('<|endofchunk|>')[0].lstrip().rstrip()) - pattern = re.compile(r'([A-Z]\.)') - res = pattern.findall(output_text) - if len(res) > 0: - output_text = res[0][:-1] - return output_text - - -class OTTERCOCOCaptionPostProcessor: - """"Post processor for OTTER on COCO Caption.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - - if output_token[0] == 0: - output_token = output_token[1:] - if output_token[0] == 1: - output_token = output_token[1:] - output_text = tokenizer.decode(output_token, - add_special_tokens=False) # noqa - output_text = (output_text.split('')[-1].lstrip().rstrip(). 
- split('<|endofchunk|>')[0].lstrip().rstrip()) - pattern = re.compile(r'([A-Z]\.)') - res = pattern.findall(output_text) - if len(res) > 0: - output_text = res[0][:-1] - return output_text - - -class OTTERScienceQAPostProcessor: - """"Post processor for OTTER on ScienceQA.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - - if output_token[0] == 0: - output_token = output_token[1:] - if output_token[0] == 1: - output_token = output_token[1:] - output_text = tokenizer.decode(output_token, - add_special_tokens=False) # noqa - output_text = (output_text.split('')[-1].lstrip().rstrip(). - split('<|endofchunk|>')[0].lstrip().rstrip()) - pattern = re.compile(r'\(([A-Z])\)') - output_text = pattern.findall(output_text) - if len(output_text) == 0: - output_text = random.choice(['A', 'B', 'C', 'D']) - else: - output_text = output_text[0] - return output_text - - -class OTTERVQAPostProcessor: - """"Post processor for OTTER on VQA.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - - if output_token[0] == 0: - output_token = output_token[1:] - if output_token[0] == 1: - output_token = output_token[1:] - output_text = tokenizer.decode(output_token, - add_special_tokens=False) # noqa - output_text = (output_text.split('')[-1].lstrip().rstrip(). - split('<|endofchunk|>')[0].lstrip().rstrip()) - return output_text - - -class OTTERVSRPostProcessor: - """"Post processor for OTTER on VSR.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - - if output_token[0] == 0: - output_token = output_token[1:] - if output_token[0] == 1: - output_token = output_token[1:] - output_text = tokenizer.decode(output_token, add_special_tokens=False) - pattern = r'yes|no|Yes|No' - output_text = re.findall(pattern, output_text) - if len(output_text) > 0: - output_text = output_text[0].lower() - return output_text - - -class OTTERMMEPostProcessor(OTTERMMBenchPostProcessor): - """"Post processor for OTTER on MME.""" - - def __init__(self) -> None: - super().__init__() - - def __call__(self, output_token: torch.tensor, tokenizer) -> str: - response = super().__call__(output_token, tokenizer) - # extract yes or no, copy from MME official evaluation script - prefix_pred_ans = response[:4].lower() - - if 'yes' in prefix_pred_ans: - pred_label = 'yes' - elif 'no' in prefix_pred_ans: - pred_label = 'no' - else: - pred_label = 'other' - - return pred_label diff --git a/opencompass/multimodal/models/otter/prompt_constructor.py b/opencompass/multimodal/models/otter/prompt_constructor.py deleted file mode 100644 index 7d16582e..00000000 --- a/opencompass/multimodal/models/otter/prompt_constructor.py +++ /dev/null @@ -1,168 +0,0 @@ -from typing import List - -import torch -from mmpretrain.structures import DataSample - - -class OTTERMMBenchPromptConstructor: - """Prompt constructor for OTTER on MMBench. - - Args: - image_prompt (str): Image prompt. Defaults to `''`. - reply_prompt (str): Reply prompt. Defaults to `''`. - """ - - def __init__(self, user_label: str = '', model_label: str = '') -> None: - self.image_token = '' - self.reply_token = '' - self.user_label = user_label - self.model_label = model_label - - def __call__(self, inputs: dict) -> dict: - """Construct prompt. - - Args: - inputs (dict): Input data containing image and data_samples. - - Returns: - dict: A dict containing prompt, images and data_samples. 
- """ - images = [image.unsqueeze(0) for image in inputs['inputs']] - data_samples = [data_sample for data_sample in inputs['data_samples']] - images = torch.cat(images, dim=0) - inputs = {'image': images, 'data_samples': data_samples} - data_samples = inputs['data_samples'] - prompt = self._process(data_samples) - inputs.update({'prompt': prompt}) - - return inputs - - def _process(self, data_samples: List[DataSample]) -> str: - """Process data sample to prompt. - - Args: - data_samples (List[DataSample]): A list of data_samples. - - Returns: - str: Prompt. - """ - assert len(data_samples) == 1, 'Only support batch size 1.' - data_sample = data_samples[0] - question = data_sample.get('question') - options = data_sample.get('options') - context = data_sample.get('context') - # e.g. User: What is the color of the sky? A: Blue B: Red C: Green D: Yellow GPT: # noqa - if context is not None: - prompt = f'{self.image_token}{self.user_label} {context} {question} {options} {self.model_label}:{self.reply_token}' # noqa - else: - prompt = f'{self.image_token}{self.user_label} {question} {options} {self.model_label}:{self.reply_token}' # noqa - - return prompt - - -class OTTERCOCOCaotionPromptConstructor(OTTERMMBenchPromptConstructor): - """Prompt constructor for OTTER on COCO Caption.""" - - def _process(self, data_samples: List[DataSample]) -> str: - # e.g. User: a photo of GPT: # noqa - prompt = f'{self.image_token}{self.user_label} a photo of {self.model_label}:{self.reply_token}' # noqa - return prompt - - -class OTTERScienceQAPromptConstructor(OTTERMMBenchPromptConstructor): - """Prompt constructor for OTTER on ScienceQA.""" - - choice_mapping = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F'} - - def _process(self, data_samples: List[DataSample]) -> str: - assert len(data_samples) == 1, 'Only support batch size 1.' - questions = [ - 'Question: ' + data_sample.get('question') + '\n' - for data_sample in data_samples - ] # noqa - choices = [data_sample.get('choices') for data_sample in data_samples] - choices = [[ - f'({self.choice_mapping[i]}) ' + item - for i, item in enumerate(choice) - ] for choice in choices] - choices = [ - 'Choices: ' + ' '.join(choice) + '\n' for choice in choices - ] # noqa - contexts = [ - 'Context: ' + data_sample.get('hint') + '\n' - for data_sample in data_samples - ] # noqa - question = questions[0] - choice = choices[0] - context = contexts[0] - prompt = f'{self.image_token}{self.user_label} {context} {question} {choice} The answer is {self.model_label}:{self.reply_token}' # noqa - return prompt - - -class OTTERVQAPromptConstructor(OTTERMMBenchPromptConstructor): - """Prompt constructor for OTTER on VQA.""" - - def _process(self, data_samples: List[DataSample]) -> str: - assert len(data_samples) == 1, 'Only support batch size 1.' - questions = [ - data_sample.get('question') for data_sample in data_samples - ] - question = questions[0] - prompt = f'{self.image_token}{self.user_label} {question}. Answer it with with few words. {self.model_label}:{self.reply_token}' # noqa - return prompt - - -class OTTERVSRPromptConstructor(OTTERMMBenchPromptConstructor): - """Prompt constructor for OTTER on VSR.""" - - def _process(self, data_samples: List[DataSample]) -> str: - assert len(data_samples) == 1, 'Only support batch size 1.' - questions = [ - data_sample.get('question') for data_sample in data_samples - ] - question = questions[0] - prompt = f'{self.image_token}{self.user_label} {question}. Is the above description correct? Answer yes or no. 
{self.model_label}:{self.reply_token}' # noqa - return prompt - - -class OTTERSEEDBenchPromptConstructor(OTTERMMBenchPromptConstructor): - - def _process(self, data_samples: List[DataSample]) -> str: - """Process data sample to prompt. - - Args: - data_samples (List[DataSample]): A list of data_samples. - - Returns: - str: Prompt. - """ - assert len(data_samples) == 1, 'Only support batch size 1.' - questions = [ - data_sample.get('question') for data_sample in data_samples - ] - question = questions[0] - prompt = f'{self.image_token}{self.user_label} {question} {self.model_label}:{self.reply_token}' # noqa - return prompt - - -class OTTERMMEPromptConstructor(OTTERMMBenchPromptConstructor): - """Prompt constructor for OTTER on MME. - - Args: - image_prompt (str): Image prompt. Defaults to `''`. - reply_prompt (str): Reply prompt. Defaults to `''`. - """ - - def _process(self, data_samples: List[DataSample]) -> str: - """Process data sample to prompt. - - Args: - data_samples (List[DataSample]): A list of data_samples. - - Returns: - str: Prompt. - """ - assert len(data_samples) == 1, 'Only support batch size 1.' - question = data_samples[0].get('question') - prompt = f'{self.image_token}{self.user_label} {question} {self.model_label}:{self.reply_token}' # noqa - return prompt diff --git a/opencompass/multimodal/models/qwen/__init__.py b/opencompass/multimodal/models/qwen/__init__.py deleted file mode 100644 index 1677731b..00000000 --- a/opencompass/multimodal/models/qwen/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .post_processor import QwenVLBasePostProcessor, QwenVLChatVSRPostProcessor -from .prompt_constructor import (QwenVLChatPromptConstructor, - QwenVLChatScienceQAPromptConstructor, - QwenVLChatVQAPromptConstructor, - QwenVLMMBenchPromptConstructor) -from .qwen import QwenVLBase, QwenVLChat - -__all__ = [ - 'QwenVLBase', 'QwenVLChat', 'QwenVLBasePostProcessor', - 'QwenVLMMBenchPromptConstructor', 'QwenVLChatPromptConstructor', - 'QwenVLChatVQAPromptConstructor', 'QwenVLChatVSRPostProcessor', - 'QwenVLChatScienceQAPromptConstructor' -] diff --git a/opencompass/multimodal/models/qwen/generation_utils.py b/opencompass/multimodal/models/qwen/generation_utils.py deleted file mode 100644 index 9bfb83a0..00000000 --- a/opencompass/multimodal/models/qwen/generation_utils.py +++ /dev/null @@ -1,293 +0,0 @@ -# Copyright (c) Alibaba Cloud. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Generation support.""" - -from typing import List, Tuple, Union - -import torch -from transformers import PreTrainedTokenizer - -# Types. -HistoryType = List[Tuple[str, str]] -TokensType = List[int] -BatchTokensType = List[List[int]] - - -def pad_batch(batch: BatchTokensType, pad_id: int, - seq_length: int) -> BatchTokensType: - for tokens in batch: - context_length = len(tokens) - if context_length < seq_length: - tokens.extend([pad_id] * (seq_length - context_length)) - return batch - - -def get_ltor_masks_and_position_ids( - data: torch.Tensor, - eod_token: int, - reset_position_ids: bool, - reset_attention_mask: bool, - eod_mask_loss: bool, -): - """Build masks and position id for left to right model.""" - - # Extract batch size and sequence length. - micro_batch_size, seq_length = data.size() - - # Attention mask (lower triangular). 
- if reset_attention_mask: - att_mask_batch = micro_batch_size - else: - att_mask_batch = 1 - attention_mask = torch.tril( - torch.ones((att_mask_batch, seq_length, seq_length), - device=data.device)).view(att_mask_batch, 1, seq_length, - seq_length) - - # Loss mask. - loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device) - if eod_mask_loss: - loss_mask[data == eod_token] = 0.0 - - # Position ids. - position_ids = torch.arange(seq_length, - dtype=torch.long, - device=data.device) - position_ids = position_ids.unsqueeze(0).expand_as(data) - # We need to clone as the ids will be modified based on batch index. - if reset_position_ids: - position_ids = position_ids.clone() - - if reset_position_ids or reset_attention_mask: - # Loop through the batches: - for b in range(micro_batch_size): - - # Find indices where EOD token is. - eod_index = position_ids[b, data[b] == eod_token] - # Detach indices from positions if going to modify positions. - if reset_position_ids: - eod_index = eod_index.clone() - - # Loop through EOD indices: - prev_index = 0 - for j in range(eod_index.size()[0]): - i = eod_index[j] - # Mask attention loss. - if reset_attention_mask: - attention_mask[b, 0, (i + 1):, :(i + 1)] = 0 - # Reset positions. - if reset_position_ids: - position_ids[b, (i + 1):] -= i + 1 - prev_index - prev_index = i + 1 - - # Convert attention mask to binary: - attention_mask = attention_mask < 0.5 - - return attention_mask, loss_mask, position_ids - - -def get_batch(context_tokens: torch.LongTensor, eod_id: int): - """Generate batch from context tokens.""" - # Move to GPU. - tokens = context_tokens.contiguous().to(context_tokens.device) - # Get the attention mask and position ids. - attention_mask, _, position_ids = get_ltor_masks_and_position_ids( - tokens, - eod_id, - reset_position_ids=False, - reset_attention_mask=False, - eod_mask_loss=False, - ) - return tokens, attention_mask, position_ids - - -def get_stop_words_ids(chat_format: str, tokenizer: PreTrainedTokenizer): - if chat_format == 'raw': - stop_words_ids = [tokenizer.encode('Human:'), [tokenizer.eod_id]] - elif chat_format == 'chatml': - stop_words_ids = [[tokenizer.im_end_id], [tokenizer.im_start_id]] - else: - raise NotImplementedError(f'Unknown chat format {chat_format!r}') - return stop_words_ids - - -def make_context( - tokenizer: PreTrainedTokenizer, - query: str, - history: List[Tuple[str, str]] = None, - system: str = '', - max_window_size: int = 6144, - chat_format: str = 'chatml', -): - if history is None: - history = [] - - if chat_format == 'chatml': - im_start, im_end = '<|im_start|>', '<|im_end|>' - im_start_tokens = [tokenizer.im_start_id] - im_end_tokens = [tokenizer.im_end_id] - nl_tokens = tokenizer.encode('\n') - - def _tokenize_str(role, content): - return f'{role}\n{content}', tokenizer.encode( - role, allowed_special=set( - tokenizer.IMAGE_ST)) + nl_tokens + tokenizer.encode( - content, allowed_special=set(tokenizer.IMAGE_ST)) - - system_text, system_tokens_part = _tokenize_str('system', system) - system_tokens = im_start_tokens + system_tokens_part + im_end_tokens - - raw_text = '' - context_tokens = [] - - for turn_query, turn_response in reversed(history): - query_text, query_tokens_part = _tokenize_str('user', turn_query) - query_tokens = im_start_tokens + query_tokens_part + im_end_tokens - if turn_response is not None: - response_text, response_tokens_part = _tokenize_str( - 'assistant', turn_response) - response_tokens = im_start_tokens + response_tokens_part + im_end_tokens # noqa - - 
next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens # noqa - prev_chat = ( - f'\n{im_start}{query_text}{im_end}\n{im_start}{response_text}{im_end}' # noqa - ) - else: - next_context_tokens = nl_tokens + query_tokens + nl_tokens - prev_chat = f'\n{im_start}{query_text}{im_end}\n' - - current_context_size = (len(system_tokens) + - len(next_context_tokens) + - len(context_tokens)) - if current_context_size < max_window_size: - context_tokens = next_context_tokens + context_tokens - raw_text = prev_chat + raw_text - else: - break - - context_tokens = system_tokens + context_tokens - raw_text = f'{im_start}{system_text}{im_end}' + raw_text - context_tokens += (nl_tokens + im_start_tokens + - _tokenize_str('user', query)[1] + im_end_tokens + - nl_tokens + im_start_tokens + - tokenizer.encode('assistant') + nl_tokens) - raw_text += f'\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n' - - elif chat_format == 'raw': - raw_text = query - context_tokens = tokenizer.encode(raw_text) - else: - raise NotImplementedError(f'Unknown chat format {chat_format!r}') - - return raw_text, context_tokens - - -def _decode_default( - tokens: List[int], - *, - stop_words: List[str], - eod_words: List[str], - tokenizer: PreTrainedTokenizer, - raw_text_len: int, - verbose: bool = False, - return_end_reason: bool = False, - errors: str = 'replace', -): - trim_decode_tokens = tokenizer.decode(tokens, errors=errors)[raw_text_len:] - if verbose: - print('\nRaw Generate: ', trim_decode_tokens) - - end_reason = f'Gen length {len(tokens)}' - for stop_word in stop_words: - trim_decode_tokens = trim_decode_tokens.replace(stop_word, '').strip() - for eod_word in eod_words: - if eod_word in trim_decode_tokens: - end_reason = f'Gen {eod_word!r}' - trim_decode_tokens = trim_decode_tokens.split(eod_word)[0] - trim_decode_tokens = trim_decode_tokens.strip() - if verbose: - print('\nEnd Reason:', end_reason) - print('\nGenerate: ', trim_decode_tokens) - - if return_end_reason: - return trim_decode_tokens, end_reason - else: - return trim_decode_tokens - - -def _decode_chatml(tokens: List[int], - *, - stop_words: List[str], - eod_token_ids: List[int], - tokenizer: PreTrainedTokenizer, - raw_text_len: int, - context_length: int, - verbose: bool = False, - return_end_reason: bool = False, - errors: str = 'replace'): - end_reason = f'Gen length {len(tokens)}' - eod_token_idx = context_length - for eod_token_idx in range(context_length, len(tokens)): - if tokens[eod_token_idx] in eod_token_ids: - end_reason = f'Gen {tokenizer.decode([tokens[eod_token_idx]])!r}' - break - - trim_decode_tokens = tokenizer.decode(tokens[:eod_token_idx], - errors=errors)[raw_text_len:] - if verbose: - print('\nRaw Generate w/o EOD:', - tokenizer.decode(tokens, errors=errors)[raw_text_len:]) - print('\nRaw Generate:', trim_decode_tokens) - print('\nEnd Reason:', end_reason) - for stop_word in stop_words: - trim_decode_tokens = trim_decode_tokens.replace(stop_word, '').strip() - trim_decode_tokens = trim_decode_tokens.strip() - if verbose: - print('\nGenerate:', trim_decode_tokens) - - if return_end_reason: - return trim_decode_tokens, end_reason - else: - return trim_decode_tokens - - -def decode_tokens( - tokens: Union[torch.LongTensor, TokensType], - tokenizer: PreTrainedTokenizer, - raw_text_len: int, - context_length: int, - chat_format: str, - verbose: bool = False, - return_end_reason: bool = False, - errors: str = 'replace', -) -> str: - if torch.is_tensor(tokens): - tokens = tokens.cpu().numpy().tolist() - - if 
chat_format == 'chatml': - return _decode_chatml( - tokens, - stop_words=[], - eod_token_ids=[tokenizer.im_start_id, tokenizer.im_end_id], - tokenizer=tokenizer, - raw_text_len=raw_text_len, - context_length=context_length, - verbose=verbose, - return_end_reason=return_end_reason, - errors=errors, - ) - elif chat_format == 'raw': - return _decode_default( - tokens, - stop_words=['<|endoftext|>'], - eod_words=['<|endoftext|>'], - tokenizer=tokenizer, - raw_text_len=raw_text_len, - verbose=verbose, - return_end_reason=return_end_reason, - errors=errors, - ) - else: - raise NotImplementedError(f'Unknown chat format {chat_format!r}') diff --git a/opencompass/multimodal/models/qwen/post_processor.py b/opencompass/multimodal/models/qwen/post_processor.py deleted file mode 100644 index e6b5525f..00000000 --- a/opencompass/multimodal/models/qwen/post_processor.py +++ /dev/null @@ -1,31 +0,0 @@ -from typing import Any - -import torch - - -class QwenVLBasePostProcessor: - """Post processor for Qwen-VL-Base.""" - - def __init__(self) -> None: - pass - - def __call__(self, pred: torch.tensor, tokenizer: Any, - input_len: int) -> str: - response = self.tokenizer.decode(pred)[input_len:] - response = response.replace('<|endoftext|>', '').strip() - return response - - -class QwenVLChatVSRPostProcessor: - """VSR post processor for Qwen-VL-Chat.""" - - def __init__(self) -> None: - pass - - def __call__(self, response: str) -> str: - if 'yes' in response.lower(): - return 'yes' - elif 'no' in response.lower(): - return 'no' - else: - return 'unknown' diff --git a/opencompass/multimodal/models/qwen/prompt_constructor.py b/opencompass/multimodal/models/qwen/prompt_constructor.py deleted file mode 100644 index aa06ff26..00000000 --- a/opencompass/multimodal/models/qwen/prompt_constructor.py +++ /dev/null @@ -1,100 +0,0 @@ -class QwenVLMMBenchPromptConstructor: - """MMBench prompt constructor for Qwen-VL. - - The output is a dict following the input format of Qwen-VL tokenizer. - """ - - def __init__(self) -> None: - pass - - def __call__(self, inputs: dict) -> list: - data_samples = inputs['data_samples'] - assert len(data_samples) == 1 - data_sample = data_samples[0] - question = data_sample.get('question') - options = data_sample.get('options') - context = data_sample.get('context') - if context is not None: - prompt = context + ' ' + question + ' ' + options - else: - prompt = question + ' ' + options - format_input = [ - { - 'image': 'This_is_path_to_an_image.' - }, # Just placeholder for Image Tokens - { - 'text': prompt - }, - ] - return format_input - - -class QwenVLChatPromptConstructor: - """Prompt constructorfor Qwen-VL-Chat.""" - - def __init__(self, prompt='') -> None: - self.prompt = prompt - - def __call__(self, inputs: dict) -> list: - assert len(inputs['data_samples']) == 1 - format_input = [ - { - 'image': 'This_is_path_to_an_image.' - }, # Just placeholder for Image Tokens - { - 'text': self.prompt - }, - ] - return format_input - - -class QwenVLChatVQAPromptConstructor: - """VQA prompt constructor for Qwen-VL-Chat.""" - - def __init__(self, prompt='') -> None: - self.prompt = prompt - - def __call__(self, inputs: dict) -> list: - data_samples = inputs['data_samples'] - assert len(data_samples) == 1 - data_sample = data_samples[0] - question = data_sample.get('question') - format_input = [ - { - 'image': 'This_is_path_to_an_image.' 
- }, # Just placeholder for Image Tokens - { - 'text': question + self.prompt - }, - ] - return format_input - - -class QwenVLChatScienceQAPromptConstructor: - """ScienceQA prompt constructor for Qwen-VL-Chat.""" - choice_mapping = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F'} - - def __init__(self, prompt='') -> None: - self.prompt = prompt - - def __call__(self, inputs: dict) -> list: - data_samples = inputs['data_samples'] - assert len(data_samples) == 1 - data_sample = data_samples[0] - question = data_sample.get('question') - choices = data_sample.get('choices') - choices = [ - f'({self.choice_mapping[i]}) ' + item - for i, item in enumerate(choices) - ] - choices = 'Choices: ' + ' '.join(choices) + '\n' - contexts = 'Context: ' + data_sample.get('hint') - format_input = [ - { - 'image': 'This_is_path_to_an_image.' - }, # Just placeholder for Image Tokens - { - 'text': contexts + question + choices + self.prompt - }, - ] - return format_input diff --git a/opencompass/multimodal/models/qwen/qwen.py b/opencompass/multimodal/models/qwen/qwen.py deleted file mode 100644 index 5c75ed4a..00000000 --- a/opencompass/multimodal/models/qwen/qwen.py +++ /dev/null @@ -1,329 +0,0 @@ -import types -from typing import Optional, Tuple - -import mmengine -import torch -import torch.nn as nn -from mmengine.device import get_device -from transformers import AutoModelForCausalLM, AutoTokenizer -from transformers.generation import GenerationConfig -from transformers.modeling_outputs import BaseModelOutputWithPast - -from opencompass.registry import MM_MODELS - -from .generation_utils import decode_tokens, make_context - - -@MM_MODELS.register_module('qwen-vl-base') -class QwenVLBase(nn.Module): - """Inference code of Qwen-VL. - - We load the Qwen model via Huggingface. - Args: - pretrained_path (str): Path to Qwen checkpoint or repo id. - prompt_constructor (dict): The config of prompt constructor. - post_processor (dict): The config of post processor. - is_caption_task (bool): Whether the task is caption task. - Defaults to False. - commit_id (str): Use given version of Qwen-VL. - Warning: the latest version may have some conflicts. - Recommend to use the given default version. - """ - - def __init__( - self, - pretrained_path: str, - prompt_constructor: dict = None, - post_processor: dict = None, - is_caption_task: bool = False, - commit_id: str = '548275c8b99de56dec203c0e793be18e030f2f4c' - ) -> None: - super().__init__() - self.tokenizer = AutoTokenizer.from_pretrained(pretrained_path, - trust_remote_code=True, - revision=commit_id) - self.model = AutoModelForCausalLM.from_pretrained( - pretrained_path, - device_map=get_device(), - trust_remote_code=True, - revision=commit_id) - self.model.generation_config = GenerationConfig.from_pretrained( - pretrained_path, trust_remote_code=True, revision=commit_id) - if prompt_constructor is not None: - self.prompt_constructor = mmengine.registry.build_from_cfg( - prompt_constructor, MM_MODELS) - if post_processor is not None: - self.post_processor = mmengine.registry.build_from_cfg( - post_processor, MM_MODELS) - else: - self.post_processor = None - self.is_caption_task = is_caption_task - self.model.transformer.forward = types.MethodType( - forward_hack, self.model.transformer) - - def _build_embeds(self, images, input_ids): - # encode image - images = self.model.transformer.visual(images) - # compute image position - bos_pos = torch.where(input_ids == self.model.transformer.config. 
- visual['image_start_id']) - eos_pos = torch.where( - input_ids == - self.model.transformer.config.visual['image_start_id'] + 1) - assert (bos_pos[0] == eos_pos[0]).all() - img_pos = torch.stack((bos_pos[0], bos_pos[1], eos_pos[1]), dim=1) - # embed words - inputs_embeds = self.model.transformer.wte(input_ids) - # embed image tokens - for idx, (i, a, b) in enumerate(img_pos): - inputs_embeds[i][a + 1:b] = images[idx] - return inputs_embeds - - def generate(self, batch): - images = batch.pop('inputs') - images = torch.stack(images, dim=0) - format_input = self.prompt_constructor(batch) - query = self.tokenizer.from_list_format(format_input) - - inputs = self.tokenizer(query, return_tensors='pt') - inputs = inputs.to(get_device()) - input_ids, token_type_ids, attention_mask = inputs[ - 'input_ids'], inputs['token_type_ids'], inputs['attention_mask'] - inputs_embeds = self._build_embeds(images, input_ids) - pred = self.model.generate(input_ids=input_ids, - inputs_embeds=inputs_embeds, - attention_mask=attention_mask, - token_type_ids=token_type_ids) - response = self.post_processor(pred.cpu()[0]) - - data_sample = batch['data_samples'][0] - if self.is_caption_task: - data_sample.pred_caption = response - else: - data_sample.pred_answer = response - return data_sample - - def forward(self, batch): - return self.generate(batch) - - -@MM_MODELS.register_module('qwen-vl-chat') -class QwenVLChat(QwenVLBase): - """Inference code of Qwen-VL-Chat. - - We load the Qwen model via Huggingface. - Args: - pretrained_path (str): Path to Qwen checkpoint or repo id. - prompt_constructor (dict): The config of prompt constructor. - post_processor (dict): The config of post processor. - is_caption_task (bool): Whether the task is caption task. - Defaults to False. - """ - - def __init__(self, - pretrained_path: str, - prompt_constructor: dict = None, - post_processor: dict = None, - is_caption_task: bool = False) -> None: - super().__init__(pretrained_path, prompt_constructor, post_processor, - is_caption_task) - - def generate(self, batch): - images = batch.pop('inputs') - images = torch.stack(images, dim=0) - format_input = self.prompt_constructor(batch) - query = self.tokenizer.from_list_format(format_input) - - raw_text, context_tokens = make_context( - self.tokenizer, - query, - system='You are a helpful assistant.', - chat_format=self.model.generation_config.chat_format, - ) - - input_ids = torch.tensor([context_tokens]).to(get_device()) - - inputs_embeds = self._build_embeds(images, input_ids) - pred = self.model.generate(input_ids=input_ids, - inputs_embeds=inputs_embeds) - - response = decode_tokens( - pred[0], - self.tokenizer, - raw_text_len=len(raw_text), - context_length=len(context_tokens), - chat_format=self.model.generation_config.chat_format, - verbose=False, - errors='replace') - - if self.post_processor: - response = self.post_processor(response) - - data_sample = batch['data_samples'][0] - if self.is_caption_task: - data_sample.pred_caption = response - else: - data_sample.pred_answer = response - return data_sample - - -def forward_hack(self, - input_ids: Optional[torch.LongTensor] = None, - past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, - attention_mask: Optional[torch.FloatTensor] = None, - token_type_ids: Optional[torch.LongTensor] = None, - position_ids: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.Tensor] = None, - encoder_attention_mask: 
Optional[torch.FloatTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None): - if past_key_values is None and input_ids is not None and torch.any( - input_ids == self.config.visual['image_start_id']): - bos_pos = torch.where( - input_ids == self.config.visual['image_start_id']) - eos_pos = torch.where( - input_ids == self.config.visual['image_start_id'] + 1) - assert (bos_pos[0] == eos_pos[0]).all() - img_pos = torch.stack((bos_pos[0], bos_pos[1], eos_pos[1]), dim=1) - images = [] - for i, a, b in img_pos: - image = input_ids[i][a + 1:b - 1].tolist() - image = image[:image.index(self.config.visual['image_start_id'] + - 2)] - images.append(bytes(image).decode('utf-8')) - - images = self.visual.encode(images) - assert images.shape[0] == len(images) - else: - images = None - - output_attentions = (output_attentions if output_attentions is not None - else self.config.output_attentions) - output_hidden_states = (output_hidden_states if output_hidden_states - is not None else self.config.output_hidden_states) - use_cache = use_cache if use_cache is not None else self.config.use_cache - return_dict = (return_dict - if return_dict is not None else self.config.use_return_dict) - - if input_ids is not None and inputs_embeds is not None: - raise ValueError( - 'You cannot specify both input_ids and inputs_embeds at the same time' # noqa - ) - elif input_ids is not None: - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_shape[-1]) - batch_size = input_ids.shape[0] - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - batch_size = inputs_embeds.shape[0] - else: - raise ValueError( - 'You have to specify either input_ids or inputs_embeds') - - device = input_ids.device if input_ids is not None else inputs_embeds.device # noqa - - if token_type_ids is not None: - token_type_ids = token_type_ids.view(-1, input_shape[-1]) - if position_ids is not None: - position_ids = position_ids.view(-1, input_shape[-1]) - - if past_key_values is None: - past_length = 0 - past_key_values = tuple([None] * len(self.h)) - else: - past_length = past_key_values[0][0].size(-2) - - if position_ids is None: - position_ids = torch.arange( - past_length, - input_shape[-1] + past_length, - dtype=torch.long, - device=device, - ) - position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) - - encoder_attention_mask = None - head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - - if inputs_embeds is None: - inputs_embeds = self.wte(input_ids) - - if batch_size <= 0: - raise ValueError('batch_size has to be defined and > 0') - attention_mask = self._prepare_decoder_attention_mask( - attention_mask, input_shape, inputs_embeds, past_length) - - hidden_states = inputs_embeds - - hidden_states = self.drop(hidden_states) - if images is not None: - for idx, (i, a, b) in enumerate(img_pos): - hidden_states[i][a + 1:b] = images[idx] - output_shape = input_shape + (hidden_states.size(-1), ) - - presents = () if use_cache else None - all_self_attentions = () if output_attentions else None - all_hidden_states = () if output_hidden_states else None - for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states, ) - - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - - def custom_forward(*inputs): - # None for 
past_key_value - return module(*inputs, use_cache, output_attentions) - - return custom_forward - - outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(block), - hidden_states, - None, - attention_mask, - head_mask[i], - encoder_hidden_states, - encoder_attention_mask, - ) - else: - outputs = block( - hidden_states, - layer_past=layer_past, - attention_mask=attention_mask, - head_mask=head_mask[i], - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_attention_mask, - use_cache=use_cache, - output_attentions=output_attentions, - ) - - hidden_states = outputs[0] - if use_cache is True: - presents = presents + (outputs[2 if output_attentions else 1], ) - - if output_attentions: - all_self_attentions = all_self_attentions + (outputs[1], ) - - hidden_states = self.ln_f(hidden_states) - hidden_states = hidden_states.view(output_shape) - # Add last hidden state - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states, ) - - if not return_dict: - return tuple(v for v in [hidden_states, presents, all_hidden_states] - if v is not None) - - return BaseModelOutputWithPast( - last_hidden_state=hidden_states, - past_key_values=presents, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - ) diff --git a/opencompass/multimodal/models/visualglm/__init__.py b/opencompass/multimodal/models/visualglm/__init__.py deleted file mode 100644 index e2d6753a..00000000 --- a/opencompass/multimodal/models/visualglm/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from .post_processor import (VisualGLMBasePostProcessor, - VisualGLMVSRPostProcessor) -from .prompt_constructor import (VisualGLMBasePromptConstructor, - VisualGLMIconQAPromptConstructor, - VisualGLMMMBenchPromptConstructor, - VisualGLMScienceQAPromptConstructor, - VisualGLMVQAPromptConstructor) -from .visualglm import VisualGLM - -__all__ = [ - 'VisualGLM', 'VisualGLMBasePostProcessor', 'VisualGLMVSRPostProcessor', - 'VisualGLMBasePromptConstructor', 'VisualGLMMMBenchPromptConstructor', - 'VisualGLMVQAPromptConstructor', 'VisualGLMScienceQAPromptConstructor', - 'VisualGLMIconQAPromptConstructor' -] diff --git a/opencompass/multimodal/models/visualglm/post_processor.py b/opencompass/multimodal/models/visualglm/post_processor.py deleted file mode 100644 index 8289fc8a..00000000 --- a/opencompass/multimodal/models/visualglm/post_processor.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import Any - -import torch - - -class VisualGLMBasePostProcessor: - """Base post processor for VisualGLM.""" - - def __init__(self) -> None: - pass - - def __call__(self, output_token: torch.tensor, tokenizer: Any) -> str: - return tokenizer.decode(output_token) - - -class VisualGLMVSRPostProcessor(VisualGLMBasePostProcessor): - """VSR post processor for VisualGLM.""" - - def __init__(self) -> None: - super().__init__() - - def __call__(self, output_token: torch.tensor, tokenizer: Any) -> str: - output_text = tokenizer.decode(output_token) - if 'yes' in output_text.lower(): - return 'yes' - elif 'no' in output_text.lower(): - return 'no' - else: - return 'unknown' diff --git a/opencompass/multimodal/models/visualglm/prompt_constructor.py b/opencompass/multimodal/models/visualglm/prompt_constructor.py deleted file mode 100644 index 68bea8e4..00000000 --- a/opencompass/multimodal/models/visualglm/prompt_constructor.py +++ /dev/null @@ -1,208 +0,0 @@ -class VisualGLMMMBenchPromptConstructor: - """MMBench prompt constructor for VisualGLM. - - Args: - system_prompt (str): System prompt. 
(Default: '')
-        human_prompt (str): Human prompt. (Default: 'Q:')
-        assistant_prompt (str): Assistant prompt. (Default: 'A:')
-    """
-
-    def __init__(self,
-                 system_prompt: str = '',
-                 human_prompt: str = 'Q:',
-                 assistant_prompt: str = 'A:') -> None:
-        self.system_prompt = system_prompt
-        self.human_prompt = human_prompt
-        self.assistant_prompt = assistant_prompt
-
-    def __call__(self, batch: dict) -> tuple:
-        """Construct prompt.
-
-        Args:
-            batch (dict): Input data containing image and data_samples.
-
-        Returns:
-            A tuple containing images, prompt, data_samples and image_position.
-        """
-
-        assert len(batch['inputs']) == 1
-        image = batch.pop('inputs')[0].unsqueeze(0)
-        data_sample = batch.pop('data_samples')[0]
-        img_prompt = '<img></img>'
-        if data_sample.get('context') is not None:
-            prompt = img_prompt + self.system_prompt + self.human_prompt + data_sample.context + ' ' + data_sample.question + ' ' + data_sample.options  # noqa
-        else:
-            prompt = img_prompt + self.system_prompt + self.human_prompt + data_sample.question + ' ' + data_sample.options  # noqa
-        prompt += self.assistant_prompt
-        image_position = prompt.rfind('<img>') + 5
-
-        return image, prompt, data_sample, image_position
-
-
-class VisualGLMBasePromptConstructor:
-    """Base prompt constructor for VisualGLM.
-
-    The prompt will concat <img></img> and the given system prompt.
-    Args:
-        system_prompt (str): System prompt. (Default: '')
-        human_prompt (str): Human prompt. (Default: 'Q:')
-        assistant_prompt (str): Assistant prompt. (Default: 'A:')
-    """
-
-    def __init__(self,
-                 system_prompt: str = '',
-                 human_prompt: str = 'Q:',
-                 assistant_prompt: str = 'A:') -> None:
-        self.prompt = system_prompt
-        self.human_prompt = human_prompt
-        self.assistant_prompt = assistant_prompt
-
-    def __call__(self, batch: dict) -> tuple:
-        """Construct prompt.
-
-        Args:
-            batch (dict): Input data containing image and data_samples.
-
-        Returns:
-            A tuple containing images, prompt, data_samples and image_position.
-        """
-
-        assert len(batch['inputs']) == 1
-        image = batch.pop('inputs')[0].unsqueeze(0)
-        data_sample = batch.pop('data_samples')[0]
-
-        # generate text prompt
-        prompt = '<img></img>' + self.human_prompt + self.prompt + self.assistant_prompt  # noqa
-
-        image_position = prompt.rfind('<img>') + 5
-
-        return image, prompt, data_sample, image_position
-
-
-class VisualGLMVQAPromptConstructor(VisualGLMBasePromptConstructor):
-    """VQA prompt constructor for VisualGLM.
-
-    The prompt will concat <img></img>, the question and the system prompt.
-    Args:
-        system_prompt (str): System prompt. (Default: '')
-        human_prompt (str): Human prompt. (Default: 'Q:')
-        assistant_prompt (str): Assistant prompt. (Default: 'A:')
-    """
-
-    def __init__(self,
-                 system_prompt='',
-                 human_prompt: str = 'Q:',
-                 assistant_prompt: str = 'A:') -> None:
-        super().__init__(system_prompt, human_prompt, assistant_prompt)
-
-    def __call__(self, batch: dict) -> tuple:
-        """Construct prompt.
-
-        Args:
-            batch (dict): Input data containing image and data_samples.
-
-        Returns:
-            A tuple containing images, prompt, data_samples and image_position.
- """ - - assert len(batch['inputs']) == 1 - image = batch.pop('inputs')[0].unsqueeze(0) - data_sample = batch.pop('data_samples')[0] - - # generate text prompt - question = data_sample.get('question') - prompt = '' + self.human_prompt + question + self.prompt - prompt += '\n' + self.assistant_prompt - - image_position = prompt.rfind('') + 5 - - return image, prompt, data_sample, image_position - - -class VisualGLMScienceQAPromptConstructor(VisualGLMBasePromptConstructor): - """ScienceQA prompt constructor for VisualGLM. - - The prompt will concat image and all terms in a question. - Args: - system_prompt (str): System prompt. (Default: '') - human_prompt (str): Human prompt. (Default: 'Q:') - assistant_prompt (str): Assistant prompt. (Default: 'A:') - """ - - choice_mapping = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F'} - - def __init__(self, - system_prompt='', - human_prompt: str = 'Q:', - assistant_prompt: str = 'A:') -> None: - super().__init__(system_prompt, human_prompt, assistant_prompt) - - def __call__(self, batch: dict) -> tuple: - """Construct prompt. - - Args: - batch (dict): Input data containing image and data_samples. - - Returns: - A tuple containing images, prompt, data_samples and image_position. - """ - - assert len(batch['inputs']) == 1 - image = batch.pop('inputs')[0].unsqueeze(0) - data_sample = batch.pop('data_samples')[0] - - questions = 'Question: ' + data_sample.get('question') - choices = data_sample.get('choices') - choices = [ - f'({self.choice_mapping[i]}) ' + item - for i, item in enumerate(choices) - ] - choices = 'Choices: ' + ' '.join(choices) + '\n' - contexts = 'Context: ' + data_sample.get('hint') + '\n' - - # generate text prompt - prompt = '' + self.human_prompt + contexts + questions + choices + self.prompt + self.assistant_prompt # noqa - image_position = prompt.rfind('') + 5 - - return image, prompt, data_sample, image_position - - -class VisualGLMIconQAPromptConstructor(VisualGLMBasePromptConstructor): - """IconQA prompt constructor for VisualGLM. - - The prompt will concat , the question and the system prompt. - Args: - system_prompt (str): System prompt. (Default: '') - human_prompt (str): Human prompt. (Default: 'Q:') - assistant_prompt (str): Assistant prompt. (Default: 'A:') - """ - - def __init__(self, - system_prompt='', - human_prompt: str = 'Q:', - assistant_prompt: str = 'A:') -> None: - super().__init__(system_prompt, human_prompt, assistant_prompt) - - def __call__(self, batch: dict) -> tuple: - """Construct prompt. - - Args: - batch (dict): Input data containing image and data_samples. - - Returns: - A tuple containing images, prompt, data_samples and image_position. 
- """ - - assert len(batch['inputs']) == 1 - image = batch.pop('inputs')[0].unsqueeze(0) - data_sample = batch.pop('data_samples')[0] - - questions = data_sample.get('question') + '\n' - choices = data_sample.get('choices') - choices = 'Options: ' + ', '.join(choices) + '.\n' - - # generate text prompt - prompt = '' + self.human_prompt + questions + choices + self.prompt + self.assistant_prompt # noqa - image_position = prompt.rfind('') + 5 - - return image, prompt, data_sample, image_position diff --git a/opencompass/multimodal/models/visualglm/visualglm.py b/opencompass/multimodal/models/visualglm/visualglm.py deleted file mode 100644 index a9534d94..00000000 --- a/opencompass/multimodal/models/visualglm/visualglm.py +++ /dev/null @@ -1,104 +0,0 @@ -from typing import Optional - -import mmengine -import torch -import torch.nn as nn -from mmengine.device import get_device -from transformers import AutoModel, AutoTokenizer - -from opencompass.registry import MM_MODELS - - -@MM_MODELS.register_module('visualglm') -class VisualGLM(nn.Module): - """Inference code of VisualGLM. - - We load the visualGLM model via Huggingface. - Args: - pretrained_path (str): Path to visualGLM checkpoint or repo id. - prompt_constructor (dict): The config of prompt constructor. - post_processor (dict): The config of post processor. - is_caption_task (bool): Whether the task is caption task. - Defaults to False. - gen_kwargs (dict): Customize generate function arguments. - Defaults to None. - """ - - def __init__(self, - pretrained_path: str, - prompt_constructor: dict, - post_processor: dict, - is_caption_task: bool = False, - gen_kwargs: Optional[dict] = None) -> None: - super().__init__() - self.tokenizer = AutoTokenizer.from_pretrained(pretrained_path, - trust_remote_code=True) - self.model = AutoModel.from_pretrained(pretrained_path, - trust_remote_code=True).half() - self.prompt_constructor = mmengine.registry.build_from_cfg( - prompt_constructor, MM_MODELS) - self.post_processor = mmengine.registry.build_from_cfg( - post_processor, MM_MODELS) - - if gen_kwargs: - self.gen_kwargs = gen_kwargs - else: - self.gen_kwargs = dict(max_length=1024, - min_length=100, - do_sample=True, - temperature=0.8, - top_p=0.4, - top_k=100, - repetition_penalty=1.2) - - self.is_caption_task = is_caption_task - - def encode_by_tokenizer(self, prompt, image_position): - - input0 = self.tokenizer.encode(prompt[:image_position], - add_special_tokens=False) - input1 = [self.tokenizer.unk_token_id] * self.model.image_length - input2 = self.tokenizer.encode(prompt[image_position:], - add_special_tokens=False) - input_all = sum([input0, input1, input2], []) - input_all = self.tokenizer.build_inputs_with_special_tokens(input_all) - input_all = torch.tensor(input_all, dtype=torch.long).to(get_device()) - input_all = input_all.unsqueeze(0) - - pre_image_len = len(input0) - - return input_all, pre_image_len - - def generate(self, batch): - # process input - image, prompt, data_sample, image_position = self.prompt_constructor( - batch) - image = image.to(self.model.dtype).to(get_device()) - - # tokenize - input_all, pre_image_len = self.encode_by_tokenizer( - prompt, image_position) - - # build input param - inputs = { - 'input_ids': input_all, - 'pre_image_length': pre_image_len, - 'images': image - } - - # generate answer - outputs = self.model.generate(**inputs, **self.gen_kwargs) - - # format output - outputs = outputs.tolist()[0][input_all.shape[1]:] - answer = self.post_processor(outputs, self.tokenizer) - - if self.is_caption_task: 
- data_sample.pred_caption = answer - else: - data_sample.pred_answer = answer - - return data_sample - - def forward(self, batch): - return self.generate(batch) diff --git a/opencompass/partitioners/__init__.py b/opencompass/partitioners/__init__.py index eb2df33b..ead3704d 100644 --- a/opencompass/partitioners/__init__.py +++ b/opencompass/partitioners/__init__.py @@ -1,4 +1,3 @@ -from .mm_naive import * # noqa: F401, F403 from .naive import * # noqa: F401, F403 from .num_worker import * # noqa: F401, F403 from .size import * # noqa: F401, F403 diff --git a/opencompass/partitioners/mm_naive.py b/opencompass/partitioners/mm_naive.py deleted file mode 100644 index 817b1276..00000000 --- a/opencompass/partitioners/mm_naive.py +++ /dev/null @@ -1,119 +0,0 @@ -from copy import deepcopy -from typing import Dict, List - -from mmengine.config import Config, ConfigDict - -from opencompass.registry import PARTITIONERS - -from .base import BasePartitioner - - -@PARTITIONERS.register_module() -class MultimodalNaivePartitioner(BasePartitioner): - """Multimodal naive task partitioner. - - This partitioner will generate a task for each - model-dataset-evaluator pair. - - Args: - config (ConfigDict): The full config dict. - """ - - def partition(self, models: List[ConfigDict], datasets: List[ConfigDict], - evaluators: List[ConfigDict], load_froms: List[ConfigDict], - work_dir: str, num_gpus: int, num_procs: int, - launcher: str) -> List[Dict]: - """Partition model-dataset pairs into tasks. Each task is defined as a - dict and will run independently as a unit. Its structure is as follows: - - .. code-block:: python - - { - 'models': [], # a list of model configs - 'datasets': [], # a list of dataset configs - 'evaluators': [], # a list of evaluator configs - 'load_froms': [], # a list of load_from paths - 'work_dir': '', # the work dir - 'num_gpus': int, # integer, number of gpus for each task - 'num_procs': int, # integer, number of gpus on single machine - 'launcher': str, # string, how to launch distributed training - } - - Args: - models (List[ConfigDict]): A list of model configs. - datasets (List[ConfigDict]): A list of dataset configs. - evaluators (List[ConfigDict]): A list of evaluator configs. - load_froms (List[ConfigDict]): A list of load_from paths. - work_dir (str): The work dir for the task. - num_gpus (int): Number of gpus for each task. - num_procs (int): Number of gpus on single machine. - launcher (str): How to launch distributed training. - Only `slurm`, `pytorch` and `mpi` are available. - - Returns: - List[Dict]: A list of tasks. - """ - - tasks = [] - for model, dataset, evaluator, load_from in zip( - models, datasets, evaluators, load_froms): - task = Config({ - 'model': model, - 'dataset': dataset, - 'evaluator': evaluator, - 'load_from': load_from, - 'work_dir': work_dir, - 'num_gpus': num_gpus, - 'num_procs': num_procs, - 'launcher': launcher - }) - tasks.append(task) - - return tasks - - def __call__(self, cfg: ConfigDict) -> List[Dict]: - """Generate tasks from config. Each task is defined as a - dict and will run independently as a unit. Its structure is as - follows: - - .. 
code-block:: python - - { - 'models': [], # a list of model configs - 'datasets': [], # a list of dataset configs - 'evaluators': [], # a list of evaluator configs - 'load_froms': [], # a list of load_from paths - 'work_dir': '', # the work dir - 'num_gpus': int, # integer, number of gpus for each task - 'num_procs': int, # integer, number of gpus on single machine - } - - Args: - cfg (ConfigDict): The config dict, containing "models", "dataset" - and "work_dir" keys. - - Returns: - List[Dict]: A list of tasks. - """ - cfg = deepcopy(cfg) - models = cfg['models'] - datasets = cfg['datasets'] - evaluators = cfg['evaluators'] - load_froms = cfg['load_froms'] - work_dir = cfg['work_dir'] - num_gpus = cfg['num_gpus'] - num_procs = cfg['num_procs'] - launcher = cfg['launcher'] - - tasks = self.partition(models, datasets, evaluators, load_froms, - work_dir, num_gpus, num_procs, launcher) - - self.logger.info(f'Partitioned into {len(tasks)} tasks.') - for i, task in enumerate(tasks): - model_name = task['model']['type'] - dataset_name = task['dataset']['dataset']['type'] - evaluator_name = task['evaluator'][0]['type'] - self.logger.debug( - f'Task {i}: {model_name}-{dataset_name}-{evaluator_name}') - - return tasks diff --git a/opencompass/registry.py b/opencompass/registry.py index ceddef83..15ea9a31 100644 --- a/opencompass/registry.py +++ b/opencompass/registry.py @@ -1,8 +1,6 @@ from typing import Callable, List, Optional, Type, Union -from mmengine.registry import DATASETS as MMENGINE_DATASETS from mmengine.registry import METRICS as MMENGINE_METRICS -from mmengine.registry import MODELS as MMENGINE_MODELS from mmengine.registry import Registry as OriginalRegistry @@ -39,15 +37,9 @@ ICL_PROMPT_TEMPLATES = Registry( locations=['opencompass.openicl.icl_prompt_template']) ICL_EVALUATORS = Registry('icl_evaluators', locations=['opencompass.openicl.icl_evaluator']) -DATASETS = Registry('mm_datasets', - parent=MMENGINE_DATASETS, - locations=['opencompass.multimodal.datasets']) METRICS = Registry('metric', parent=MMENGINE_METRICS, locations=['opencompass.metrics']) -MM_MODELS = Registry('mm_model', - parent=MMENGINE_MODELS, - locations=['opencompass.multimodal.models']) TOT_WRAPPER = Registry('tot_wrapper', locations=['opencompass.datasets']) diff --git a/opencompass/tasks/__init__.py b/opencompass/tasks/__init__.py index 40de03ad..035662f7 100644 --- a/opencompass/tasks/__init__.py +++ b/opencompass/tasks/__init__.py @@ -1,4 +1,3 @@ -from .mm_infer import * # noqa: F401, F403 from .openicl_attack import * # noqa: F401, F403 from .openicl_eval import * # noqa: F401, F403 from .openicl_infer import * # noqa: F401, F403 diff --git a/opencompass/tasks/mm_infer.py b/opencompass/tasks/mm_infer.py deleted file mode 100644 index 51c98fac..00000000 --- a/opencompass/tasks/mm_infer.py +++ /dev/null @@ -1,160 +0,0 @@ -import argparse -import json -import os -import os.path as osp -import random -import time -from typing import List, Sequence - -import mmengine -import torch -import torch.distributed as dist -from mmengine.config import Config, ConfigDict -from mmengine.device import get_device -from mmengine.dist import init_dist -from mmengine.evaluator import Evaluator -from mmengine.logging import print_log -from mmengine.model.wrappers import MMDistributedDataParallel -from mmengine.utils import track_iter_progress - -from opencompass.registry import MM_MODELS, TASKS -from opencompass.utils import get_logger - - -def build_model(cfg): - model = MM_MODELS.build(cfg['model']) - load_from = 
cfg.get('load_from', None) - if load_from is not None: - state_dict = torch.load(cfg['load_from'], map_location='cpu') - if 'model' in state_dict: - state_dict = state_dict['model'] - elif 'state_dict' in state_dict: - state_dict = state_dict['state_dict'] - msg = model.load_state_dict(state_dict, strict=False) - print_log(msg) - model.to(get_device()) - if dist.is_initialized(): - model = MMDistributedDataParallel( - model, - device_ids=[int(os.environ['LOCAL_RANK'])], - broadcast_buffers=False) - return model - - -@TASKS.register_module(force=(__name__ == '__main__')) # A hack for script run -class MultimodalInferTask: - """Multimodal Inference Task. - - This task is used to run the inference process. - """ - - def __init__(self, cfg: ConfigDict): - self.num_gpus = cfg.get('num_gpus', 0) - self.num_procs = cfg.get('num_procs', 1) - self.dataloader = cfg.get('dataset') - self.model = cfg.get('model') - self.evaluator = cfg.get('evaluator') - self.cfg = cfg - self.logger = get_logger() - - @property - def name(self) -> str: - model_name = self.model['type'] - dataset_name = self.dataloader['dataset']['type'] - evaluator_name = self.evaluator[0]['type'] - return f'{model_name}-{dataset_name}-{evaluator_name}' - - def get_log_path(self, file_extension: str = 'json') -> str: - """Get the path to the log file. - - Args: - file_extension (str): The file extension of the log file. - Default: 'json'. - """ - model_name = self.model['type'] - dataset_name = self.dataloader['dataset']['type'] - evaluator_name = self.evaluator[0]['type'] - - return osp.join(self.cfg.work_dir, model_name, dataset_name, - f'{evaluator_name}.{file_extension}') - - def get_output_paths(self, file_extension: str = 'json') -> List[str]: - """Get the path to the output file. - - Args: - file_extension (str): The file extension of the log file. - Default: 'json'. - """ - model_name = self.model['type'] - dataset_name = self.dataloader['dataset']['type'] - evaluator_name = self.evaluator[0]['type'] - - return [ - osp.join(self.cfg.work_dir, model_name, dataset_name, - f'{evaluator_name}.{file_extension}') - ] - - def get_command(self, cfg_path, template): - """Get the command template for the task. - - Args: - cfg_path (str): The path to the config file of the task. - template (str): The template which have '{task_cmd}' to format - the command. 
- """ - script_path = __file__ - if self.num_gpus > 0: - port = random.randint(12000, 32000) - command = (f'torchrun --master_port={port} ' - f'--nproc_per_node {self.num_procs} ' - f'{script_path} {cfg_path}') - else: - command = f'python {script_path} {cfg_path}' - - return template.format(task_cmd=command) - - def run(self): - from mmengine.runner import Runner - - # only support slurm, pytorch, mpi - init_dist(self.cfg.launcher) - self.logger.info(f'Task {self.name}') - # build dataloader - dataloader = Runner.build_dataloader(self.dataloader) - # build model - model = build_model(self.cfg) - model.eval() - # build evaluator - evaluator = Evaluator(self.evaluator) - - for batch in track_iter_progress(dataloader): - if dist.is_initialized(): - data_samples = model.module.forward(batch) - else: - data_samples = model.forward(batch) - if not isinstance(data_samples, Sequence): - data_samples = [data_samples] - evaluator.process(data_samples) - - metrics = evaluator.evaluate(len(dataloader.dataset)) - metrics_file = self.get_output_paths()[0] - mmengine.mkdir_or_exist(osp.split(metrics_file)[0]) - with open(metrics_file, 'w') as f: - json.dump(metrics, f) - - -def parse_args(): - parser = argparse.ArgumentParser(description='Model Inferencer') - parser.add_argument('config', help='Config file path') - args = parser.parse_args() - return args - - -if __name__ == '__main__': - args = parse_args() - cfg = Config.fromfile(args.config) - start_time = time.time() - inferencer = MultimodalInferTask(cfg) - inferencer.run() - end_time = time.time() - get_logger().info(f'time elapsed: {end_time - start_time:.2f}s') diff --git a/opencompass/utils/run.py b/opencompass/utils/run.py index de6a8724..3e64526f 100644 --- a/opencompass/utils/run.py +++ b/opencompass/utils/run.py @@ -270,27 +270,6 @@ def change_accelerator(models, accelerator): return model_accels -def exec_mm_infer_runner(tasks, args, cfg): - """execute multimodal infer runner according to args.""" - if args.slurm: - runner = SlurmRunner(dict(type='MultimodalInferTask'), - max_num_workers=args.max_num_workers, - partition=args.partition, - quotatype=args.quotatype, - retry=args.retry, - debug=args.debug, - lark_bot_url=cfg['lark_bot_url']) - elif args.dlc: - raise NotImplementedError('Currently, we do not support evaluating \ - multimodal models on dlc.') - else: - runner = LocalRunner(task=dict(type='MultimodalInferTask'), - max_num_workers=args.max_num_workers, - debug=args.debug, - lark_bot_url=cfg['lark_bot_url']) - runner(tasks) - - def get_config_type(obj) -> str: return f'{obj.__module__}.{obj.__name__}' diff --git a/tools/eval_mmbench.py b/tools/eval_mmbench.py deleted file mode 100644 index d141045a..00000000 --- a/tools/eval_mmbench.py +++ /dev/null @@ -1,403 +0,0 @@ -# Usage: python eval_mmbench.py mmbench_dev_inference_result.xlsx -import argparse -import json -import os.path as osp -import pickle -import random as rd -import string -from collections import defaultdict - -import numpy as np -import pandas as pd -from tqdm import tqdm - -from opencompass.models import OpenAI - -fout = None - - -# Utils -def double_log(msg, fout=None): - print(msg) - if fout is not None: - fout.write(str(msg) + '\n') - fout.flush() - - -def dump(data, f): - - def dump_pkl(data, pth): - pickle.dump(data, open(pth, 'wb')) - - def dump_json(data, pth): - json.dump(data, open(pth, 'w')) - - def dump_jsonl(data, f): - lines = [json.dumps(x, ensure_ascii=False) for x in data] - with open(f, 'w', encoding='utf8') as fout: - fout.write('\n'.join(lines)) - 
- def dump_xlsx(data, f): - data.to_excel(f, index=False) - - def dump_csv(data, f): - data.to_csv(f, index=False) - - def dump_tsv(data, f): - data.to_csv(f, sep='\t', index=False) - - handlers = dict(pkl=dump_pkl, - json=dump_json, - jsonl=dump_jsonl, - xlsx=dump_xlsx, - csv=dump_csv, - tsv=dump_tsv) - suffix = f.split('.')[-1] - return handlers[suffix](data, f) - - -def load(f): - - def load_pkl(pth): - return pickle.load(open(pth, 'rb')) - - def load_json(pth): - return json.load(open(pth, 'r', encoding='utf-8')) - - def load_jsonl(f): - lines = open(f, encoding='utf-8').readlines() - lines = [x.strip() for x in lines] - if lines[-1] == '': - lines = lines[:-1] - data = [json.loads(x) for x in lines] - return data - - def load_xlsx(f): - return pd.read_excel(f) - - def load_csv(f): - return pd.read_csv(f) - - def load_tsv(f): - return pd.read_csv(f, sep='\t') - - handlers = dict(pkl=load_pkl, - json=load_json, - jsonl=load_jsonl, - xlsx=load_xlsx, - csv=load_csv, - tsv=load_tsv) - suffix = f.split('.')[-1] - return handlers[suffix](f) - - -# Accuracy Report -def report_acc(df, group='category'): - assert 'split' in df - assert group in [None, 'category', 'l2-category'] - - res = defaultdict(list) - res['split'] = ['full', 'dev', 'test'] - if group is None: - res['overall'] = [ - np.mean(df['hit']), - np.mean(df[df['split'] == 'dev']['hit']), - np.mean(df[df['split'] == 'test']['hit']) - ] - return pd.DataFrame(res) - - elif group in df: - abilities = list(set(df[group])) - abilities.sort() - for ab in abilities: - sub_df = df[df[group] == ab] - res[ab] = [ - np.mean(sub_df['hit']), - np.mean(sub_df[sub_df['split'] == 'dev']['hit']), - np.mean(sub_df[sub_df['split'] == 'test']['hit']) - ] - return pd.DataFrame(res) - - -# Prompt Building -def build_option_str(option_list): - chars = string.ascii_uppercase - s = 'There are several options: \n' - for c, opt in zip(chars, option_list): - if not pd.isna(opt): - s += f'{c}. {opt}\n' - else: - return s - return s - - -def extract_options(item): - options = [] - for c in 'ABCD': - if c in item and not pd.isna(item[c]): - options.append(item[c]) - else: - return options - return options - - -def build_choices(item): - ret = {} - for ch in 'ABCD': - if not pd.isna(item[ch]): - ret[ch] = item[ch] - return ret - - -def build_prompt(question, options, prediction): - tmpl = ( - 'You are an AI assistant who will help me to match an answer ' - 'with several options of a single-choice question. ' - 'You are provided with a question, several options, and an answer, ' - 'and you need to find which option is most similar to the answer. ' - 'If the meaning of all options are significantly different ' - 'from the answer, output E. ' - 'Your should output a single uppercase character in A, B, C, D ' - '(if they are valid options), and E. \n' - 'Example 1: \n' - 'Question: What is the main object in image?\nOptions: A. teddy bear ' - 'B. rabbit C. cat D. dog\nAnswer: a cute teddy bear\nYour output: A\n' - 'Example 2: \n' - 'Question: What is the main object in image?\nOptions: A. teddy bear ' - 'B. rabbit C. cat D. 
dog\nAnswer: Spider\nYour output: E\n' - 'Example 3: \n' - 'Question: {}?\nOptions: {}\nAnswer: {}\nYour output: ') - return tmpl.format(question, options, prediction) - - -# Prefetch Answers -def can_infer_option(answer, num_choice=5): - choices = string.ascii_uppercase[:num_choice] - if 'Failed to obtain answer via API' in answer: - return False - - def count(splits, choices='ABCD', prefix='', suffix=''): - cnt = 0 - for c in choices: - if prefix + c + suffix in splits: - cnt += 1 - return cnt - - splits = [x.strip() for x in answer.split()] - if count(splits, choices) == 1: - for ch in choices: - if 'A' in splits and len(splits) > 3: - double_log( - f'A might be a quantifier in the string: {answer}. ', fout) - break - if ch in splits: - return ch - tups = [('', '.'), ('', ','), ('', ':'), ('', ')'), ('', ').'), ('(', ')'), - ('(', ').'), (':', ''), (':', ','), (':', '.'), (':', ')'), - (':', ').')] - for tup in tups: - if count(splits, choices, prefix=tup[0], suffix=tup[1]) == 1: - for ch in choices: - if tup[0] + ch + tup[1] in splits: - return ch - return False - - -def can_infer_text(answer, choices): - answer = answer.lower() - assert isinstance(choices, dict) - for k in choices: - assert k in 'ABCD' - choices[k] = str(choices[k]).lower() - cands = [] - for k in choices: - if choices[k] in answer: - cands.append(k) - if len(cands) == 1: - return cands[0] - return False - - -def can_infer(answer, choices): - copt = can_infer_option(answer) - return copt if copt else can_infer_text(answer, choices) - - -def prefetch_answer(item): - choices = build_choices(item) - return can_infer(item['prediction'], choices) - - -# Extract answer from a single record -def extract_answer_from_item(model, item): - # It will return: (pred, raw, llm_time) - options = extract_options(item) - option_str = build_option_str(options) - - prompt = build_prompt(item['question'], option_str, item['prediction']) - retry = 3 - choices = build_choices(item) - - ret = can_infer(item['prediction'], choices) - if ret: - return ret, item['prediction'] - - while retry: - ans = model.generate([prompt])[0] - if 'Failed to obtain answer via API' in ans: - msg = 'GPT API failed to answer. ' - double_log(msg, fout) - retry -= 1 - else: - ret = can_infer(ans, choices) - if ret: - return ret, ans - else: - double_log( - f'GPT output includes 0 / >1 letter in "ABCD": {ans}', - fout) - retry -= 1 - - if retry == 0: - num_options = sum([ch in item for ch in 'ABCD']) - if num_options >= 2: - chars = string.ascii_uppercase[:num_options] - chars = chars + 'E' - num_options += 1 - tmp = rd.randint(0, num_options - 1) - return chars[ - tmp], 'Failed to predict, thus randomly generate one. 
' - - -# Extract answer from multiple rolling records -def eval_sub_data(model, sub_data, answer_map): - lt = len(sub_data) - GT, PRED = [], [] - for i in range(lt): - item = sub_data.iloc[i] - idx = item['index'] - GT.append(answer_map[idx]) - PRED.append(prefetch_answer(item)) - if PRED[-1] and (GT[-1] != PRED[-1]): - return 0 - - for i in range(lt): - if PRED[i]: - continue - else: - ret, _ = extract_answer_from_item(model, sub_data.iloc[i]) - PRED[i] = ret - if PRED[i] != GT[i]: - return 0 - return 1 - - -# Evaluate Results -def eval_result(eval_file, eval_method, meta_file): - rd.seed(2680) - assert eval_method == 'openai' - # Set a large retry number to avoid failure - model = OpenAI('gpt-3.5-turbo-0613', retry=99) - - double_log(f'Evaluating {eval_file}', fout) - - result_file = eval_file.replace('.xlsx', f'_{eval_method}_result.pkl') - result = {} - if osp.exists(result_file): - result = load(result_file) - - data = load(eval_file) - data = data.sort_values(by='index') - data['prediction'] = [str(x) for x in data['prediction']] - for k in data.keys(): - data[k.lower() if k not in 'ABCD' else k] = data.pop(k) - - meta = load(meta_file) - - data_main = data[data['index'] < int(1e6)] - cate_map = {i: c for i, c in zip(meta['index'], meta['category'])} - l2_cate_map = {i: c for i, c in zip(meta['index'], meta['l2-category'])} - split_map = {i: c for i, c in zip(meta['index'], meta['split'])} - answer_map = {i: c for i, c in zip(meta['index'], meta['answer'])} - - lt = len(data_main) - hit, tot = 0, 0 - - for i in tqdm(range(lt)): - # Dealing with the normal part - item_main = data_main.iloc[i] - idx = item_main['index'] - - if idx in result: - correct = result[idx] - assert correct in [0, 1] - hit += correct - tot += 1 - continue - - sub_data = data[data['index'] % int(1e6) == idx] - ret = eval_sub_data(model, sub_data, answer_map) - result[idx] = ret - hit += ret - tot += 1 - - dump(result, result_file) - - if (i + 1) % 10 == 0: - double_log((f'Evaluating {eval_file}: {i + 1}/{lt}, ' - f'Acc: {hit / tot * 100: .2f}%. '), fout) - - dump(data_main, 'tmp.xlsx') - data_main = load('tmp.xlsx') - - res = load(result_file) - indices = data_main['index'] - data_main['hit'] = [res[i] for i in indices] - data_main['split'] = [split_map[i] for i in indices] - main_idx = data_main['index'] - data_main['category'] = [cate_map[i] for i in main_idx] - data_main['l2-category'] = [l2_cate_map[i] for i in main_idx] - - # load split - dump(data_main, eval_file.replace('.xlsx', f'_{eval_method}_result.xlsx')) - data_main = load(eval_file.replace('.xlsx', f'_{eval_method}_result.xlsx')) - - overall = report_acc(data_main, None) - dump(overall, eval_file.replace('.xlsx', '_overall.csv')) - double_log(overall) - - l2 = report_acc(data_main, 'l2-category') - dump(l2, eval_file.replace('.xlsx', '_l2.csv')) - double_log(l2) - - leaf = report_acc(data_main, 'category') - dump(leaf, eval_file.replace('.xlsx', '_leaf.csv')) - double_log(leaf) - - if fout is not None: - fout.close() - - return overall, l2, leaf - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Evaluate Inference Results of MMBench-DEV SPLIT. ') - parser.add_argument('result', - type=str, - help='The path to your inference result. ') - parser.add_argument('--meta', - type=str, - default='data/mmbench_dev_20230712.tsv', - help=('The path to your meta file (dev). ' - 'Downloaded from MMBench website. 
')) - args = parser.parse_args() - return args - - -if __name__ == '__main__': - args = parse_args() - log_pth = args.result.replace('.xlsx', '_openai_eval.log') - fout = open(log_pth, 'a') - - acc, l2, leaf = eval_result(args.result, 'openai', args.meta)
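
Note for users of the removed tools/eval_mmbench.py: the script matched a model prediction to an MMBench option in two stages, first trying to read an explicit option letter out of the raw prediction, then falling back to matching the option text, and only calling the GPT judge when both failed. The snippet below is a minimal, self-contained sketch of that two-stage matching for anyone who wants to keep it outside OpenCompass. It is not the removed implementation; the names infer_option, infer_by_text and match_answer, as well as the simplified punctuation-stripping rule, are illustrative assumptions (the original script additionally guarded against "A" being used as an article rather than an option letter).

import string


def infer_option(prediction: str, num_choices: int = 4):
    # Return an explicit option letter found in the prediction, if unambiguous.
    letters = string.ascii_uppercase[:num_choices]
    tokens = [tok.strip('().,:') for tok in prediction.split()]
    hits = [ch for ch in letters if ch in tokens]
    return hits[0] if len(hits) == 1 else None


def infer_by_text(prediction: str, choices: dict):
    # Fall back to matching an option's text inside the prediction.
    pred = prediction.lower()
    hits = [k for k, v in choices.items() if str(v).lower() in pred]
    return hits[0] if len(hits) == 1 else None


def match_answer(prediction: str, choices: dict):
    # Two-stage matching: option letter first, option text second.
    return infer_option(prediction, len(choices)) or infer_by_text(prediction, choices)


if __name__ == '__main__':
    choices = {'A': 'teddy bear', 'B': 'rabbit', 'C': 'cat', 'D': 'dog'}
    print(match_answer('The answer is (A).', choices))  # A
    print(match_answer('a cute teddy bear', choices))   # A
    print(match_answer('Spider', choices))              # None, would go to the LLM judge

The removed script also applied an all-or-nothing rule over the circular-evaluation copies of each question (rows whose index agrees modulo 1e6): a question only counted as a hit when every permuted copy resolved to the ground-truth letter, which is worth preserving in any replacement pipeline.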