diff --git a/configs/setr/README.md b/configs/setr/README.md index 5673d9b..3a28635 100644 --- a/configs/setr/README.md +++ b/configs/setr/README.md @@ -36,6 +36,23 @@ This head has two version head. } ``` +## Usage + +You can download the pretrained model from [here](https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth). Then you can convert its keys with the script `vit2mmseg.py` in the tools directory. + +```shell +python tools/model_converters/vit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} +``` + +E.g. + +```shell +python tools/model_converters/vit2mmseg.py \ +jx_vit_large_p16_384-b3be5167.pth pretrain/vit_large_p16.pth +``` + +This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. + ## Results and models ### ADE20K diff --git a/configs/setr/setr_mla_512x512_160k_b8_ade20k.py b/configs/setr/setr_mla_512x512_160k_b8_ade20k.py index 6977dba..e1a07ce 100644 --- a/configs/setr/setr_mla_512x512_160k_b8_ade20k.py +++ b/configs/setr/setr_mla_512x512_160k_b8_ade20k.py @@ -8,7 +8,8 @@ model = dict( backbone=dict( img_size=(512, 512), drop_rate=0., - init_cfg=dict(type='Pretrained', checkpoint='mmcls://vit_large_p16')), + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), decode_head=dict(num_classes=150), auxiliary_head=[ dict( diff --git a/configs/setr/setr_naive_512x512_160k_b16_ade20k.py b/configs/setr/setr_naive_512x512_160k_b16_ade20k.py index 3b1f9d7..8ad8c9f 100644 --- a/configs/setr/setr_naive_512x512_160k_b16_ade20k.py +++ b/configs/setr/setr_naive_512x512_160k_b16_ade20k.py @@ -8,7 +8,8 @@ model = dict( backbone=dict( img_size=(512, 512), drop_rate=0., - init_cfg=dict(type='Pretrained', checkpoint='mmcls://vit_large_p16')), + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), decode_head=dict(num_classes=150), auxiliary_head=[ dict( diff --git a/configs/setr/setr_pup_512x512_160k_b16_ade20k.py 
b/configs/setr/setr_pup_512x512_160k_b16_ade20k.py index 68c3a2a..83997a2 100644 --- a/configs/setr/setr_pup_512x512_160k_b16_ade20k.py +++ b/configs/setr/setr_pup_512x512_160k_b16_ade20k.py @@ -8,7 +8,8 @@ model = dict( backbone=dict( img_size=(512, 512), drop_rate=0., - init_cfg=dict(type='Pretrained', checkpoint='mmcls://vit_large_p16')), + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), decode_head=dict(num_classes=150), auxiliary_head=[ dict( diff --git a/configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py b/configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py index 3c2fc3a..4237cd5 100644 --- a/configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py +++ b/configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py @@ -6,7 +6,8 @@ model = dict( pretrained=None, backbone=dict( drop_rate=0, - init_cfg=dict(type='Pretrained', checkpoint='mmcls://vit_large_p16')), + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), test_cfg=dict(mode='slide', crop_size=(768, 768), stride=(512, 512))) optimizer = dict( diff --git a/configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py b/configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py index 181f444..0c6621e 100644 --- a/configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py +++ b/configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py @@ -7,7 +7,8 @@ model = dict( pretrained=None, backbone=dict( drop_rate=0., - init_cfg=dict(type='Pretrained', checkpoint='mmcls://vit_large_p16')), + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), test_cfg=dict(mode='slide', crop_size=(768, 768), stride=(512, 512))) optimizer = dict( diff --git a/configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py b/configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py index 817a029..e108988 100644 --- a/configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py +++ 
b/configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py @@ -9,7 +9,8 @@ model = dict( pretrained=None, backbone=dict( drop_rate=0., - init_cfg=dict(type='Pretrained', checkpoint='mmcls://vit_large_p16')), + init_cfg=dict( + type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')), auxiliary_head=[ dict( type='SETRUPHead',