diff --git a/pytorch-image-models/timm/__pycache__/__init__.cpython-39.pyc b/pytorch-image-models/timm/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68ab4e6f1f4195831ea62e5c3ac7f7af367621eb Binary files /dev/null and b/pytorch-image-models/timm/__pycache__/__init__.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/__init__.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16d19be157fce9c53ea3574a2f8561be7e428df2 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/__init__.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/_builder.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/_builder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..942db61803276f2ef855fbf07aa654c2de7e378e Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/_builder.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/_efficientnet_blocks.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/_efficientnet_blocks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5429fe09919ceca97f1b3a3eed7912ee6aa85793 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/_efficientnet_blocks.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/_helpers.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/_helpers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f369fe83e0666e38a02479034056b8fc77f61975 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/_helpers.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/byoanet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/byoanet.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a965da04b34b760a367deecd7beaea8c57a49d64 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/byoanet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/byobnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/byobnet.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c0f46d37d5215f6b25fd8198f9e666d8b24f4bd Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/byobnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/cait.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/cait.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70b5b48c49033e12e436a9356863fb9613ac00eb Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/cait.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/convit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/convit.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41ef02bbc3c2a8af8c8df48afd8b18ec6c5707e1 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/convit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/convmixer.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/convmixer.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c01468215bb6e0091d8e1babe028e0a58c566f99 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/convmixer.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/convnext.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/convnext.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7426cb984b000b520ee90e8f6ea55c847ded4f1d Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/convnext.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/densenet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/densenet.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..119b3c8a3fb570fd48ef6973007f2e81913daece Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/densenet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/edgenext.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/edgenext.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..506f492f1b9b04352ee37916d081a8c1313a465f Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/edgenext.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/efficientnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/efficientnet.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..269b03f5e74fe1131afa4cf3b5f73535d271364b Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/efficientnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/gcvit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/gcvit.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f28ec5f84da6ff1a3b8032f58bf46a187c150065 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/gcvit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/ghostnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/ghostnet.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..987df78cc1eb5ded2d87122ed9f573a1f2a22d22 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/ghostnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/hrnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/hrnet.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1f296e78d2152645ef5d4fe0c5a61bc8d2c53ad Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/hrnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/maxxvit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/maxxvit.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbc96521cd37d05ce0abbe1a3ae44cb6e574663a Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/maxxvit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/mobilenetv3.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/mobilenetv3.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..588792c3935a98d49c1537359e5c9135ee193117 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/mobilenetv3.cpython-39.pyc differ diff --git 
a/pytorch-image-models/timm/models/__pycache__/nfnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/nfnet.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..115e8c7256bd0a9ec501bc5f9beb9e56ef08d520 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/nfnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/pit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/pit.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0dea1b22ae7a365dc69933bcf2d9c66ed12b524 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/pit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/pvt_v2.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/pvt_v2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22e0b7b787dd7c196a11166f7d81e3f2439cda55 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/pvt_v2.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/repvit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/repvit.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dab8ec7f326950944520aea7bba93debf52bd221 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/repvit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/res2net.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/res2net.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a53a7f18fb69f0cd4b295c1dc13b940c4b87b6e0 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/res2net.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/resnest.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/resnest.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7cff568f42706a14c517ff2a896eae82426cea1 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/resnest.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/resnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/resnet.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89abcc1d33c8d50ae906c47fd6aace1eee6c849c Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/resnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/senet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/senet.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b2dbed2bb0bfbfc17c5d19eebb49e572843db62 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/senet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/sequencer.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/sequencer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81c07a2a7886a9044ba5e935ee8fad3bca4801a5 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/sequencer.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/sknet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/sknet.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15dac37f1acec3f4948a33c2cfa2275ca9c2c842 Binary files /dev/null and 
b/pytorch-image-models/timm/models/__pycache__/sknet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/swin_transformer_v2.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/swin_transformer_v2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d164bfb44ee042f2c5cd15cdec222a0c1081062e Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/swin_transformer_v2.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/tiny_vit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/tiny_vit.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..742a64d8be15d8a516eac7f4fb2d13c6e4181808 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/tiny_vit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/visformer.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/visformer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9277e740afee0066339286637be5d701bf98a3e3 Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/visformer.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/vision_transformer.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/vision_transformer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3596929758f73c404659ff7807f9b245f63164ef Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/vision_transformer.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/xcit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/xcit.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30ba692c0008c8a8770e2888591629b45fe95c4b Binary files /dev/null and b/pytorch-image-models/timm/models/__pycache__/xcit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/_pruned/ecaresnet101d_pruned.txt b/pytorch-image-models/timm/models/_pruned/ecaresnet101d_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..2589b2f9dd3f0d1e02e1d5ddc1fbcd5c143e02c6 --- /dev/null +++ b/pytorch-image-models/timm/models/_pruned/ecaresnet101d_pruned.txt @@ -0,0 +1 @@ +conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[45, 64, 1, 1]***layer1.0.bn1.weight:[45]***layer1.0.conv2.weight:[25, 45, 3, 3]***layer1.0.bn2.weight:[25]***layer1.0.conv3.weight:[26, 25, 1, 1]***layer1.0.bn3.weight:[26]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[26, 64, 1, 1]***layer1.0.downsample.2.weight:[26]***layer1.1.conv1.weight:[53, 26, 1, 1]***layer1.1.bn1.weight:[53]***layer1.1.conv2.weight:[20, 53, 3, 3]***layer1.1.bn2.weight:[20]***layer1.1.conv3.weight:[26, 20, 1, 1]***layer1.1.bn3.weight:[26]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[60, 26, 1, 1]***layer1.2.bn1.weight:[60]***layer1.2.conv2.weight:[27, 60, 3, 3]***layer1.2.bn2.weight:[27]***layer1.2.conv3.weight:[26, 27, 1, 1]***layer1.2.bn3.weight:[26]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[81, 26, 1, 1]***layer2.0.bn1.weight:[81]***layer2.0.conv2.weight:[24, 81, 3, 3]***layer2.0.bn2.weight:[24]***layer2.0.conv3.weight:[142, 24, 1, 1]***layer2.0.bn3.weight:[142]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[142, 26, 1, 
1]***layer2.0.downsample.2.weight:[142]***layer2.1.conv1.weight:[93, 142, 1, 1]***layer2.1.bn1.weight:[93]***layer2.1.conv2.weight:[49, 93, 3, 3]***layer2.1.bn2.weight:[49]***layer2.1.conv3.weight:[142, 49, 1, 1]***layer2.1.bn3.weight:[142]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[102, 142, 1, 1]***layer2.2.bn1.weight:[102]***layer2.2.conv2.weight:[54, 102, 3, 3]***layer2.2.bn2.weight:[54]***layer2.2.conv3.weight:[142, 54, 1, 1]***layer2.2.bn3.weight:[142]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[122, 142, 1, 1]***layer2.3.bn1.weight:[122]***layer2.3.conv2.weight:[78, 122, 3, 3]***layer2.3.bn2.weight:[78]***layer2.3.conv3.weight:[142, 78, 1, 1]***layer2.3.bn3.weight:[142]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[101, 142, 1, 1]***layer3.0.bn1.weight:[101]***layer3.0.conv2.weight:[25, 101, 3, 3]***layer3.0.bn2.weight:[25]***layer3.0.conv3.weight:[278, 25, 1, 1]***layer3.0.bn3.weight:[278]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[278, 142, 1, 1]***layer3.0.downsample.2.weight:[278]***layer3.1.conv1.weight:[239, 278, 1, 1]***layer3.1.bn1.weight:[239]***layer3.1.conv2.weight:[160, 239, 3, 3]***layer3.1.bn2.weight:[160]***layer3.1.conv3.weight:[278, 160, 1, 1]***layer3.1.bn3.weight:[278]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[234, 278, 1, 1]***layer3.2.bn1.weight:[234]***layer3.2.conv2.weight:[156, 234, 3, 3]***layer3.2.bn2.weight:[156]***layer3.2.conv3.weight:[278, 156, 1, 1]***layer3.2.bn3.weight:[278]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[250, 278, 1, 1]***layer3.3.bn1.weight:[250]***layer3.3.conv2.weight:[176, 250, 3, 3]***layer3.3.bn2.weight:[176]***layer3.3.conv3.weight:[278, 176, 1, 1]***layer3.3.bn3.weight:[278]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[253, 278, 1, 1]***layer3.4.bn1.weight:[253]***layer3.4.conv2.weight:[191, 253, 3, 3]***layer3.4.bn2.weight:[191]***layer3.4.conv3.weight:[278, 191, 1, 1]***layer3.4.bn3.weight:[278]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[251, 278, 1, 1]***layer3.5.bn1.weight:[251]***layer3.5.conv2.weight:[175, 251, 3, 3]***layer3.5.bn2.weight:[175]***layer3.5.conv3.weight:[278, 175, 1, 1]***layer3.5.bn3.weight:[278]***layer3.5.se.conv.weight:[1, 1, 5]***layer3.6.conv1.weight:[230, 278, 1, 1]***layer3.6.bn1.weight:[230]***layer3.6.conv2.weight:[128, 230, 3, 3]***layer3.6.bn2.weight:[128]***layer3.6.conv3.weight:[278, 128, 1, 1]***layer3.6.bn3.weight:[278]***layer3.6.se.conv.weight:[1, 1, 5]***layer3.7.conv1.weight:[244, 278, 1, 1]***layer3.7.bn1.weight:[244]***layer3.7.conv2.weight:[154, 244, 3, 3]***layer3.7.bn2.weight:[154]***layer3.7.conv3.weight:[278, 154, 1, 1]***layer3.7.bn3.weight:[278]***layer3.7.se.conv.weight:[1, 1, 5]***layer3.8.conv1.weight:[244, 278, 1, 1]***layer3.8.bn1.weight:[244]***layer3.8.conv2.weight:[159, 244, 3, 3]***layer3.8.bn2.weight:[159]***layer3.8.conv3.weight:[278, 159, 1, 1]***layer3.8.bn3.weight:[278]***layer3.8.se.conv.weight:[1, 1, 5]***layer3.9.conv1.weight:[238, 278, 1, 1]***layer3.9.bn1.weight:[238]***layer3.9.conv2.weight:[97, 238, 3, 3]***layer3.9.bn2.weight:[97]***layer3.9.conv3.weight:[278, 97, 1, 1]***layer3.9.bn3.weight:[278]***layer3.9.se.conv.weight:[1, 1, 5]***layer3.10.conv1.weight:[244, 278, 1, 1]***layer3.10.bn1.weight:[244]***layer3.10.conv2.weight:[149, 244, 3, 3]***layer3.10.bn2.weight:[149]***layer3.10.conv3.weight:[278, 149, 1, 1]***layer3.10.bn3.weight:[278]***layer3.10.se.conv.weight:[1, 1, 5]***layer3.11.conv1.weight:[253, 278, 1, 
1]***layer3.11.bn1.weight:[253]***layer3.11.conv2.weight:[181, 253, 3, 3]***layer3.11.bn2.weight:[181]***layer3.11.conv3.weight:[278, 181, 1, 1]***layer3.11.bn3.weight:[278]***layer3.11.se.conv.weight:[1, 1, 5]***layer3.12.conv1.weight:[245, 278, 1, 1]***layer3.12.bn1.weight:[245]***layer3.12.conv2.weight:[119, 245, 3, 3]***layer3.12.bn2.weight:[119]***layer3.12.conv3.weight:[278, 119, 1, 1]***layer3.12.bn3.weight:[278]***layer3.12.se.conv.weight:[1, 1, 5]***layer3.13.conv1.weight:[255, 278, 1, 1]***layer3.13.bn1.weight:[255]***layer3.13.conv2.weight:[216, 255, 3, 3]***layer3.13.bn2.weight:[216]***layer3.13.conv3.weight:[278, 216, 1, 1]***layer3.13.bn3.weight:[278]***layer3.13.se.conv.weight:[1, 1, 5]***layer3.14.conv1.weight:[256, 278, 1, 1]***layer3.14.bn1.weight:[256]***layer3.14.conv2.weight:[201, 256, 3, 3]***layer3.14.bn2.weight:[201]***layer3.14.conv3.weight:[278, 201, 1, 1]***layer3.14.bn3.weight:[278]***layer3.14.se.conv.weight:[1, 1, 5]***layer3.15.conv1.weight:[253, 278, 1, 1]***layer3.15.bn1.weight:[253]***layer3.15.conv2.weight:[149, 253, 3, 3]***layer3.15.bn2.weight:[149]***layer3.15.conv3.weight:[278, 149, 1, 1]***layer3.15.bn3.weight:[278]***layer3.15.se.conv.weight:[1, 1, 5]***layer3.16.conv1.weight:[254, 278, 1, 1]***layer3.16.bn1.weight:[254]***layer3.16.conv2.weight:[141, 254, 3, 3]***layer3.16.bn2.weight:[141]***layer3.16.conv3.weight:[278, 141, 1, 1]***layer3.16.bn3.weight:[278]***layer3.16.se.conv.weight:[1, 1, 5]***layer3.17.conv1.weight:[256, 278, 1, 1]***layer3.17.bn1.weight:[256]***layer3.17.conv2.weight:[190, 256, 3, 3]***layer3.17.bn2.weight:[190]***layer3.17.conv3.weight:[278, 190, 1, 1]***layer3.17.bn3.weight:[278]***layer3.17.se.conv.weight:[1, 1, 5]***layer3.18.conv1.weight:[256, 278, 1, 1]***layer3.18.bn1.weight:[256]***layer3.18.conv2.weight:[217, 256, 3, 3]***layer3.18.bn2.weight:[217]***layer3.18.conv3.weight:[278, 217, 1, 1]***layer3.18.bn3.weight:[278]***layer3.18.se.conv.weight:[1, 1, 5]***layer3.19.conv1.weight:[255, 278, 1, 1]***layer3.19.bn1.weight:[255]***layer3.19.conv2.weight:[156, 255, 3, 3]***layer3.19.bn2.weight:[156]***layer3.19.conv3.weight:[278, 156, 1, 1]***layer3.19.bn3.weight:[278]***layer3.19.se.conv.weight:[1, 1, 5]***layer3.20.conv1.weight:[256, 278, 1, 1]***layer3.20.bn1.weight:[256]***layer3.20.conv2.weight:[155, 256, 3, 3]***layer3.20.bn2.weight:[155]***layer3.20.conv3.weight:[278, 155, 1, 1]***layer3.20.bn3.weight:[278]***layer3.20.se.conv.weight:[1, 1, 5]***layer3.21.conv1.weight:[256, 278, 1, 1]***layer3.21.bn1.weight:[256]***layer3.21.conv2.weight:[232, 256, 3, 3]***layer3.21.bn2.weight:[232]***layer3.21.conv3.weight:[278, 232, 1, 1]***layer3.21.bn3.weight:[278]***layer3.21.se.conv.weight:[1, 1, 5]***layer3.22.conv1.weight:[256, 278, 1, 1]***layer3.22.bn1.weight:[256]***layer3.22.conv2.weight:[214, 256, 3, 3]***layer3.22.bn2.weight:[214]***layer3.22.conv3.weight:[278, 214, 1, 1]***layer3.22.bn3.weight:[278]***layer3.22.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[499, 278, 1, 1]***layer4.0.bn1.weight:[499]***layer4.0.conv2.weight:[289, 499, 3, 3]***layer4.0.bn2.weight:[289]***layer4.0.conv3.weight:[2042, 289, 1, 1]***layer4.0.bn3.weight:[2042]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2042, 278, 1, 1]***layer4.0.downsample.2.weight:[2042]***layer4.1.conv1.weight:[512, 2042, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[512, 512, 3, 3]***layer4.1.bn2.weight:[512]***layer4.1.conv3.weight:[2042, 512, 1, 1]***layer4.1.bn3.weight:[2042]***layer4.1.se.conv.weight:[1, 1, 
7]***layer4.2.conv1.weight:[512, 2042, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[502, 512, 3, 3]***layer4.2.bn2.weight:[502]***layer4.2.conv3.weight:[2042, 502, 1, 1]***layer4.2.bn3.weight:[2042]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2042]***layer1_2_conv3_M.weight:[256, 26]***layer2_3_conv3_M.weight:[512, 142]***layer3_22_conv3_M.weight:[1024, 278]***layer4_2_conv3_M.weight:[2048, 2042] \ No newline at end of file diff --git a/pytorch-image-models/timm/models/_pruned/ecaresnet50d_pruned.txt b/pytorch-image-models/timm/models/_pruned/ecaresnet50d_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..9a8b2bf50e0631dce74d66a1a98e26cae10572a7 --- /dev/null +++ b/pytorch-image-models/timm/models/_pruned/ecaresnet50d_pruned.txt @@ -0,0 +1 @@ +conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[47, 64, 1, 1]***layer1.0.bn1.weight:[47]***layer1.0.conv2.weight:[18, 47, 3, 3]***layer1.0.bn2.weight:[18]***layer1.0.conv3.weight:[19, 18, 1, 1]***layer1.0.bn3.weight:[19]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[19, 64, 1, 1]***layer1.0.downsample.2.weight:[19]***layer1.1.conv1.weight:[52, 19, 1, 1]***layer1.1.bn1.weight:[52]***layer1.1.conv2.weight:[22, 52, 3, 3]***layer1.1.bn2.weight:[22]***layer1.1.conv3.weight:[19, 22, 1, 1]***layer1.1.bn3.weight:[19]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[64, 19, 1, 1]***layer1.2.bn1.weight:[64]***layer1.2.conv2.weight:[35, 64, 3, 3]***layer1.2.bn2.weight:[35]***layer1.2.conv3.weight:[19, 35, 1, 1]***layer1.2.bn3.weight:[19]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[85, 19, 1, 1]***layer2.0.bn1.weight:[85]***layer2.0.conv2.weight:[37, 85, 3, 3]***layer2.0.bn2.weight:[37]***layer2.0.conv3.weight:[171, 37, 1, 1]***layer2.0.bn3.weight:[171]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[171, 19, 1, 1]***layer2.0.downsample.2.weight:[171]***layer2.1.conv1.weight:[107, 171, 1, 1]***layer2.1.bn1.weight:[107]***layer2.1.conv2.weight:[80, 107, 3, 3]***layer2.1.bn2.weight:[80]***layer2.1.conv3.weight:[171, 80, 1, 1]***layer2.1.bn3.weight:[171]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[120, 171, 1, 1]***layer2.2.bn1.weight:[120]***layer2.2.conv2.weight:[85, 120, 3, 3]***layer2.2.bn2.weight:[85]***layer2.2.conv3.weight:[171, 85, 1, 1]***layer2.2.bn3.weight:[171]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[125, 171, 1, 1]***layer2.3.bn1.weight:[125]***layer2.3.conv2.weight:[87, 125, 3, 3]***layer2.3.bn2.weight:[87]***layer2.3.conv3.weight:[171, 87, 1, 1]***layer2.3.bn3.weight:[171]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[198, 171, 1, 1]***layer3.0.bn1.weight:[198]***layer3.0.conv2.weight:[126, 198, 3, 3]***layer3.0.bn2.weight:[126]***layer3.0.conv3.weight:[818, 126, 1, 1]***layer3.0.bn3.weight:[818]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[818, 171, 1, 1]***layer3.0.downsample.2.weight:[818]***layer3.1.conv1.weight:[255, 818, 1, 1]***layer3.1.bn1.weight:[255]***layer3.1.conv2.weight:[232, 255, 3, 3]***layer3.1.bn2.weight:[232]***layer3.1.conv3.weight:[818, 232, 1, 1]***layer3.1.bn3.weight:[818]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[256, 818, 1, 1]***layer3.2.bn1.weight:[256]***layer3.2.conv2.weight:[233, 256, 3, 3]***layer3.2.bn2.weight:[233]***layer3.2.conv3.weight:[818, 233, 1, 
1]***layer3.2.bn3.weight:[818]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[253, 818, 1, 1]***layer3.3.bn1.weight:[253]***layer3.3.conv2.weight:[235, 253, 3, 3]***layer3.3.bn2.weight:[235]***layer3.3.conv3.weight:[818, 235, 1, 1]***layer3.3.bn3.weight:[818]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[256, 818, 1, 1]***layer3.4.bn1.weight:[256]***layer3.4.conv2.weight:[225, 256, 3, 3]***layer3.4.bn2.weight:[225]***layer3.4.conv3.weight:[818, 225, 1, 1]***layer3.4.bn3.weight:[818]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[256, 818, 1, 1]***layer3.5.bn1.weight:[256]***layer3.5.conv2.weight:[239, 256, 3, 3]***layer3.5.bn2.weight:[239]***layer3.5.conv3.weight:[818, 239, 1, 1]***layer3.5.bn3.weight:[818]***layer3.5.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[492, 818, 1, 1]***layer4.0.bn1.weight:[492]***layer4.0.conv2.weight:[237, 492, 3, 3]***layer4.0.bn2.weight:[237]***layer4.0.conv3.weight:[2022, 237, 1, 1]***layer4.0.bn3.weight:[2022]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2022, 818, 1, 1]***layer4.0.downsample.2.weight:[2022]***layer4.1.conv1.weight:[512, 2022, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[500, 512, 3, 3]***layer4.1.bn2.weight:[500]***layer4.1.conv3.weight:[2022, 500, 1, 1]***layer4.1.bn3.weight:[2022]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2022, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[490, 512, 3, 3]***layer4.2.bn2.weight:[490]***layer4.2.conv3.weight:[2022, 490, 1, 1]***layer4.2.bn3.weight:[2022]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2022]***layer1_2_conv3_M.weight:[256, 19]***layer2_3_conv3_M.weight:[512, 171]***layer3_5_conv3_M.weight:[1024, 818]***layer4_2_conv3_M.weight:[2048, 2022] \ No newline at end of file diff --git a/pytorch-image-models/timm/models/_pruned/efficientnet_b2_pruned.txt b/pytorch-image-models/timm/models/_pruned/efficientnet_b2_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..6e3fadee3e9f92eaade96afd8691a5e4437551ee --- /dev/null +++ b/pytorch-image-models/timm/models/_pruned/efficientnet_b2_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[54, 16, 1, 
1]***blocks.1.0.bn1.weight:[54]***blocks.1.0.bn1.bias:[54]***blocks.1.0.bn1.running_mean:[54]***blocks.1.0.bn1.running_var:[54]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[54, 1, 3, 3]***blocks.1.0.bn2.weight:[54]***blocks.1.0.bn2.bias:[54]***blocks.1.0.bn2.running_mean:[54]***blocks.1.0.bn2.running_var:[54]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 54, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[54, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[54]***blocks.1.0.conv_pwl.weight:[17, 54, 1, 1]***blocks.1.0.bn3.weight:[17]***blocks.1.0.bn3.bias:[17]***blocks.1.0.bn3.running_mean:[17]***blocks.1.0.bn3.running_var:[17]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[69, 17, 1, 1]***blocks.1.1.bn1.weight:[69]***blocks.1.1.bn1.bias:[69]***blocks.1.1.bn1.running_mean:[69]***blocks.1.1.bn1.running_var:[69]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[69, 1, 3, 3]***blocks.1.1.bn2.weight:[69]***blocks.1.1.bn2.bias:[69]***blocks.1.1.bn2.running_mean:[69]***blocks.1.1.bn2.running_var:[69]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 69, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[69, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[69]***blocks.1.1.conv_pwl.weight:[17, 69, 1, 1]***blocks.1.1.bn3.weight:[17]***blocks.1.1.bn3.bias:[17]***blocks.1.1.bn3.running_mean:[17]***blocks.1.1.bn3.running_var:[17]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[61, 17, 1, 1]***blocks.1.2.bn1.weight:[61]***blocks.1.2.bn1.bias:[61]***blocks.1.2.bn1.running_mean:[61]***blocks.1.2.bn1.running_var:[61]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[61, 1, 3, 3]***blocks.1.2.bn2.weight:[61]***blocks.1.2.bn2.bias:[61]***blocks.1.2.bn2.running_mean:[61]***blocks.1.2.bn2.running_var:[61]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 61, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[61, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[61]***blocks.1.2.conv_pwl.weight:[17, 61, 1, 1]***blocks.1.2.bn3.weight:[17]***blocks.1.2.bn3.bias:[17]***blocks.1.2.bn3.running_mean:[17]***blocks.1.2.bn3.running_var:[17]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[86, 17, 1, 1]***blocks.2.0.bn1.weight:[86]***blocks.2.0.bn1.bias:[86]***blocks.2.0.bn1.running_mean:[86]***blocks.2.0.bn1.running_var:[86]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[86, 1, 5, 5]***blocks.2.0.bn2.weight:[86]***blocks.2.0.bn2.bias:[86]***blocks.2.0.bn2.running_mean:[86]***blocks.2.0.bn2.running_var:[86]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 86, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[86, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[86]***blocks.2.0.conv_pwl.weight:[42, 86, 1, 1]***blocks.2.0.bn3.weight:[42]***blocks.2.0.bn3.bias:[42]***blocks.2.0.bn3.running_mean:[42]***blocks.2.0.bn3.running_var:[42]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[72, 42, 1, 1]***blocks.2.1.bn1.weight:[72]***blocks.2.1.bn1.bias:[72]***blocks.2.1.bn1.running_mean:[72]***blocks.2.1.bn1.running_var:[72]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[72, 1, 5, 
5]***blocks.2.1.bn2.weight:[72]***blocks.2.1.bn2.bias:[72]***blocks.2.1.bn2.running_mean:[72]***blocks.2.1.bn2.running_var:[72]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 72, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[72, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[72]***blocks.2.1.conv_pwl.weight:[42, 72, 1, 1]***blocks.2.1.bn3.weight:[42]***blocks.2.1.bn3.bias:[42]***blocks.2.1.bn3.running_mean:[42]***blocks.2.1.bn3.running_var:[42]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[98, 42, 1, 1]***blocks.2.2.bn1.weight:[98]***blocks.2.2.bn1.bias:[98]***blocks.2.2.bn1.running_mean:[98]***blocks.2.2.bn1.running_var:[98]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[98, 1, 5, 5]***blocks.2.2.bn2.weight:[98]***blocks.2.2.bn2.bias:[98]***blocks.2.2.bn2.running_mean:[98]***blocks.2.2.bn2.running_var:[98]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 98, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[98, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[98]***blocks.2.2.conv_pwl.weight:[42, 98, 1, 1]***blocks.2.2.bn3.weight:[42]***blocks.2.2.bn3.bias:[42]***blocks.2.2.bn3.running_mean:[42]***blocks.2.2.bn3.running_var:[42]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[245, 42, 1, 1]***blocks.3.0.bn1.weight:[245]***blocks.3.0.bn1.bias:[245]***blocks.3.0.bn1.running_mean:[245]***blocks.3.0.bn1.running_var:[245]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[245, 1, 3, 3]***blocks.3.0.bn2.weight:[245]***blocks.3.0.bn2.bias:[245]***blocks.3.0.bn2.running_mean:[245]***blocks.3.0.bn2.running_var:[245]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 245, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[245, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[245]***blocks.3.0.conv_pwl.weight:[85, 245, 1, 1]***blocks.3.0.bn3.weight:[85]***blocks.3.0.bn3.bias:[85]***blocks.3.0.bn3.running_mean:[85]***blocks.3.0.bn3.running_var:[85]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[274, 85, 1, 1]***blocks.3.1.bn1.weight:[274]***blocks.3.1.bn1.bias:[274]***blocks.3.1.bn1.running_mean:[274]***blocks.3.1.bn1.running_var:[274]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[274, 1, 3, 3]***blocks.3.1.bn2.weight:[274]***blocks.3.1.bn2.bias:[274]***blocks.3.1.bn2.running_mean:[274]***blocks.3.1.bn2.running_var:[274]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[22, 274, 1, 1]***blocks.3.1.se.conv_reduce.bias:[22]***blocks.3.1.se.conv_expand.weight:[274, 22, 1, 1]***blocks.3.1.se.conv_expand.bias:[274]***blocks.3.1.conv_pwl.weight:[85, 274, 1, 1]***blocks.3.1.bn3.weight:[85]***blocks.3.1.bn3.bias:[85]***blocks.3.1.bn3.running_mean:[85]***blocks.3.1.bn3.running_var:[85]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[254, 85, 1, 1]***blocks.3.2.bn1.weight:[254]***blocks.3.2.bn1.bias:[254]***blocks.3.2.bn1.running_mean:[254]***blocks.3.2.bn1.running_var:[254]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[254, 1, 3, 3]***blocks.3.2.bn2.weight:[254]***blocks.3.2.bn2.bias:[254]***blocks.3.2.bn2.running_mean:[254]***blocks.3.2.bn2.running_var:[254]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[22, 254, 1, 1]***blocks.3.2.se.conv_reduce.bias:[22]***blocks.3.2.se.conv_expand.weight:[254, 22, 1, 
1]***blocks.3.2.se.conv_expand.bias:[254]***blocks.3.2.conv_pwl.weight:[85, 254, 1, 1]***blocks.3.2.bn3.weight:[85]***blocks.3.2.bn3.bias:[85]***blocks.3.2.bn3.running_mean:[85]***blocks.3.2.bn3.running_var:[85]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[292, 85, 1, 1]***blocks.3.3.bn1.weight:[292]***blocks.3.3.bn1.bias:[292]***blocks.3.3.bn1.running_mean:[292]***blocks.3.3.bn1.running_var:[292]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[292, 1, 3, 3]***blocks.3.3.bn2.weight:[292]***blocks.3.3.bn2.bias:[292]***blocks.3.3.bn2.running_mean:[292]***blocks.3.3.bn2.running_var:[292]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[22, 292, 1, 1]***blocks.3.3.se.conv_reduce.bias:[22]***blocks.3.3.se.conv_expand.weight:[292, 22, 1, 1]***blocks.3.3.se.conv_expand.bias:[292]***blocks.3.3.conv_pwl.weight:[85, 292, 1, 1]***blocks.3.3.bn3.weight:[85]***blocks.3.3.bn3.bias:[85]***blocks.3.3.bn3.running_mean:[85]***blocks.3.3.bn3.running_var:[85]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[502, 85, 1, 1]***blocks.4.0.bn1.weight:[502]***blocks.4.0.bn1.bias:[502]***blocks.4.0.bn1.running_mean:[502]***blocks.4.0.bn1.running_var:[502]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[502, 1, 5, 5]***blocks.4.0.bn2.weight:[502]***blocks.4.0.bn2.bias:[502]***blocks.4.0.bn2.running_mean:[502]***blocks.4.0.bn2.running_var:[502]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[22, 502, 1, 1]***blocks.4.0.se.conv_reduce.bias:[22]***blocks.4.0.se.conv_expand.weight:[502, 22, 1, 1]***blocks.4.0.se.conv_expand.bias:[502]***blocks.4.0.conv_pwl.weight:[116, 502, 1, 1]***blocks.4.0.bn3.weight:[116]***blocks.4.0.bn3.bias:[116]***blocks.4.0.bn3.running_mean:[116]***blocks.4.0.bn3.running_var:[116]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[315, 116, 1, 1]***blocks.4.1.bn1.weight:[315]***blocks.4.1.bn1.bias:[315]***blocks.4.1.bn1.running_mean:[315]***blocks.4.1.bn1.running_var:[315]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[315, 1, 5, 5]***blocks.4.1.bn2.weight:[315]***blocks.4.1.bn2.bias:[315]***blocks.4.1.bn2.running_mean:[315]***blocks.4.1.bn2.running_var:[315]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[30, 315, 1, 1]***blocks.4.1.se.conv_reduce.bias:[30]***blocks.4.1.se.conv_expand.weight:[315, 30, 1, 1]***blocks.4.1.se.conv_expand.bias:[315]***blocks.4.1.conv_pwl.weight:[116, 315, 1, 1]***blocks.4.1.bn3.weight:[116]***blocks.4.1.bn3.bias:[116]***blocks.4.1.bn3.running_mean:[116]***blocks.4.1.bn3.running_var:[116]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[354, 116, 1, 1]***blocks.4.2.bn1.weight:[354]***blocks.4.2.bn1.bias:[354]***blocks.4.2.bn1.running_mean:[354]***blocks.4.2.bn1.running_var:[354]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[354, 1, 5, 5]***blocks.4.2.bn2.weight:[354]***blocks.4.2.bn2.bias:[354]***blocks.4.2.bn2.running_mean:[354]***blocks.4.2.bn2.running_var:[354]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[30, 354, 1, 1]***blocks.4.2.se.conv_reduce.bias:[30]***blocks.4.2.se.conv_expand.weight:[354, 30, 1, 1]***blocks.4.2.se.conv_expand.bias:[354]***blocks.4.2.conv_pwl.weight:[116, 354, 1, 
1]***blocks.4.2.bn3.weight:[116]***blocks.4.2.bn3.bias:[116]***blocks.4.2.bn3.running_mean:[116]***blocks.4.2.bn3.running_var:[116]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[443, 116, 1, 1]***blocks.4.3.bn1.weight:[443]***blocks.4.3.bn1.bias:[443]***blocks.4.3.bn1.running_mean:[443]***blocks.4.3.bn1.running_var:[443]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[443, 1, 5, 5]***blocks.4.3.bn2.weight:[443]***blocks.4.3.bn2.bias:[443]***blocks.4.3.bn2.running_mean:[443]***blocks.4.3.bn2.running_var:[443]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[30, 443, 1, 1]***blocks.4.3.se.conv_reduce.bias:[30]***blocks.4.3.se.conv_expand.weight:[443, 30, 1, 1]***blocks.4.3.se.conv_expand.bias:[443]***blocks.4.3.conv_pwl.weight:[116, 443, 1, 1]***blocks.4.3.bn3.weight:[116]***blocks.4.3.bn3.bias:[116]***blocks.4.3.bn3.running_mean:[116]***blocks.4.3.bn3.running_var:[116]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[719, 116, 1, 1]***blocks.5.0.bn1.weight:[719]***blocks.5.0.bn1.bias:[719]***blocks.5.0.bn1.running_mean:[719]***blocks.5.0.bn1.running_var:[719]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[719, 1, 5, 5]***blocks.5.0.bn2.weight:[719]***blocks.5.0.bn2.bias:[719]***blocks.5.0.bn2.running_mean:[719]***blocks.5.0.bn2.running_var:[719]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[30, 719, 1, 1]***blocks.5.0.se.conv_reduce.bias:[30]***blocks.5.0.se.conv_expand.weight:[719, 30, 1, 1]***blocks.5.0.se.conv_expand.bias:[719]***blocks.5.0.conv_pwl.weight:[208, 719, 1, 1]***blocks.5.0.bn3.weight:[208]***blocks.5.0.bn3.bias:[208]***blocks.5.0.bn3.running_mean:[208]***blocks.5.0.bn3.running_var:[208]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1148, 208, 1, 1]***blocks.5.1.bn1.weight:[1148]***blocks.5.1.bn1.bias:[1148]***blocks.5.1.bn1.running_mean:[1148]***blocks.5.1.bn1.running_var:[1148]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1148, 1, 5, 5]***blocks.5.1.bn2.weight:[1148]***blocks.5.1.bn2.bias:[1148]***blocks.5.1.bn2.running_mean:[1148]***blocks.5.1.bn2.running_var:[1148]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[52, 1148, 1, 1]***blocks.5.1.se.conv_reduce.bias:[52]***blocks.5.1.se.conv_expand.weight:[1148, 52, 1, 1]***blocks.5.1.se.conv_expand.bias:[1148]***blocks.5.1.conv_pwl.weight:[208, 1148, 1, 1]***blocks.5.1.bn3.weight:[208]***blocks.5.1.bn3.bias:[208]***blocks.5.1.bn3.running_mean:[208]***blocks.5.1.bn3.running_var:[208]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[1160, 208, 1, 1]***blocks.5.2.bn1.weight:[1160]***blocks.5.2.bn1.bias:[1160]***blocks.5.2.bn1.running_mean:[1160]***blocks.5.2.bn1.running_var:[1160]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[1160, 1, 5, 5]***blocks.5.2.bn2.weight:[1160]***blocks.5.2.bn2.bias:[1160]***blocks.5.2.bn2.running_mean:[1160]***blocks.5.2.bn2.running_var:[1160]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[52, 1160, 1, 1]***blocks.5.2.se.conv_reduce.bias:[52]***blocks.5.2.se.conv_expand.weight:[1160, 52, 1, 1]***blocks.5.2.se.conv_expand.bias:[1160]***blocks.5.2.conv_pwl.weight:[208, 1160, 1, 1]***blocks.5.2.bn3.weight:[208]***blocks.5.2.bn3.bias:[208]***blocks.5.2.bn3.running_mean:[208]***blocks.5.2.bn3.running_var:[208]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1182, 208, 1, 
1]***blocks.5.3.bn1.weight:[1182]***blocks.5.3.bn1.bias:[1182]***blocks.5.3.bn1.running_mean:[1182]***blocks.5.3.bn1.running_var:[1182]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1182, 1, 5, 5]***blocks.5.3.bn2.weight:[1182]***blocks.5.3.bn2.bias:[1182]***blocks.5.3.bn2.running_mean:[1182]***blocks.5.3.bn2.running_var:[1182]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[52, 1182, 1, 1]***blocks.5.3.se.conv_reduce.bias:[52]***blocks.5.3.se.conv_expand.weight:[1182, 52, 1, 1]***blocks.5.3.se.conv_expand.bias:[1182]***blocks.5.3.conv_pwl.weight:[208, 1182, 1, 1]***blocks.5.3.bn3.weight:[208]***blocks.5.3.bn3.bias:[208]***blocks.5.3.bn3.running_mean:[208]***blocks.5.3.bn3.running_var:[208]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1228, 208, 1, 1]***blocks.5.4.bn1.weight:[1228]***blocks.5.4.bn1.bias:[1228]***blocks.5.4.bn1.running_mean:[1228]***blocks.5.4.bn1.running_var:[1228]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1228, 1, 5, 5]***blocks.5.4.bn2.weight:[1228]***blocks.5.4.bn2.bias:[1228]***blocks.5.4.bn2.running_mean:[1228]***blocks.5.4.bn2.running_var:[1228]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[52, 1228, 1, 1]***blocks.5.4.se.conv_reduce.bias:[52]***blocks.5.4.se.conv_expand.weight:[1228, 52, 1, 1]***blocks.5.4.se.conv_expand.bias:[1228]***blocks.5.4.conv_pwl.weight:[208, 1228, 1, 1]***blocks.5.4.bn3.weight:[208]***blocks.5.4.bn3.bias:[208]***blocks.5.4.bn3.running_mean:[208]***blocks.5.4.bn3.running_var:[208]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1248, 208, 1, 1]***blocks.6.0.bn1.weight:[1248]***blocks.6.0.bn1.bias:[1248]***blocks.6.0.bn1.running_mean:[1248]***blocks.6.0.bn1.running_var:[1248]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1248, 1, 3, 3]***blocks.6.0.bn2.weight:[1248]***blocks.6.0.bn2.bias:[1248]***blocks.6.0.bn2.running_mean:[1248]***blocks.6.0.bn2.running_var:[1248]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[52, 1248, 1, 1]***blocks.6.0.se.conv_reduce.bias:[52]***blocks.6.0.se.conv_expand.weight:[1248, 52, 1, 1]***blocks.6.0.se.conv_expand.bias:[1248]***blocks.6.0.conv_pwl.weight:[352, 1248, 1, 1]***blocks.6.0.bn3.weight:[352]***blocks.6.0.bn3.bias:[352]***blocks.6.0.bn3.running_mean:[352]***blocks.6.0.bn3.running_var:[352]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2112, 352, 1, 1]***blocks.6.1.bn1.weight:[2112]***blocks.6.1.bn1.bias:[2112]***blocks.6.1.bn1.running_mean:[2112]***blocks.6.1.bn1.running_var:[2112]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2112, 1, 3, 3]***blocks.6.1.bn2.weight:[2112]***blocks.6.1.bn2.bias:[2112]***blocks.6.1.bn2.running_mean:[2112]***blocks.6.1.bn2.running_var:[2112]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[88, 2112, 1, 1]***blocks.6.1.se.conv_reduce.bias:[88]***blocks.6.1.se.conv_expand.weight:[2112, 88, 1, 1]***blocks.6.1.se.conv_expand.bias:[2112]***blocks.6.1.conv_pwl.weight:[352, 2112, 1, 1]***blocks.6.1.bn3.weight:[352]***blocks.6.1.bn3.bias:[352]***blocks.6.1.bn3.running_mean:[352]***blocks.6.1.bn3.running_var:[352]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1408, 352, 1, 1]***bn2.weight:[1408]***bn2.bias:[1408]***bn2.running_mean:[1408]***bn2.running_var:[1408]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1408]***classifier.bias:[1000] \ No newline at end of file diff --git 
a/pytorch-image-models/timm/models/_pruned/efficientnet_b3_pruned.txt b/pytorch-image-models/timm/models/_pruned/efficientnet_b3_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..489781736de08e5cf40bf76528a735fff4a3f61c --- /dev/null +++ b/pytorch-image-models/timm/models/_pruned/efficientnet_b3_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[40, 3, 3, 3]***bn1.weight:[40]***bn1.bias:[40]***bn1.running_mean:[40]***bn1.running_var:[40]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[40, 1, 3, 3]***blocks.0.0.bn1.weight:[40]***blocks.0.0.bn1.bias:[40]***blocks.0.0.bn1.running_mean:[40]***blocks.0.0.bn1.running_var:[40]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[10, 40, 1, 1]***blocks.0.0.se.conv_reduce.bias:[10]***blocks.0.0.se.conv_expand.weight:[40, 10, 1, 1]***blocks.0.0.se.conv_expand.bias:[40]***blocks.0.0.conv_pw.weight:[24, 40, 1, 1]***blocks.0.0.bn2.weight:[24]***blocks.0.0.bn2.bias:[24]***blocks.0.0.bn2.running_mean:[24]***blocks.0.0.bn2.running_var:[24]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[24, 1, 3, 3]***blocks.0.1.bn1.weight:[24]***blocks.0.1.bn1.bias:[24]***blocks.0.1.bn1.running_mean:[24]***blocks.0.1.bn1.running_var:[24]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[6, 24, 1, 1]***blocks.0.1.se.conv_reduce.bias:[6]***blocks.0.1.se.conv_expand.weight:[24, 6, 1, 1]***blocks.0.1.se.conv_expand.bias:[24]***blocks.0.1.conv_pw.weight:[24, 24, 1, 1]***blocks.0.1.bn2.weight:[24]***blocks.0.1.bn2.bias:[24]***blocks.0.1.bn2.running_mean:[24]***blocks.0.1.bn2.running_var:[24]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[27, 24, 1, 1]***blocks.1.0.bn1.weight:[27]***blocks.1.0.bn1.bias:[27]***blocks.1.0.bn1.running_mean:[27]***blocks.1.0.bn1.running_var:[27]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[27, 1, 3, 3]***blocks.1.0.bn2.weight:[27]***blocks.1.0.bn2.bias:[27]***blocks.1.0.bn2.running_mean:[27]***blocks.1.0.bn2.running_var:[27]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[6, 27, 1, 1]***blocks.1.0.se.conv_reduce.bias:[6]***blocks.1.0.se.conv_expand.weight:[27, 6, 1, 1]***blocks.1.0.se.conv_expand.bias:[27]***blocks.1.0.conv_pwl.weight:[12, 27, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[49, 12, 1, 1]***blocks.1.1.bn1.weight:[49]***blocks.1.1.bn1.bias:[49]***blocks.1.1.bn1.running_mean:[49]***blocks.1.1.bn1.running_var:[49]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[49, 1, 3, 3]***blocks.1.1.bn2.weight:[49]***blocks.1.1.bn2.bias:[49]***blocks.1.1.bn2.running_mean:[49]***blocks.1.1.bn2.running_var:[49]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[8, 49, 1, 1]***blocks.1.1.se.conv_reduce.bias:[8]***blocks.1.1.se.conv_expand.weight:[49, 8, 1, 1]***blocks.1.1.se.conv_expand.bias:[49]***blocks.1.1.conv_pwl.weight:[12, 49, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 
3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[8, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[8]***blocks.1.2.se.conv_expand.weight:[48, 8, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[83, 12, 1, 1]***blocks.2.0.bn1.weight:[83]***blocks.2.0.bn1.bias:[83]***blocks.2.0.bn1.running_mean:[83]***blocks.2.0.bn1.running_var:[83]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[83, 1, 5, 5]***blocks.2.0.bn2.weight:[83]***blocks.2.0.bn2.bias:[83]***blocks.2.0.bn2.running_mean:[83]***blocks.2.0.bn2.running_var:[83]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[8, 83, 1, 1]***blocks.2.0.se.conv_reduce.bias:[8]***blocks.2.0.se.conv_expand.weight:[83, 8, 1, 1]***blocks.2.0.se.conv_expand.bias:[83]***blocks.2.0.conv_pwl.weight:[40, 83, 1, 1]***blocks.2.0.bn3.weight:[40]***blocks.2.0.bn3.bias:[40]***blocks.2.0.bn3.running_mean:[40]***blocks.2.0.bn3.running_var:[40]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[90, 40, 1, 1]***blocks.2.1.bn1.weight:[90]***blocks.2.1.bn1.bias:[90]***blocks.2.1.bn1.running_mean:[90]***blocks.2.1.bn1.running_var:[90]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[90, 1, 5, 5]***blocks.2.1.bn2.weight:[90]***blocks.2.1.bn2.bias:[90]***blocks.2.1.bn2.running_mean:[90]***blocks.2.1.bn2.running_var:[90]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 90, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[90, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[90]***blocks.2.1.conv_pwl.weight:[40, 90, 1, 1]***blocks.2.1.bn3.weight:[40]***blocks.2.1.bn3.bias:[40]***blocks.2.1.bn3.running_mean:[40]***blocks.2.1.bn3.running_var:[40]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[85, 40, 1, 1]***blocks.2.2.bn1.weight:[85]***blocks.2.2.bn1.bias:[85]***blocks.2.2.bn1.running_mean:[85]***blocks.2.2.bn1.running_var:[85]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[85, 1, 5, 5]***blocks.2.2.bn2.weight:[85]***blocks.2.2.bn2.bias:[85]***blocks.2.2.bn2.running_mean:[85]***blocks.2.2.bn2.running_var:[85]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 85, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[85, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[85]***blocks.2.2.conv_pwl.weight:[40, 85, 1, 1]***blocks.2.2.bn3.weight:[40]***blocks.2.2.bn3.bias:[40]***blocks.2.2.bn3.running_mean:[40]***blocks.2.2.bn3.running_var:[40]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[215, 40, 1, 1]***blocks.3.0.bn1.weight:[215]***blocks.3.0.bn1.bias:[215]***blocks.3.0.bn1.running_mean:[215]***blocks.3.0.bn1.running_var:[215]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[215, 1, 3, 3]***blocks.3.0.bn2.weight:[215]***blocks.3.0.bn2.bias:[215]***blocks.3.0.bn2.running_mean:[215]***blocks.3.0.bn2.running_var:[215]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 215, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[215, 12, 1, 
1]***blocks.3.0.se.conv_expand.bias:[215]***blocks.3.0.conv_pwl.weight:[93, 215, 1, 1]***blocks.3.0.bn3.weight:[93]***blocks.3.0.bn3.bias:[93]***blocks.3.0.bn3.running_mean:[93]***blocks.3.0.bn3.running_var:[93]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[261, 93, 1, 1]***blocks.3.1.bn1.weight:[261]***blocks.3.1.bn1.bias:[261]***blocks.3.1.bn1.running_mean:[261]***blocks.3.1.bn1.running_var:[261]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[261, 1, 3, 3]***blocks.3.1.bn2.weight:[261]***blocks.3.1.bn2.bias:[261]***blocks.3.1.bn2.running_mean:[261]***blocks.3.1.bn2.running_var:[261]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[24, 261, 1, 1]***blocks.3.1.se.conv_reduce.bias:[24]***blocks.3.1.se.conv_expand.weight:[261, 24, 1, 1]***blocks.3.1.se.conv_expand.bias:[261]***blocks.3.1.conv_pwl.weight:[93, 261, 1, 1]***blocks.3.1.bn3.weight:[93]***blocks.3.1.bn3.bias:[93]***blocks.3.1.bn3.running_mean:[93]***blocks.3.1.bn3.running_var:[93]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[219, 93, 1, 1]***blocks.3.2.bn1.weight:[219]***blocks.3.2.bn1.bias:[219]***blocks.3.2.bn1.running_mean:[219]***blocks.3.2.bn1.running_var:[219]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[219, 1, 3, 3]***blocks.3.2.bn2.weight:[219]***blocks.3.2.bn2.bias:[219]***blocks.3.2.bn2.running_mean:[219]***blocks.3.2.bn2.running_var:[219]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[24, 219, 1, 1]***blocks.3.2.se.conv_reduce.bias:[24]***blocks.3.2.se.conv_expand.weight:[219, 24, 1, 1]***blocks.3.2.se.conv_expand.bias:[219]***blocks.3.2.conv_pwl.weight:[93, 219, 1, 1]***blocks.3.2.bn3.weight:[93]***blocks.3.2.bn3.bias:[93]***blocks.3.2.bn3.running_mean:[93]***blocks.3.2.bn3.running_var:[93]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[254, 93, 1, 1]***blocks.3.3.bn1.weight:[254]***blocks.3.3.bn1.bias:[254]***blocks.3.3.bn1.running_mean:[254]***blocks.3.3.bn1.running_var:[254]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[254, 1, 3, 3]***blocks.3.3.bn2.weight:[254]***blocks.3.3.bn2.bias:[254]***blocks.3.3.bn2.running_mean:[254]***blocks.3.3.bn2.running_var:[254]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[24, 254, 1, 1]***blocks.3.3.se.conv_reduce.bias:[24]***blocks.3.3.se.conv_expand.weight:[254, 24, 1, 1]***blocks.3.3.se.conv_expand.bias:[254]***blocks.3.3.conv_pwl.weight:[93, 254, 1, 1]***blocks.3.3.bn3.weight:[93]***blocks.3.3.bn3.bias:[93]***blocks.3.3.bn3.running_mean:[93]***blocks.3.3.bn3.running_var:[93]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.3.4.conv_pw.weight:[236, 93, 1, 1]***blocks.3.4.bn1.weight:[236]***blocks.3.4.bn1.bias:[236]***blocks.3.4.bn1.running_mean:[236]***blocks.3.4.bn1.running_var:[236]***blocks.3.4.bn1.num_batches_tracked:[]***blocks.3.4.conv_dw.weight:[236, 1, 3, 3]***blocks.3.4.bn2.weight:[236]***blocks.3.4.bn2.bias:[236]***blocks.3.4.bn2.running_mean:[236]***blocks.3.4.bn2.running_var:[236]***blocks.3.4.bn2.num_batches_tracked:[]***blocks.3.4.se.conv_reduce.weight:[24, 236, 1, 1]***blocks.3.4.se.conv_reduce.bias:[24]***blocks.3.4.se.conv_expand.weight:[236, 24, 1, 1]***blocks.3.4.se.conv_expand.bias:[236]***blocks.3.4.conv_pwl.weight:[93, 236, 1, 1]***blocks.3.4.bn3.weight:[93]***blocks.3.4.bn3.bias:[93]***blocks.3.4.bn3.running_mean:[93]***blocks.3.4.bn3.running_var:[93]***blocks.3.4.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[480, 93, 1, 
1]***blocks.4.0.bn1.weight:[480]***blocks.4.0.bn1.bias:[480]***blocks.4.0.bn1.running_mean:[480]***blocks.4.0.bn1.running_var:[480]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[480, 1, 5, 5]***blocks.4.0.bn2.weight:[480]***blocks.4.0.bn2.bias:[480]***blocks.4.0.bn2.running_mean:[480]***blocks.4.0.bn2.running_var:[480]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[24, 480, 1, 1]***blocks.4.0.se.conv_reduce.bias:[24]***blocks.4.0.se.conv_expand.weight:[480, 24, 1, 1]***blocks.4.0.se.conv_expand.bias:[480]***blocks.4.0.conv_pwl.weight:[120, 480, 1, 1]***blocks.4.0.bn3.weight:[120]***blocks.4.0.bn3.bias:[120]***blocks.4.0.bn3.running_mean:[120]***blocks.4.0.bn3.running_var:[120]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[235, 120, 1, 1]***blocks.4.1.bn1.weight:[235]***blocks.4.1.bn1.bias:[235]***blocks.4.1.bn1.running_mean:[235]***blocks.4.1.bn1.running_var:[235]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[235, 1, 5, 5]***blocks.4.1.bn2.weight:[235]***blocks.4.1.bn2.bias:[235]***blocks.4.1.bn2.running_mean:[235]***blocks.4.1.bn2.running_var:[235]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[34, 235, 1, 1]***blocks.4.1.se.conv_reduce.bias:[34]***blocks.4.1.se.conv_expand.weight:[235, 34, 1, 1]***blocks.4.1.se.conv_expand.bias:[235]***blocks.4.1.conv_pwl.weight:[120, 235, 1, 1]***blocks.4.1.bn3.weight:[120]***blocks.4.1.bn3.bias:[120]***blocks.4.1.bn3.running_mean:[120]***blocks.4.1.bn3.running_var:[120]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[217, 120, 1, 1]***blocks.4.2.bn1.weight:[217]***blocks.4.2.bn1.bias:[217]***blocks.4.2.bn1.running_mean:[217]***blocks.4.2.bn1.running_var:[217]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[217, 1, 5, 5]***blocks.4.2.bn2.weight:[217]***blocks.4.2.bn2.bias:[217]***blocks.4.2.bn2.running_mean:[217]***blocks.4.2.bn2.running_var:[217]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[34, 217, 1, 1]***blocks.4.2.se.conv_reduce.bias:[34]***blocks.4.2.se.conv_expand.weight:[217, 34, 1, 1]***blocks.4.2.se.conv_expand.bias:[217]***blocks.4.2.conv_pwl.weight:[120, 217, 1, 1]***blocks.4.2.bn3.weight:[120]***blocks.4.2.bn3.bias:[120]***blocks.4.2.bn3.running_mean:[120]***blocks.4.2.bn3.running_var:[120]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[226, 120, 1, 1]***blocks.4.3.bn1.weight:[226]***blocks.4.3.bn1.bias:[226]***blocks.4.3.bn1.running_mean:[226]***blocks.4.3.bn1.running_var:[226]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[226, 1, 5, 5]***blocks.4.3.bn2.weight:[226]***blocks.4.3.bn2.bias:[226]***blocks.4.3.bn2.running_mean:[226]***blocks.4.3.bn2.running_var:[226]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[33, 226, 1, 1]***blocks.4.3.se.conv_reduce.bias:[33]***blocks.4.3.se.conv_expand.weight:[226, 33, 1, 1]***blocks.4.3.se.conv_expand.bias:[226]***blocks.4.3.conv_pwl.weight:[120, 226, 1, 1]***blocks.4.3.bn3.weight:[120]***blocks.4.3.bn3.bias:[120]***blocks.4.3.bn3.running_mean:[120]***blocks.4.3.bn3.running_var:[120]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.4.4.conv_pw.weight:[340, 120, 1, 1]***blocks.4.4.bn1.weight:[340]***blocks.4.4.bn1.bias:[340]***blocks.4.4.bn1.running_mean:[340]***blocks.4.4.bn1.running_var:[340]***blocks.4.4.bn1.num_batches_tracked:[]***blocks.4.4.conv_dw.weight:[340, 1, 5, 
5]***blocks.4.4.bn2.weight:[340]***blocks.4.4.bn2.bias:[340]***blocks.4.4.bn2.running_mean:[340]***blocks.4.4.bn2.running_var:[340]***blocks.4.4.bn2.num_batches_tracked:[]***blocks.4.4.se.conv_reduce.weight:[34, 340, 1, 1]***blocks.4.4.se.conv_reduce.bias:[34]***blocks.4.4.se.conv_expand.weight:[340, 34, 1, 1]***blocks.4.4.se.conv_expand.bias:[340]***blocks.4.4.conv_pwl.weight:[120, 340, 1, 1]***blocks.4.4.bn3.weight:[120]***blocks.4.4.bn3.bias:[120]***blocks.4.4.bn3.running_mean:[120]***blocks.4.4.bn3.running_var:[120]***blocks.4.4.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[802, 120, 1, 1]***blocks.5.0.bn1.weight:[802]***blocks.5.0.bn1.bias:[802]***blocks.5.0.bn1.running_mean:[802]***blocks.5.0.bn1.running_var:[802]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[802, 1, 5, 5]***blocks.5.0.bn2.weight:[802]***blocks.5.0.bn2.bias:[802]***blocks.5.0.bn2.running_mean:[802]***blocks.5.0.bn2.running_var:[802]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[34, 802, 1, 1]***blocks.5.0.se.conv_reduce.bias:[34]***blocks.5.0.se.conv_expand.weight:[802, 34, 1, 1]***blocks.5.0.se.conv_expand.bias:[802]***blocks.5.0.conv_pwl.weight:[232, 802, 1, 1]***blocks.5.0.bn3.weight:[232]***blocks.5.0.bn3.bias:[232]***blocks.5.0.bn3.running_mean:[232]***blocks.5.0.bn3.running_var:[232]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1030, 232, 1, 1]***blocks.5.1.bn1.weight:[1030]***blocks.5.1.bn1.bias:[1030]***blocks.5.1.bn1.running_mean:[1030]***blocks.5.1.bn1.running_var:[1030]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1030, 1, 5, 5]***blocks.5.1.bn2.weight:[1030]***blocks.5.1.bn2.bias:[1030]***blocks.5.1.bn2.running_mean:[1030]***blocks.5.1.bn2.running_var:[1030]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[58, 1030, 1, 1]***blocks.5.1.se.conv_reduce.bias:[58]***blocks.5.1.se.conv_expand.weight:[1030, 58, 1, 1]***blocks.5.1.se.conv_expand.bias:[1030]***blocks.5.1.conv_pwl.weight:[232, 1030, 1, 1]***blocks.5.1.bn3.weight:[232]***blocks.5.1.bn3.bias:[232]***blocks.5.1.bn3.running_mean:[232]***blocks.5.1.bn3.running_var:[232]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[924, 232, 1, 1]***blocks.5.2.bn1.weight:[924]***blocks.5.2.bn1.bias:[924]***blocks.5.2.bn1.running_mean:[924]***blocks.5.2.bn1.running_var:[924]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[924, 1, 5, 5]***blocks.5.2.bn2.weight:[924]***blocks.5.2.bn2.bias:[924]***blocks.5.2.bn2.running_mean:[924]***blocks.5.2.bn2.running_var:[924]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[58, 924, 1, 1]***blocks.5.2.se.conv_reduce.bias:[58]***blocks.5.2.se.conv_expand.weight:[924, 58, 1, 1]***blocks.5.2.se.conv_expand.bias:[924]***blocks.5.2.conv_pwl.weight:[232, 924, 1, 1]***blocks.5.2.bn3.weight:[232]***blocks.5.2.bn3.bias:[232]***blocks.5.2.bn3.running_mean:[232]***blocks.5.2.bn3.running_var:[232]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1016, 232, 1, 1]***blocks.5.3.bn1.weight:[1016]***blocks.5.3.bn1.bias:[1016]***blocks.5.3.bn1.running_mean:[1016]***blocks.5.3.bn1.running_var:[1016]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1016, 1, 5, 5]***blocks.5.3.bn2.weight:[1016]***blocks.5.3.bn2.bias:[1016]***blocks.5.3.bn2.running_mean:[1016]***blocks.5.3.bn2.running_var:[1016]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[58, 1016, 1, 
1]***blocks.5.3.se.conv_reduce.bias:[58]***blocks.5.3.se.conv_expand.weight:[1016, 58, 1, 1]***blocks.5.3.se.conv_expand.bias:[1016]***blocks.5.3.conv_pwl.weight:[232, 1016, 1, 1]***blocks.5.3.bn3.weight:[232]***blocks.5.3.bn3.bias:[232]***blocks.5.3.bn3.running_mean:[232]***blocks.5.3.bn3.running_var:[232]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1130, 232, 1, 1]***blocks.5.4.bn1.weight:[1130]***blocks.5.4.bn1.bias:[1130]***blocks.5.4.bn1.running_mean:[1130]***blocks.5.4.bn1.running_var:[1130]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1130, 1, 5, 5]***blocks.5.4.bn2.weight:[1130]***blocks.5.4.bn2.bias:[1130]***blocks.5.4.bn2.running_mean:[1130]***blocks.5.4.bn2.running_var:[1130]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[58, 1130, 1, 1]***blocks.5.4.se.conv_reduce.bias:[58]***blocks.5.4.se.conv_expand.weight:[1130, 58, 1, 1]***blocks.5.4.se.conv_expand.bias:[1130]***blocks.5.4.conv_pwl.weight:[232, 1130, 1, 1]***blocks.5.4.bn3.weight:[232]***blocks.5.4.bn3.bias:[232]***blocks.5.4.bn3.running_mean:[232]***blocks.5.4.bn3.running_var:[232]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.5.5.conv_pw.weight:[1266, 232, 1, 1]***blocks.5.5.bn1.weight:[1266]***blocks.5.5.bn1.bias:[1266]***blocks.5.5.bn1.running_mean:[1266]***blocks.5.5.bn1.running_var:[1266]***blocks.5.5.bn1.num_batches_tracked:[]***blocks.5.5.conv_dw.weight:[1266, 1, 5, 5]***blocks.5.5.bn2.weight:[1266]***blocks.5.5.bn2.bias:[1266]***blocks.5.5.bn2.running_mean:[1266]***blocks.5.5.bn2.running_var:[1266]***blocks.5.5.bn2.num_batches_tracked:[]***blocks.5.5.se.conv_reduce.weight:[58, 1266, 1, 1]***blocks.5.5.se.conv_reduce.bias:[58]***blocks.5.5.se.conv_expand.weight:[1266, 58, 1, 1]***blocks.5.5.se.conv_expand.bias:[1266]***blocks.5.5.conv_pwl.weight:[232, 1266, 1, 1]***blocks.5.5.bn3.weight:[232]***blocks.5.5.bn3.bias:[232]***blocks.5.5.bn3.running_mean:[232]***blocks.5.5.bn3.running_var:[232]***blocks.5.5.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1392, 232, 1, 1]***blocks.6.0.bn1.weight:[1392]***blocks.6.0.bn1.bias:[1392]***blocks.6.0.bn1.running_mean:[1392]***blocks.6.0.bn1.running_var:[1392]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1392, 1, 3, 3]***blocks.6.0.bn2.weight:[1392]***blocks.6.0.bn2.bias:[1392]***blocks.6.0.bn2.running_mean:[1392]***blocks.6.0.bn2.running_var:[1392]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[58, 1392, 1, 1]***blocks.6.0.se.conv_reduce.bias:[58]***blocks.6.0.se.conv_expand.weight:[1392, 58, 1, 1]***blocks.6.0.se.conv_expand.bias:[1392]***blocks.6.0.conv_pwl.weight:[384, 1392, 1, 1]***blocks.6.0.bn3.weight:[384]***blocks.6.0.bn3.bias:[384]***blocks.6.0.bn3.running_mean:[384]***blocks.6.0.bn3.running_var:[384]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2301, 384, 1, 1]***blocks.6.1.bn1.weight:[2301]***blocks.6.1.bn1.bias:[2301]***blocks.6.1.bn1.running_mean:[2301]***blocks.6.1.bn1.running_var:[2301]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2301, 1, 3, 3]***blocks.6.1.bn2.weight:[2301]***blocks.6.1.bn2.bias:[2301]***blocks.6.1.bn2.running_mean:[2301]***blocks.6.1.bn2.running_var:[2301]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[96, 2301, 1, 1]***blocks.6.1.se.conv_reduce.bias:[96]***blocks.6.1.se.conv_expand.weight:[2301, 96, 1, 1]***blocks.6.1.se.conv_expand.bias:[2301]***blocks.6.1.conv_pwl.weight:[384, 2301, 1, 
1]***blocks.6.1.bn3.weight:[384]***blocks.6.1.bn3.bias:[384]***blocks.6.1.bn3.running_mean:[384]***blocks.6.1.bn3.running_var:[384]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1536, 384, 1, 1]***bn2.weight:[1536]***bn2.bias:[1536]***bn2.running_mean:[1536]***bn2.running_var:[1536]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1536]***classifier.bias:[1000] \ No newline at end of file diff --git a/pytorch-image-models/timm/models/resnet.py b/pytorch-image-models/timm/models/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..dd6aa1e8b20085ca9b36a0e3389879bfe083021a --- /dev/null +++ b/pytorch-image-models/timm/models/resnet.py @@ -0,0 +1,2109 @@ +"""PyTorch ResNet + +This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with +additional dropout and dynamic global avg/max pool. + +ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman + +Copyright 2019, Ross Wightman +""" +import math +from functools import partial +from typing import Any, Dict, List, Optional, Tuple, Type, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, LayerType, create_attn, \ + get_attn, get_act_layer, get_norm_layer, create_classifier, create_aa, to_ntuple +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs, register_model_deprecations + +__all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] # model_registry will add each entrypoint fn to this + + +def get_padding(kernel_size: int, stride: int, dilation: int = 1) -> int: + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__( + self, + inplanes: int, + planes: int, + stride: int = 1, + downsample: Optional[nn.Module] = None, + cardinality: int = 1, + base_width: int = 64, + reduce_first: int = 1, + dilation: int = 1, + first_dilation: Optional[int] = None, + act_layer: Type[nn.Module] = nn.ReLU, + norm_layer: Type[nn.Module] = nn.BatchNorm2d, + attn_layer: Optional[Type[nn.Module]] = None, + aa_layer: Optional[Type[nn.Module]] = None, + drop_block: Optional[Type[nn.Module]] = None, + drop_path: Optional[nn.Module] = None, + ): + """ + Args: + inplanes: Input channel dimensionality. + planes: Used to determine output channel dimensionalities. + stride: Stride used in convolution layers. + downsample: Optional downsample layer for residual path. + cardinality: Number of convolution groups. + base_width: Base width used to determine output channel dimensionality. + reduce_first: Reduction factor for first convolution output width of residual blocks. + dilation: Dilation rate for convolution layers. + first_dilation: Dilation rate for first convolution layer. + act_layer: Activation layer. + norm_layer: Normalization layer. + attn_layer: Attention layer. + aa_layer: Anti-aliasing layer. + drop_block: Class for DropBlock layer. + drop_path: Optional DropPath layer. 
+ """ + super(BasicBlock, self).__init__() + + assert cardinality == 1, 'BasicBlock only supports cardinality of 1' + assert base_width == 64, 'BasicBlock does not support changing base width' + first_planes = planes // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) + + self.conv1 = nn.Conv2d( + inplanes, first_planes, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation, + dilation=first_dilation, bias=False) + self.bn1 = norm_layer(first_planes) + self.drop_block = drop_block() if drop_block is not None else nn.Identity() + self.act1 = act_layer(inplace=True) + self.aa = create_aa(aa_layer, channels=first_planes, stride=stride, enable=use_aa) + + self.conv2 = nn.Conv2d( + first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False) + self.bn2 = norm_layer(outplanes) + + self.se = create_attn(attn_layer, outplanes) + + self.act2 = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_path = drop_path + + def zero_init_last(self): + if getattr(self.bn2, 'weight', None) is not None: + nn.init.zeros_(self.bn2.weight) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x + + x = self.conv1(x) + x = self.bn1(x) + x = self.drop_block(x) + x = self.act1(x) + x = self.aa(x) + + x = self.conv2(x) + x = self.bn2(x) + + if self.se is not None: + x = self.se(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act2(x) + + return x + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__( + self, + inplanes: int, + planes: int, + stride: int = 1, + downsample: Optional[nn.Module] = None, + cardinality: int = 1, + base_width: int = 64, + reduce_first: int = 1, + dilation: int = 1, + first_dilation: Optional[int] = None, + act_layer: Type[nn.Module] = nn.ReLU, + norm_layer: Type[nn.Module] = nn.BatchNorm2d, + attn_layer: Optional[Type[nn.Module]] = None, + aa_layer: Optional[Type[nn.Module]] = None, + drop_block: Optional[Type[nn.Module]] = None, + drop_path: Optional[nn.Module] = None, + ): + """ + Args: + inplanes: Input channel dimensionality. + planes: Used to determine output channel dimensionalities. + stride: Stride used in convolution layers. + downsample: Optional downsample layer for residual path. + cardinality: Number of convolution groups. + base_width: Base width used to determine output channel dimensionality. + reduce_first: Reduction factor for first convolution output width of residual blocks. + dilation: Dilation rate for convolution layers. + first_dilation: Dilation rate for first convolution layer. + act_layer: Activation layer. + norm_layer: Normalization layer. + attn_layer: Attention layer. + aa_layer: Anti-aliasing layer. + drop_block: Class for DropBlock layer. + drop_path: Optional DropPath layer. 
+ """ + super(Bottleneck, self).__init__() + + width = int(math.floor(planes * (base_width / 64)) * cardinality) + first_planes = width // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) + + self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False) + self.bn1 = norm_layer(first_planes) + self.act1 = act_layer(inplace=True) + + self.conv2 = nn.Conv2d( + first_planes, width, kernel_size=3, stride=1 if use_aa else stride, + padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False) + self.bn2 = norm_layer(width) + self.drop_block = drop_block() if drop_block is not None else nn.Identity() + self.act2 = act_layer(inplace=True) + self.aa = create_aa(aa_layer, channels=width, stride=stride, enable=use_aa) + + self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False) + self.bn3 = norm_layer(outplanes) + + self.se = create_attn(attn_layer, outplanes) + + self.act3 = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_path = drop_path + + def zero_init_last(self): + if getattr(self.bn3, 'weight', None) is not None: + nn.init.zeros_(self.bn3.weight) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x + + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.drop_block(x) + x = self.act2(x) + x = self.aa(x) + + x = self.conv3(x) + x = self.bn3(x) + + if self.se is not None: + x = self.se(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act3(x) + + return x + + +def downsample_conv( + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int = 1, + dilation: int = 1, + first_dilation: Optional[int] = None, + norm_layer: Optional[Type[nn.Module]] = None, +) -> nn.Module: + norm_layer = norm_layer or nn.BatchNorm2d + kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size + first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1 + p = get_padding(kernel_size, stride, first_dilation) + + return nn.Sequential(*[ + nn.Conv2d( + in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False), + norm_layer(out_channels) + ]) + + +def downsample_avg( + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int = 1, + dilation: int = 1, + first_dilation: Optional[int] = None, + norm_layer: Optional[Type[nn.Module]] = None, +) -> nn.Module: + norm_layer = norm_layer or nn.BatchNorm2d + avg_stride = stride if dilation == 1 else 1 + if stride == 1 and dilation == 1: + pool = nn.Identity() + else: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + + return nn.Sequential(*[ + pool, + nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False), + norm_layer(out_channels) + ]) + + +def drop_blocks(drop_prob: float = 0.): + return [ + None, None, + partial(DropBlock2d, drop_prob=drop_prob, block_size=5, gamma_scale=0.25) if drop_prob else None, + partial(DropBlock2d, drop_prob=drop_prob, block_size=3, gamma_scale=1.00) if drop_prob else None] + + +def make_blocks( + block_fns: Tuple[Union[BasicBlock, Bottleneck]], + channels: Tuple[int, ...], + block_repeats: Tuple[int, ...], + 
inplanes: int, + reduce_first: int = 1, + output_stride: int = 32, + down_kernel_size: int = 1, + avg_down: bool = False, + drop_block_rate: float = 0., + drop_path_rate: float = 0., + **kwargs, +) -> Tuple[List[Tuple[str, nn.Module]], List[Dict[str, Any]]]: + stages = [] + feature_info = [] + net_num_blocks = sum(block_repeats) + net_block_idx = 0 + net_stride = 4 + dilation = prev_dilation = 1 + for stage_idx, (block_fn, planes, num_blocks, db) in enumerate(zip(block_fns, channels, block_repeats, drop_blocks(drop_block_rate))): + stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it + stride = 1 if stage_idx == 0 else 2 + if net_stride >= output_stride: + dilation *= stride + stride = 1 + else: + net_stride *= stride + + downsample = None + if stride != 1 or inplanes != planes * block_fn.expansion: + down_kwargs = dict( + in_channels=inplanes, + out_channels=planes * block_fn.expansion, + kernel_size=down_kernel_size, + stride=stride, + dilation=dilation, + first_dilation=prev_dilation, + norm_layer=kwargs.get('norm_layer'), + ) + downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs) + + block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs) + blocks = [] + for block_idx in range(num_blocks): + downsample = downsample if block_idx == 0 else None + stride = stride if block_idx == 0 else 1 + block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule + blocks.append(block_fn( + inplanes, + planes, + stride, + downsample, + first_dilation=prev_dilation, + drop_path=DropPath(block_dpr) if block_dpr > 0. else None, + **block_kwargs, + )) + prev_dilation = dilation + inplanes = planes * block_fn.expansion + net_block_idx += 1 + + stages.append((stage_name, nn.Sequential(*blocks))) + feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name)) + + return stages, feature_info + + +class ResNet(nn.Module): + """ResNet / ResNeXt / SE-ResNeXt / SE-Net + + This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that + * have > 1 stride in the 3x3 conv layer of bottleneck + * have conv-bn-act ordering + + This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s + variants included in the MXNet Gluon ResNetV1b model. The C and D variants are also discussed in the + 'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to torchvision default. 
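+ 
+    A minimal construction sketch (illustrative only; it simply mirrors the constructor arguments defined 
+    below, e.g. a ResNet-50-like model uses Bottleneck blocks with (3, 4, 6, 3) repeats per stage): 
+ 
+        >>> model = ResNet(block=Bottleneck, layers=(3, 4, 6, 3)) 
+        >>> logits = model(torch.randn(1, 3, 224, 224))  # -> torch.Size([1, 1000]) with the default head 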
+ + ResNet variants (the same modifications can be used in SE/ResNeXt models as well): + * normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b + * c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64) + * d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample + * e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample + * s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128) + * t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample + * tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample + + ResNeXt + * normal - 7x7 stem, stem_width = 64, standard cardinality and base widths + * same c,d, e, s variants as ResNet can be enabled + + SE-ResNeXt + * normal - 7x7 stem, stem_width = 64 + * same c, d, e, s variants as ResNet can be enabled + + SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality=64, + reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block + """ + + def __init__( + self, + block: Union[BasicBlock, Bottleneck], + layers: Tuple[int, ...], + num_classes: int = 1000, + in_chans: int = 3, + output_stride: int = 32, + global_pool: str = 'avg', + cardinality: int = 1, + base_width: int = 64, + stem_width: int = 64, + stem_type: str = '', + replace_stem_pool: bool = False, + block_reduce_first: int = 1, + down_kernel_size: int = 1, + avg_down: bool = False, + channels: Optional[Tuple[int, ...]] = (64, 128, 256, 512), + act_layer: LayerType = nn.ReLU, + norm_layer: LayerType = nn.BatchNorm2d, + aa_layer: Optional[Type[nn.Module]] = None, + drop_rate: float = 0.0, + drop_path_rate: float = 0., + drop_block_rate: float = 0., + zero_init_last: bool = True, + block_args: Optional[Dict[str, Any]] = None, + ): + """ + Args: + block (nn.Module): class for the residual block. Options are BasicBlock, Bottleneck. + layers (List[int]) : number of layers in each block + num_classes (int): number of classification classes (default 1000) + in_chans (int): number of input (color) channels. (default 3) + output_stride (int): output stride of the network, 32, 16, or 8. (default 32) + global_pool (str): Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' (default 'avg') + cardinality (int): number of convolution groups for 3x3 conv in Bottleneck. (default 1) + base_width (int): bottleneck channels factor. 
`planes * base_width / 64 * cardinality` (default 64) + stem_width (int): number of channels in stem convolutions (default 64) + stem_type (str): The type of stem (default ''): + * '', default - a single 7x7 conv with a width of stem_width + * 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2 + * 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2 + replace_stem_pool (bool): replace stem max-pooling layer with a 3x3 stride-2 convolution + block_reduce_first (int): Reduction factor for first convolution output width of residual blocks, + 1 for all archs except senets, where 2 (default 1) + down_kernel_size (int): kernel size of residual block downsample path, + 1x1 for most, 3x3 for senets (default: 1) + avg_down (bool): use avg pooling for projection skip connection between stages/downsample (default False) + act_layer (str, nn.Module): activation layer + norm_layer (str, nn.Module): normalization layer + aa_layer (nn.Module): anti-aliasing layer + drop_rate (float): Dropout probability before classifier, for training (default 0.) + drop_path_rate (float): Stochastic depth drop-path rate (default 0.) + drop_block_rate (float): Drop block rate (default 0.) + zero_init_last (bool): zero-init the last weight in residual path (usually last BN affine weight) + block_args (dict): Extra kwargs to pass through to block module + """ + super(ResNet, self).__init__() + block_args = block_args or dict() + assert output_stride in (8, 16, 32) + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + + act_layer = get_act_layer(act_layer) + norm_layer = get_norm_layer(norm_layer) + + # Stem + deep_stem = 'deep' in stem_type + inplanes = stem_width * 2 if deep_stem else 64 + if deep_stem: + stem_chs = (stem_width, stem_width) + if 'tiered' in stem_type: + stem_chs = (3 * (stem_width // 4), stem_width) + self.conv1 = nn.Sequential(*[ + nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False), + norm_layer(stem_chs[0]), + act_layer(inplace=True), + nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False), + norm_layer(stem_chs[1]), + act_layer(inplace=True), + nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)]) + else: + self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False) + self.bn1 = norm_layer(inplanes) + self.act1 = act_layer(inplace=True) + self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')] + + # Stem pooling. The name 'maxpool' remains for weight compatibility. 
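+        # When replace_stem_pool is set, the max pool below is swapped for a stride-2 3x3 conv (kept at stride 1 
+        # and followed by the anti-aliasing layer when aa_layer is given); otherwise a 3x3/2 max pool (or an 
+        # aa_layer based pool) is used, so the stem output sits at stride 4 either way. 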
+ if replace_stem_pool: + self.maxpool = nn.Sequential(*filter(None, [ + nn.Conv2d(inplanes, inplanes, 3, stride=1 if aa_layer else 2, padding=1, bias=False), + create_aa(aa_layer, channels=inplanes, stride=2) if aa_layer is not None else None, + norm_layer(inplanes), + act_layer(inplace=True), + ])) + else: + if aa_layer is not None: + if issubclass(aa_layer, nn.AvgPool2d): + self.maxpool = aa_layer(2) + else: + self.maxpool = nn.Sequential(*[ + nn.MaxPool2d(kernel_size=3, stride=1, padding=1), + aa_layer(channels=inplanes, stride=2)]) + else: + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + # Feature Blocks + block_fns = to_ntuple(len(channels))(block) + stage_modules, stage_feature_info = make_blocks( + block_fns, + channels, + layers, + inplanes, + cardinality=cardinality, + base_width=base_width, + output_stride=output_stride, + reduce_first=block_reduce_first, + avg_down=avg_down, + down_kernel_size=down_kernel_size, + act_layer=act_layer, + norm_layer=norm_layer, + aa_layer=aa_layer, + drop_block_rate=drop_block_rate, + drop_path_rate=drop_path_rate, + **block_args, + ) + for stage in stage_modules: + self.add_module(*stage) # layer1, layer2, etc + self.feature_info.extend(stage_feature_info) + + # Head (Pooling and Classifier) + self.num_features = self.head_hidden_size = channels[-1] * block_fns[-1].expansion + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + self.init_weights(zero_init_last=zero_init_last) + + @torch.jit.ignore + def init_weights(self, zero_init_last: bool = True): + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if zero_init_last: + for m in self.modules(): + if hasattr(m, 'zero_init_last'): + m.zero_init_last() + + @torch.jit.ignore + def group_matcher(self, coarse: bool = False): + matcher = dict(stem=r'^conv1|bn1|maxpool', blocks=r'^layer(\d+)' if coarse else r'^layer(\d+)\.(\d+)') + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable: bool = True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self, name_only: bool = False): + return 'fc' if name_only else self.fc + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to compatible intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
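+        # Feature index convention (5 levels): 0 = stem activation (act1), 1..4 = layer1..layer4 outputs, 
+        # so e.g. indices=(1, 4) collects the layer1 and layer4 feature maps. 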
+ intermediates = [] + take_indices, max_index = feature_take_indices(5, indices) + + # forward pass + feat_idx = 0 + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + if feat_idx in take_indices: + intermediates.append(x) + x = self.maxpool(x) + + layer_names = ('layer1', 'layer2', 'layer3', 'layer4') + if stop_early: + layer_names = layer_names[:max_index] + for n in layer_names: + feat_idx += 1 + x = getattr(self, n)(x) # won't work with torchscript, but keeps code reasonable, FML + if feat_idx in take_indices: + intermediates.append(x) + + if intermediates_only: + return intermediates + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. + """ + take_indices, max_index = feature_take_indices(5, indices) + layer_names = ('layer1', 'layer2', 'layer3', 'layer4') + layer_names = layer_names[max_index:] + for n in layer_names: + setattr(self, n, nn.Identity()) + if prune_head: + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + x = self.maxpool(x) + + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq([self.layer1, self.layer2, self.layer3, self.layer4], x, flatten=True) + else: + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor: + x = self.global_pool(x) + if self.drop_rate: + x = F.dropout(x, p=float(self.drop_rate), training=self.training) + return x if pre_logits else self.fc(x) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_resnet(variant, pretrained: bool = False, **kwargs) -> ResNet: + return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +def _tcfg(url='', **kwargs): + return _cfg(url=url, **dict({'interpolation': 'bicubic'}, **kwargs)) + + +def _ttcfg(url='', **kwargs): + return _cfg(url=url, **dict({ + 'interpolation': 'bicubic', 'test_input_size': (3, 288, 288), 'test_crop_pct': 0.95, + 'origin_url': 'https://github.com/huggingface/pytorch-image-models', + }, **kwargs)) + + +def _rcfg(url='', **kwargs): + return _cfg(url=url, **dict({ + 'interpolation': 'bicubic', 'crop_pct': 0.95, 'test_input_size': (3, 288, 288), 'test_crop_pct': 1.0, + 'origin_url': 'https://github.com/huggingface/pytorch-image-models', 'paper_ids': 'arXiv:2110.00476' + }, **kwargs)) + + +def _r3cfg(url='', **kwargs): + return _cfg(url=url, **dict({ + 'interpolation': 'bicubic', 'input_size': (3, 160, 160), 'pool_size': (5, 5), + 'crop_pct': 0.95, 'test_input_size': (3, 224, 224), 'test_crop_pct': 0.95, + 'origin_url': 'https://github.com/huggingface/pytorch-image-models', 'paper_ids': 'arXiv:2110.00476', + }, **kwargs)) + + +def _gcfg(url='', **kwargs): + return _cfg(url=url, **dict({ + 'interpolation': 'bicubic', + 'origin_url': 'https://cv.gluon.ai/model_zoo/classification.html', + }, **kwargs)) + + +default_cfgs = generate_default_cfgs({ + 
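+    # Pretrained cfg keys follow timm's '<architecture>.<pretrained_tag>' convention, e.g. 'resnet50.a1_in1k' 
+    # pairs the resnet50 architecture with the RSB-recipe 'a1' ImageNet-1k weights. 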
# ResNet and Wide ResNet trained w/ timm (RSB paper and others) + 'resnet10t.c3_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet10t_176_c3-f3215ab1.pth', + input_size=(3, 176, 176), pool_size=(6, 6), test_crop_pct=0.95, test_input_size=(3, 224, 224), + first_conv='conv1.0'), + 'resnet14t.c3_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet14t_176_c3-c4ed2c37.pth', + input_size=(3, 176, 176), pool_size=(6, 6), test_crop_pct=0.95, test_input_size=(3, 224, 224), + first_conv='conv1.0'), + 'resnet18.a1_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a1_0-d63eafa0.pth'), + 'resnet18.a2_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a2_0-b61bd467.pth'), + 'resnet18.a3_in1k': _r3cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a3_0-40c531c8.pth'), + 'resnet18d.ra2_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth', + first_conv='conv1.0'), + 'resnet18d.ra4_e3600_r224_in1k': _rcfg( + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.9, first_conv='conv1.0'), + 'resnet34.a1_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a1_0-46f8f793.pth'), + 'resnet34.a2_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a2_0-82d47d71.pth'), + 'resnet34.a3_in1k': _r3cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a3_0-a20cabb6.pth', + crop_pct=0.95), + 'resnet34.bt_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'), + 'resnet34.ra4_e3600_r224_in1k': _rcfg( + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.9), + 'resnet34d.ra2_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth', + first_conv='conv1.0'), + 'resnet26.bt_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth'), + 'resnet26d.bt_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth', + first_conv='conv1.0'), + 'resnet26t.ra2_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet26t_256_ra2-6f6fa748.pth', + first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), + 'resnet50.a1_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1_0-14fe96d1.pth'), + 'resnet50.a1h_in1k': _rcfg( + hf_hub_id='timm/', + 
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1h2_176-001a1197.pth', + input_size=(3, 176, 176), pool_size=(6, 6), crop_pct=0.9, test_input_size=(3, 224, 224), test_crop_pct=1.0), + 'resnet50.a2_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a2_0-a2746f79.pth'), + 'resnet50.a3_in1k': _r3cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a3_0-59cae1ef.pth'), + 'resnet50.b1k_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_b1k-532a802a.pth'), + 'resnet50.b2k_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_b2k-1ba180c1.pth'), + 'resnet50.c1_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_c1-5ba5e060.pth'), + 'resnet50.c2_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_c2-d01e05b2.pth'), + 'resnet50.d_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_d-f39db8af.pth'), + 'resnet50.ram_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth'), + 'resnet50.am_in1k': _tcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_am-6c502b37.pth'), + 'resnet50.ra_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_ra-85ebb6e5.pth'), + 'resnet50.bt_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/rw_resnet50-86acaeed.pth'), + 'resnet50d.ra2_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth', + first_conv='conv1.0'), + 'resnet50d.ra4_e3600_r224_in1k': _rcfg( + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0, + first_conv='conv1.0'), + 'resnet50d.a1_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a1_0-e20cff14.pth', + first_conv='conv1.0'), + 'resnet50d.a2_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a2_0-a3adc64d.pth', + first_conv='conv1.0'), + 'resnet50d.a3_in1k': _r3cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a3_0-403fdfad.pth', + first_conv='conv1.0'), + 'resnet50t.untrained': _ttcfg(first_conv='conv1.0'), + 'resnet101.a1h_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1h-36d3f2aa.pth'), + 'resnet101.a1_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1_0-cdcb52a9.pth'), + 'resnet101.a2_in1k': _rcfg( 
+ hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a2_0-6edb36c7.pth'), + 'resnet101.a3_in1k': _r3cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a3_0-1db14157.pth'), + 'resnet101d.ra2_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth', + first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, + test_crop_pct=1.0, test_input_size=(3, 320, 320)), + 'resnet152.a1h_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1h-dc400468.pth'), + 'resnet152.a1_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1_0-2eee8a7a.pth'), + 'resnet152.a2_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a2_0-b4c6978f.pth'), + 'resnet152.a3_in1k': _r3cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a3_0-134d4688.pth'), + 'resnet152d.ra2_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth', + first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, + test_crop_pct=1.0, test_input_size=(3, 320, 320)), + 'resnet200.untrained': _ttcfg(), + 'resnet200d.ra2_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth', + first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, + test_crop_pct=1.0, test_input_size=(3, 320, 320)), + 'wide_resnet50_2.racm_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth'), + + # torchvision resnet weights + 'resnet18.tv_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/resnet18-f37072fd.pth', + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 'resnet34.tv_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/resnet34-b627a593.pth', + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 'resnet50.tv_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/resnet50-0676ba61.pth', + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 'resnet50.tv2_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/resnet50-11ad3fa6.pth', + input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 'resnet101.tv_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/resnet101-63fe2227.pth', + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 'resnet101.tv2_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/resnet101-cd907fc2.pth', + input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 
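+    # The '.tv2_in1k' tags here appear to correspond to torchvision's updated-recipe weights, 
+    # trained at 176x176 and evaluated at 224x224 per the cfg fields above and below. 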
'resnet152.tv_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/resnet152-394f9c45.pth', + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 'resnet152.tv2_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/resnet152-f82ba261.pth', + input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 'wide_resnet50_2.tv_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 'wide_resnet50_2.tv2_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/wide_resnet50_2-9ba9bcbe.pth', + input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 'wide_resnet101_2.tv_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 'wide_resnet101_2.tv2_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/wide_resnet101_2-d733dc28.pth', + input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + + # ResNets w/ alternative norm layers + 'resnet50_gn.a1h_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_gn_a1h2-8fe6c4d0.pth', + crop_pct=0.94), + + # ResNeXt trained in timm (RSB paper and others) + 'resnext50_32x4d.a1h_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a1h-0146ab0a.pth'), + 'resnext50_32x4d.a1_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a1_0-b5a91a1d.pth'), + 'resnext50_32x4d.a2_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a2_0-efc76add.pth'), + 'resnext50_32x4d.a3_in1k': _r3cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a3_0-3e450271.pth'), + 'resnext50_32x4d.ra_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth'), + 'resnext50d_32x4d.bt_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth', + first_conv='conv1.0'), + 'resnext101_32x4d.untrained': _ttcfg(), + 'resnext101_64x4d.c1_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnext101_64x4d_c-0d0e0cc0.pth'), + + # torchvision ResNeXt weights + 'resnext50_32x4d.tv_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 'resnext101_32x8d.tv_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', + 
license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 'resnext101_64x4d.tv_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/resnext101_64x4d-173b62eb.pth', + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 'resnext50_32x4d.tv2_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/resnext50_32x4d-1a0047aa.pth', + input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + 'resnext101_32x8d.tv2_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/resnext101_32x8d-110c445d.pth', + input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, + license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), + + # ResNeXt models - Weakly Supervised Pretraining on Instagram Hashtags + # from https://github.com/facebookresearch/WSL-Images + # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only. + 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'), + 'resnext101_32x16d.fb_wsl_ig1b_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'), + 'resnext101_32x32d.fb_wsl_ig1b_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'), + 'resnext101_32x48d.fb_wsl_ig1b_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'), + + # Semi-Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models + # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only. 
+ 'resnet18.fb_ssl_yfcc100m_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), + 'resnet50.fb_ssl_yfcc100m_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), + 'resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), + 'resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), + 'resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), + 'resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), + + # Semi-Weakly Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models + # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only. 
+ 'resnet18.fb_swsl_ig1b_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), + 'resnet50.fb_swsl_ig1b_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), + 'resnext50_32x4d.fb_swsl_ig1b_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), + 'resnext101_32x4d.fb_swsl_ig1b_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), + 'resnext101_32x8d.fb_swsl_ig1b_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), + 'resnext101_32x16d.fb_swsl_ig1b_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth', + license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), + + # Efficient Channel Attention ResNets + 'ecaresnet26t.ra2_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet26t_ra2-46609757.pth', + first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + test_crop_pct=0.95, test_input_size=(3, 320, 320)), + 'ecaresnetlight.miil_in1k': _tcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnetlight-75a9c627.pth', + test_crop_pct=0.95, test_input_size=(3, 288, 288)), + 'ecaresnet50d.miil_in1k': _tcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet50d-93c81e3b.pth', + first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)), + 'ecaresnet50d_pruned.miil_in1k': _tcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet50d_p-e4fa23c2.pth', + first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)), + 'ecaresnet50t.ra2_in1k': _tcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet50t_ra2-f7ac63c4.pth', + first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + test_crop_pct=0.95, test_input_size=(3, 320, 320)), + 'ecaresnet50t.a1_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a1_0-99bd76a8.pth', + first_conv='conv1.0'), + 'ecaresnet50t.a2_in1k': _rcfg( + hf_hub_id='timm/', + 
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a2_0-b1c7b745.pth', + first_conv='conv1.0'), + 'ecaresnet50t.a3_in1k': _r3cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a3_0-8cc311f1.pth', + first_conv='conv1.0'), + 'ecaresnet101d.miil_in1k': _tcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet101d-153dad65.pth', + first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)), + 'ecaresnet101d_pruned.miil_in1k': _tcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet101d_p-9e74cb91.pth', + first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)), + 'ecaresnet200d.untrained': _ttcfg( + first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.95, pool_size=(8, 8)), + 'ecaresnet269d.ra2_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet269d_320_ra2-7baa55cb.pth', + first_conv='conv1.0', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.95, + test_crop_pct=1.0, test_input_size=(3, 352, 352)), + + # Efficient Channel Attention ResNeXts + 'ecaresnext26t_32x4d.untrained': _tcfg(first_conv='conv1.0'), + 'ecaresnext50t_32x4d.untrained': _tcfg(first_conv='conv1.0'), + + # Squeeze-Excitation ResNets, to eventually replace the models in senet.py + 'seresnet18.untrained': _ttcfg(), + 'seresnet34.untrained': _ttcfg(), + 'seresnet50.a1_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a1_0-ffa00869.pth', + crop_pct=0.95), + 'seresnet50.a2_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a2_0-850de0d9.pth', + crop_pct=0.95), + 'seresnet50.a3_in1k': _r3cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a3_0-317ecd56.pth', + crop_pct=0.95), + 'seresnet50.ra2_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth'), + 'seresnet50t.untrained': _ttcfg( + first_conv='conv1.0'), + 'seresnet101.untrained': _ttcfg(), + 'seresnet152.untrained': _ttcfg(), + 'seresnet152d.ra2_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth', + first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, + test_crop_pct=1.0, test_input_size=(3, 320, 320) + ), + 'seresnet200d.untrained': _ttcfg( + first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8)), + 'seresnet269d.untrained': _ttcfg( + first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8)), + + # Squeeze-Excitation ResNeXts, to eventually replace the models in senet.py + 'seresnext26d_32x4d.bt_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth', + first_conv='conv1.0'), + 'seresnext26t_32x4d.bt_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth', + first_conv='conv1.0'), + 
'seresnext50_32x4d.racm_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth'), + 'seresnext101_32x4d.untrained': _ttcfg(), + 'seresnext101_32x8d.ah_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnext101_32x8d_ah-e6bc4c0a.pth'), + 'seresnext101d_32x8d.ah_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnext101d_32x8d_ah-191d7b94.pth', + first_conv='conv1.0'), + + # ResNets with anti-aliasing / blur pool + 'resnetaa50d.sw_in12k_ft_in1k': _ttcfg( + hf_hub_id='timm/', + first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), + 'resnetaa101d.sw_in12k_ft_in1k': _ttcfg( + hf_hub_id='timm/', + first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), + 'seresnextaa101d_32x8d.sw_in12k_ft_in1k_288': _ttcfg( + hf_hub_id='timm/', + crop_pct=0.95, input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), test_crop_pct=1.0, + first_conv='conv1.0'), + 'seresnextaa101d_32x8d.sw_in12k_ft_in1k': _ttcfg( + hf_hub_id='timm/', + first_conv='conv1.0', test_crop_pct=1.0), + 'seresnextaa201d_32x8d.sw_in12k_ft_in1k_384': _cfg( + hf_hub_id='timm/', + interpolation='bicubic', first_conv='conv1.0', pool_size=(12, 12), input_size=(3, 384, 384), crop_pct=1.0), + 'seresnextaa201d_32x8d.sw_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, interpolation='bicubic', first_conv='conv1.0', + crop_pct=0.95, input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), test_crop_pct=1.0), + + 'resnetaa50d.sw_in12k': _ttcfg( + hf_hub_id='timm/', + num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), + 'resnetaa50d.d_in12k': _ttcfg( + hf_hub_id='timm/', + num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), + 'resnetaa101d.sw_in12k': _ttcfg( + hf_hub_id='timm/', + num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), + 'seresnextaa101d_32x8d.sw_in12k': _ttcfg( + hf_hub_id='timm/', + num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), + + 'resnetblur18.untrained': _ttcfg(), + 'resnetblur50.bt_in1k': _ttcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth'), + 'resnetblur50d.untrained': _ttcfg(first_conv='conv1.0'), + 'resnetblur101d.untrained': _ttcfg(first_conv='conv1.0'), + 'resnetaa34d.untrained': _ttcfg(first_conv='conv1.0'), + 'resnetaa50.a1h_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnetaa50_a1h-4cf422b3.pth'), + + 'seresnetaa50d.untrained': _ttcfg(first_conv='conv1.0'), + 'seresnextaa101d_32x8d.ah_in1k': _rcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnextaa101d_32x8d_ah-83c8ae12.pth', + first_conv='conv1.0'), + + # ResNet-RS models + 'resnetrs50.tf_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs50_ema-6b53758b.pth', + input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.91, test_input_size=(3, 224, 224), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs101.tf_in1k': _cfg( + hf_hub_id='timm/', + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs101_i192_ema-1509bbf6.pth', + input_size=(3, 192, 192), pool_size=(6, 6), crop_pct=0.94, test_input_size=(3, 288, 288), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs152.tf_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs152_i256_ema-a9aff7f9.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs200.tf_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnetrs200_c-6b698b88.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs270.tf_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs270_ema-b40e674c.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 352, 352), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs350.tf_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs350_i256_ema-5a1aa8f1.pth', + input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, test_input_size=(3, 384, 384), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs420.tf_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs420_ema-972dee69.pth', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, test_input_size=(3, 416, 416), + interpolation='bicubic', first_conv='conv1.0'), + + # gluon resnet weights + 'resnet18.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet18_v1b-0757602b.pth'), + 'resnet34.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet34_v1b-c6d82d59.pth'), + 'resnet50.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1b-0ebe02e2.pth'), + 'resnet101.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1b-3b017079.pth'), + 'resnet152.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1b-c1edb0dd.pth'), + 'resnet50c.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1c-48092f55.pth', + first_conv='conv1.0'), + 'resnet101c.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1c-1f26822a.pth', + first_conv='conv1.0'), + 'resnet152c.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1c-a3bb0b98.pth', + first_conv='conv1.0'), + 'resnet50d.gluon_in1k': _gcfg( + hf_hub_id='timm/', + 
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1d-818a1b1b.pth', + first_conv='conv1.0'), + 'resnet101d.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1d-0f9c8644.pth', + first_conv='conv1.0'), + 'resnet152d.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1d-bd354e12.pth', + first_conv='conv1.0'), + 'resnet50s.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1s-1762acc0.pth', + first_conv='conv1.0'), + 'resnet101s.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1s-60fe0cc1.pth', + first_conv='conv1.0'), + 'resnet152s.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1s-dcc41b81.pth', + first_conv='conv1.0'), + 'resnext50_32x4d.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth'), + 'resnext101_32x4d.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth'), + 'resnext101_64x4d.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth'), + 'seresnext50_32x4d.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth'), + 'seresnext101_32x4d.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth'), + 'seresnext101_64x4d.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_64x4d-f9926f93.pth'), + 'senet154.gluon_in1k': _gcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_senet154-70a1a3c0.pth', + first_conv='conv1.0'), + + 'test_resnet.r160_in1k': _cfg( + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, + input_size=(3, 160, 160), pool_size=(5, 5), first_conv='conv1.0'), +}) + + +@register_model +def resnet10t(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-10-T model. + """ + model_args = dict(block=BasicBlock, layers=(1, 1, 1, 1), stem_width=32, stem_type='deep_tiered', avg_down=True) + return _create_resnet('resnet10t', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet14t(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-14-T model. + """ + model_args = dict(block=Bottleneck, layers=(1, 1, 1, 1), stem_width=32, stem_type='deep_tiered', avg_down=True) + return _create_resnet('resnet14t', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet18(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-18 model. 
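+
+    A minimal usage sketch (hedged; it assumes the standard timm.create_model factory, and the
+    optional pretrained tags for this architecture are the ones registered in default_cfgs above,
+    e.g. 'resnet18.gluon_in1k'):
+
+        import timm, torch
+        model = timm.create_model('resnet18', pretrained=False).eval()
+        logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 1000)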
+ """ + model_args = dict(block=BasicBlock, layers=(2, 2, 2, 2)) + return _create_resnet('resnet18', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet18d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-18-D model. + """ + model_args = dict(block=BasicBlock, layers=(2, 2, 2, 2), stem_width=32, stem_type='deep', avg_down=True) + return _create_resnet('resnet18d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet34(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-34 model. + """ + model_args = dict(block=BasicBlock, layers=(3, 4, 6, 3)) + return _create_resnet('resnet34', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet34d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-34-D model. + """ + model_args = dict(block=BasicBlock, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', avg_down=True) + return _create_resnet('resnet34d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet26(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-26 model. + """ + model_args = dict(block=Bottleneck, layers=(2, 2, 2, 2)) + return _create_resnet('resnet26', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet26t(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-26-T model. + """ + model_args = dict(block=Bottleneck, layers=(2, 2, 2, 2), stem_width=32, stem_type='deep_tiered', avg_down=True) + return _create_resnet('resnet26t', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet26d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-26-D model. + """ + model_args = dict(block=Bottleneck, layers=(2, 2, 2, 2), stem_width=32, stem_type='deep', avg_down=True) + return _create_resnet('resnet26d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet50(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3)) + return _create_resnet('resnet50', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet50c(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-50-C model. + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep') + return _create_resnet('resnet50c', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet50d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-50-D model. + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', avg_down=True) + return _create_resnet('resnet50d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet50s(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-50-S model. + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=64, stem_type='deep') + return _create_resnet('resnet50s', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet50t(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-50-T model. + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep_tiered', avg_down=True) + return _create_resnet('resnet50t', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet101(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-101 model. 
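+
+    Like the other entrypoints in this file, it can also be built as a multi-scale feature backbone
+    (hedged sketch, assuming timm's features_only support for ResNet):
+
+        import timm, torch
+        backbone = timm.create_model('resnet101', features_only=True, pretrained=False)
+        feats = backbone(torch.randn(1, 3, 224, 224))
+        print([f.shape[1] for f in feats])  # per-stage channels, e.g. [64, 256, 512, 1024, 2048]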
+ """ + model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3)) + return _create_resnet('resnet101', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet101c(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-101-C model. + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep') + return _create_resnet('resnet101c', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet101d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-101-D model. + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep', avg_down=True) + return _create_resnet('resnet101d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet101s(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-101-S model. + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), stem_width=64, stem_type='deep') + return _create_resnet('resnet101s', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet152(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3)) + return _create_resnet('resnet152', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet152c(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-152-C model. + """ + model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), stem_width=32, stem_type='deep') + return _create_resnet('resnet152c', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet152d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-152-D model. + """ + model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), stem_width=32, stem_type='deep', avg_down=True) + return _create_resnet('resnet152d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet152s(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-152-S model. + """ + model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), stem_width=64, stem_type='deep') + return _create_resnet('resnet152s', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet200(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-200 model. + """ + model_args = dict(block=Bottleneck, layers=(3, 24, 36, 3)) + return _create_resnet('resnet200', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet200d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-200-D model. + """ + model_args = dict(block=Bottleneck, layers=(3, 24, 36, 3), stem_width=32, stem_type='deep', avg_down=True) + return _create_resnet('resnet200d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def wide_resnet50_2(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a Wide ResNet-50-2 model. + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), base_width=128) + return _create_resnet('wide_resnet50_2', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def wide_resnet101_2(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a Wide ResNet-101-2 model. 
+ The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same. + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), base_width=128) + return _create_resnet('wide_resnet101_2', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnet50_gn(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-50 model w/ GroupNorm + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), norm_layer='groupnorm') + return _create_resnet('resnet50_gn', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnext50_32x4d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNeXt50-32x4d model. + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), cardinality=32, base_width=4) + return _create_resnet('resnext50_32x4d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnext50d_32x4d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNeXt50d-32x4d model. ResNext50 w/ deep stem & avg pool downsample + """ + model_args = dict( + block=Bottleneck, layers=(3, 4, 6, 3), cardinality=32, base_width=4, + stem_width=32, stem_type='deep', avg_down=True) + return _create_resnet('resnext50d_32x4d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnext101_32x4d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNeXt-101 32x4d model. + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=4) + return _create_resnet('resnext101_32x4d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnext101_32x8d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNeXt-101 32x8d model. + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=8) + return _create_resnet('resnext101_32x8d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnext101_32x16d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNeXt-101 32x16d model + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=16) + return _create_resnet('resnext101_32x16d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnext101_32x32d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNeXt-101 32x32d model + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=32) + return _create_resnet('resnext101_32x32d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnext101_64x4d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNeXt101-64x4d model. + """ + model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=64, base_width=4) + return _create_resnet('resnext101_64x4d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def ecaresnet26t(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs an ECA-ResNeXt-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem and ECA attn. 
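+
+    A quick way to see the tiered stem widths (hedged sketch, assuming the usual timm factory API):
+
+        import timm
+        import torch.nn as nn
+        m = timm.create_model('ecaresnet26t', pretrained=False)
+        print([c.out_channels for c in m.conv1 if isinstance(c, nn.Conv2d)])  # expect [24, 32, 64]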
+ """ + model_args = dict( + block=Bottleneck, layers=(2, 2, 2, 2), stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca')) + return _create_resnet('ecaresnet26t', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def ecaresnet50d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-50-D model with eca. + """ + model_args = dict( + block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca')) + return _create_resnet('ecaresnet50d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def ecaresnet50d_pruned(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-50-D model pruned with eca. + The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf + """ + model_args = dict( + block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca')) + return _create_resnet('ecaresnet50d_pruned', pretrained, pruned=True, **dict(model_args, **kwargs)) + + +@register_model +def ecaresnet50t(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs an ECA-ResNet-50-T model. + Like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn. + """ + model_args = dict( + block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca')) + return _create_resnet('ecaresnet50t', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def ecaresnetlight(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-50-D light model with eca. + """ + model_args = dict( + block=Bottleneck, layers=(1, 1, 11, 3), stem_width=32, avg_down=True, + block_args=dict(attn_layer='eca')) + return _create_resnet('ecaresnetlight', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def ecaresnet101d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-101-D model with eca. + """ + model_args = dict( + block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca')) + return _create_resnet('ecaresnet101d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def ecaresnet101d_pruned(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-101-D model pruned with eca. + The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf + """ + model_args = dict( + block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca')) + return _create_resnet('ecaresnet101d_pruned', pretrained, pruned=True, **dict(model_args, **kwargs)) + + +@register_model +def ecaresnet200d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-200-D model with ECA. + """ + model_args = dict( + block=Bottleneck, layers=(3, 24, 36, 3), stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca')) + return _create_resnet('ecaresnet200d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def ecaresnet269d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-269-D model with ECA. 
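+    By the conventional counting (3 convs per bottleneck block, plus stem conv and classifier),
+    layers (3, 30, 48, 8) give 3 * (3 + 30 + 48 + 8) + 2 = 269 weighted layers; the deep 'D' stem
+    adds a couple of extra convs that the name, as usual, does not count.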
+ """ + model_args = dict( + block=Bottleneck, layers=(3, 30, 48, 8), stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca')) + return _create_resnet('ecaresnet269d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def ecaresnext26t_32x4d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs an ECA-ResNeXt-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. This model replaces SE module with the ECA module + """ + model_args = dict( + block=Bottleneck, layers=(2, 2, 2, 2), cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca')) + return _create_resnet('ecaresnext26t_32x4d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def ecaresnext50t_32x4d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs an ECA-ResNeXt-50-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. This model replaces SE module with the ECA module + """ + model_args = dict( + block=Bottleneck, layers=(2, 2, 2, 2), cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca')) + return _create_resnet('ecaresnext50t_32x4d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def seresnet18(pretrained: bool = False, **kwargs) -> ResNet: + model_args = dict(block=BasicBlock, layers=(2, 2, 2, 2), block_args=dict(attn_layer='se')) + return _create_resnet('seresnet18', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def seresnet34(pretrained: bool = False, **kwargs) -> ResNet: + model_args = dict(block=BasicBlock, layers=(3, 4, 6, 3), block_args=dict(attn_layer='se')) + return _create_resnet('seresnet34', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def seresnet50(pretrained: bool = False, **kwargs) -> ResNet: + model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), block_args=dict(attn_layer='se')) + return _create_resnet('seresnet50', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def seresnet50t(pretrained: bool = False, **kwargs) -> ResNet: + model_args = dict( + block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep_tiered', + avg_down=True, block_args=dict(attn_layer='se')) + return _create_resnet('seresnet50t', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def seresnet101(pretrained: bool = False, **kwargs) -> ResNet: + model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), block_args=dict(attn_layer='se')) + return _create_resnet('seresnet101', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def seresnet152(pretrained: bool = False, **kwargs) -> ResNet: + model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), block_args=dict(attn_layer='se')) + return _create_resnet('seresnet152', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def seresnet152d(pretrained: bool = False, **kwargs) -> ResNet: + model_args = dict( + block=Bottleneck, layers=(3, 8, 36, 3), stem_width=32, stem_type='deep', + avg_down=True, block_args=dict(attn_layer='se')) + return _create_resnet('seresnet152d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def seresnet200d(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-200-D model with SE attn. 
+    """
+    model_args = dict(
+        block=Bottleneck, layers=(3, 24, 36, 3), stem_width=32, stem_type='deep',
+        avg_down=True, block_args=dict(attn_layer='se'))
+    return _create_resnet('seresnet200d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def seresnet269d(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a ResNet-269-D model with SE attn.
+    """
+    model_args = dict(
+        block=Bottleneck, layers=(3, 30, 48, 8), stem_width=32, stem_type='deep',
+        avg_down=True, block_args=dict(attn_layer='se'))
+    return _create_resnet('seresnet269d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def seresnext26d_32x4d(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a SE-ResNeXt-26-D model.
+    This is technically a 28 layer ResNet, using the 'D' modifier from Gluon / bag-of-tricks for
+    combination of deep stem and avg_pool in downsample.
+    """
+    model_args = dict(
+        block=Bottleneck, layers=(2, 2, 2, 2), cardinality=32, base_width=4, stem_width=32,
+        stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'))
+    return _create_resnet('seresnext26d_32x4d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def seresnext26t_32x4d(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a SE-ResNeXt-26-T model.
+    This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
+    in the deep stem.
+    """
+    model_args = dict(
+        block=Bottleneck, layers=(2, 2, 2, 2), cardinality=32, base_width=4, stem_width=32,
+        stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se'))
+    return _create_resnet('seresnext26t_32x4d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def seresnext50_32x4d(pretrained: bool = False, **kwargs) -> ResNet:
+    model_args = dict(
+        block=Bottleneck, layers=(3, 4, 6, 3), cardinality=32, base_width=4,
+        block_args=dict(attn_layer='se'))
+    return _create_resnet('seresnext50_32x4d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def seresnext101_32x4d(pretrained: bool = False, **kwargs) -> ResNet:
+    model_args = dict(
+        block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=4,
+        block_args=dict(attn_layer='se'))
+    return _create_resnet('seresnext101_32x4d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def seresnext101_32x8d(pretrained: bool = False, **kwargs) -> ResNet:
+    model_args = dict(
+        block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=8,
+        block_args=dict(attn_layer='se'))
+    return _create_resnet('seresnext101_32x8d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def seresnext101d_32x8d(pretrained: bool = False, **kwargs) -> ResNet:
+    model_args = dict(
+        block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=8,
+        stem_width=32, stem_type='deep', avg_down=True,
+        block_args=dict(attn_layer='se'))
+    return _create_resnet('seresnext101d_32x8d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def seresnext101_64x4d(pretrained: bool = False, **kwargs) -> ResNet:
+    model_args = dict(
+        block=Bottleneck, layers=(3, 4, 23, 3), cardinality=64, base_width=4,
+        block_args=dict(attn_layer='se'))
+    return _create_resnet('seresnext101_64x4d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def senet154(pretrained: bool = False, **kwargs) -> ResNet:
+    model_args = dict(
+        block=Bottleneck, layers=(3, 8, 36, 3), cardinality=64, base_width=4, stem_type='deep',
+        down_kernel_size=3, block_reduce_first=2,
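+        # down_kernel_size=3 -> 3x3 convs in the downsample shortcuts, block_reduce_first=2 -> halved
+        # width for each block's first conv; both follow the original SENet-154 design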
+        block_args=dict(attn_layer='se'))
+    return _create_resnet('senet154', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def resnetblur18(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a ResNet-18 model with blur anti-aliasing
+    """
+    model_args = dict(block=BasicBlock, layers=(2, 2, 2, 2), aa_layer=BlurPool2d)
+    return _create_resnet('resnetblur18', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def resnetblur50(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a ResNet-50 model with blur anti-aliasing
+    """
+    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), aa_layer=BlurPool2d)
+    return _create_resnet('resnetblur50', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def resnetblur50d(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a ResNet-50-D model with blur anti-aliasing
+    """
+    model_args = dict(
+        block=Bottleneck, layers=(3, 4, 6, 3), aa_layer=BlurPool2d,
+        stem_width=32, stem_type='deep', avg_down=True)
+    return _create_resnet('resnetblur50d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def resnetblur101d(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a ResNet-101-D model with blur anti-aliasing
+    """
+    model_args = dict(
+        block=Bottleneck, layers=(3, 4, 23, 3), aa_layer=BlurPool2d,
+        stem_width=32, stem_type='deep', avg_down=True)
+    return _create_resnet('resnetblur101d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def resnetaa34d(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a ResNet-34-D model w/ avgpool anti-aliasing
+    """
+    model_args = dict(
+        block=BasicBlock, layers=(3, 4, 6, 3), aa_layer=nn.AvgPool2d, stem_width=32, stem_type='deep', avg_down=True)
+    return _create_resnet('resnetaa34d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def resnetaa50(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a ResNet-50 model with avgpool anti-aliasing
+    """
+    model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), aa_layer=nn.AvgPool2d)
+    return _create_resnet('resnetaa50', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def resnetaa50d(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a ResNet-50-D model with avgpool anti-aliasing
+    """
+    model_args = dict(
+        block=Bottleneck, layers=(3, 4, 6, 3), aa_layer=nn.AvgPool2d,
+        stem_width=32, stem_type='deep', avg_down=True)
+    return _create_resnet('resnetaa50d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def resnetaa101d(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a ResNet-101-D model with avgpool anti-aliasing
+    """
+    model_args = dict(
+        block=Bottleneck, layers=(3, 4, 23, 3), aa_layer=nn.AvgPool2d,
+        stem_width=32, stem_type='deep', avg_down=True)
+    return _create_resnet('resnetaa101d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def seresnetaa50d(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a SE-ResNet-50-D model with avgpool anti-aliasing
+    """
+    model_args = dict(
+        block=Bottleneck, layers=(3, 4, 6, 3), aa_layer=nn.AvgPool2d,
+        stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'))
+    return _create_resnet('seresnetaa50d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def seresnextaa101d_32x8d(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a SE-ResNeXt-101-D 32x8d model with avgpool anti-aliasing
+    """
+    model_args = dict(
+        block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=8,
+        stem_width=32, stem_type='deep', avg_down=True, aa_layer=nn.AvgPool2d,
+        block_args=dict(attn_layer='se'))
+    return _create_resnet('seresnextaa101d_32x8d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def seresnextaa201d_32x8d(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a SE-ResNeXt-201-D 32x8d model with avgpool anti-aliasing
+    """
+    model_args = dict(
+        block=Bottleneck, layers=(3, 24, 36, 4), cardinality=32, base_width=8,
+        stem_width=64, stem_type='deep', avg_down=True, aa_layer=nn.AvgPool2d,
+        block_args=dict(attn_layer='se'))
+    return _create_resnet('seresnextaa201d_32x8d', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def resnetrs50(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a ResNet-RS-50 model.
+    Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
+    Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
+    """
+    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
+    model_args = dict(
+        block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', replace_stem_pool=True,
+        avg_down=True, block_args=dict(attn_layer=attn_layer))
+    return _create_resnet('resnetrs50', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def resnetrs101(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a ResNet-RS-101 model.
+    Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
+    Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
+    """
+    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
+    model_args = dict(
+        block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep', replace_stem_pool=True,
+        avg_down=True, block_args=dict(attn_layer=attn_layer))
+    return _create_resnet('resnetrs101', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def resnetrs152(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a ResNet-RS-152 model.
+    Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
+    Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
+    """
+    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
+    model_args = dict(
+        block=Bottleneck, layers=(3, 8, 36, 3), stem_width=32, stem_type='deep', replace_stem_pool=True,
+        avg_down=True, block_args=dict(attn_layer=attn_layer))
+    return _create_resnet('resnetrs152', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def resnetrs200(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a ResNet-RS-200 model.
+    Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
+    Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
+    """
+    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
+    model_args = dict(
+        block=Bottleneck, layers=(3, 24, 36, 3), stem_width=32, stem_type='deep', replace_stem_pool=True,
+        avg_down=True, block_args=dict(attn_layer=attn_layer))
+    return _create_resnet('resnetrs200', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def resnetrs270(pretrained: bool = False, **kwargs) -> ResNet:
+    """Constructs a ResNet-RS-270 model.
+ Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=(4, 29, 53, 4), stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer)) + return _create_resnet('resnetrs270', pretrained, **dict(model_args, **kwargs)) + + + +@register_model +def resnetrs350(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-RS-350 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=(4, 36, 72, 4), stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer)) + return _create_resnet('resnetrs350', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnetrs420(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a ResNet-RS-420 model + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=(4, 44, 87, 4), stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer)) + return _create_resnet('resnetrs420', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def test_resnet(pretrained: bool = False, **kwargs) -> ResNet: + """Constructs a tiny ResNet test model. 
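+    It mixes BasicBlock and Bottleneck stages with a single block per stage, a narrow 16-wide deep
+    stem and small per-stage channels (32, 48, 48, 96), so it builds and runs very quickly.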
+ """ + model_args = dict( + block=[BasicBlock, BasicBlock, Bottleneck, BasicBlock], layers=(1, 1, 1, 1), + stem_width=16, stem_type='deep', avg_down=True, channels=(32, 48, 48, 96)) + return _create_resnet('test_resnet', pretrained, **dict(model_args, **kwargs)) + + +register_model_deprecations(__name__, { + 'tv_resnet34': 'resnet34.tv_in1k', + 'tv_resnet50': 'resnet50.tv_in1k', + 'tv_resnet101': 'resnet101.tv_in1k', + 'tv_resnet152': 'resnet152.tv_in1k', + 'tv_resnext50_32x4d' : 'resnext50_32x4d.tv_in1k', + 'ig_resnext101_32x8d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k', + 'ig_resnext101_32x16d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k', + 'ig_resnext101_32x32d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k', + 'ig_resnext101_32x48d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k', + 'ssl_resnet18': 'resnet18.fb_ssl_yfcc100m_ft_in1k', + 'ssl_resnet50': 'resnet50.fb_ssl_yfcc100m_ft_in1k', + 'ssl_resnext50_32x4d': 'resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k', + 'ssl_resnext101_32x4d': 'resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k', + 'ssl_resnext101_32x8d': 'resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k', + 'ssl_resnext101_32x16d': 'resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k', + 'swsl_resnet18': 'resnet18.fb_swsl_ig1b_ft_in1k', + 'swsl_resnet50': 'resnet50.fb_swsl_ig1b_ft_in1k', + 'swsl_resnext50_32x4d': 'resnext50_32x4d.fb_swsl_ig1b_ft_in1k', + 'swsl_resnext101_32x4d': 'resnext101_32x4d.fb_swsl_ig1b_ft_in1k', + 'swsl_resnext101_32x8d': 'resnext101_32x8d.fb_swsl_ig1b_ft_in1k', + 'swsl_resnext101_32x16d': 'resnext101_32x16d.fb_swsl_ig1b_ft_in1k', + 'gluon_resnet18_v1b': 'resnet18.gluon_in1k', + 'gluon_resnet34_v1b': 'resnet34.gluon_in1k', + 'gluon_resnet50_v1b': 'resnet50.gluon_in1k', + 'gluon_resnet101_v1b': 'resnet101.gluon_in1k', + 'gluon_resnet152_v1b': 'resnet152.gluon_in1k', + 'gluon_resnet50_v1c': 'resnet50c.gluon_in1k', + 'gluon_resnet101_v1c': 'resnet101c.gluon_in1k', + 'gluon_resnet152_v1c': 'resnet152c.gluon_in1k', + 'gluon_resnet50_v1d': 'resnet50d.gluon_in1k', + 'gluon_resnet101_v1d': 'resnet101d.gluon_in1k', + 'gluon_resnet152_v1d': 'resnet152d.gluon_in1k', + 'gluon_resnet50_v1s': 'resnet50s.gluon_in1k', + 'gluon_resnet101_v1s': 'resnet101s.gluon_in1k', + 'gluon_resnet152_v1s': 'resnet152s.gluon_in1k', + 'gluon_resnext50_32x4d': 'resnext50_32x4d.gluon_in1k', + 'gluon_resnext101_32x4d': 'resnext101_32x4d.gluon_in1k', + 'gluon_resnext101_64x4d': 'resnext101_64x4d.gluon_in1k', + 'gluon_seresnext50_32x4d': 'seresnext50_32x4d.gluon_in1k', + 'gluon_seresnext101_32x4d': 'seresnext101_32x4d.gluon_in1k', + 'gluon_seresnext101_64x4d': 'seresnext101_64x4d.gluon_in1k', + 'gluon_senet154': 'senet154.gluon_in1k', + 'seresnext26tn_32x4d': 'seresnext26t_32x4d', +}) diff --git a/pytorch-image-models/timm/models/resnetv2.py b/pytorch-image-models/timm/models/resnetv2.py new file mode 100644 index 0000000000000000000000000000000000000000..ee7d6bcabd5678dcc40fcf4df623ab0db9163fa6 --- /dev/null +++ b/pytorch-image-models/timm/models/resnetv2.py @@ -0,0 +1,911 @@ +"""Pre-Activation ResNet v2 with GroupNorm and Weight Standardization. + +A PyTorch implementation of ResNetV2 adapted from the Google Big-Transfer (BiT) source code +at https://github.com/google-research/big_transfer to match timm interfaces. The BiT weights have +been included here as pretrained models from their original .NPZ checkpoints. 
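+
+For example (a hedged sketch, assuming the usual timm.create_model factory and hub access;
+'resnetv2_50x1_bit.goog_in21k' is one of the pretrained tags defined further below):
+
+    import timm, torch
+    model = timm.create_model('resnetv2_50x1_bit.goog_in21k', pretrained=True).eval()
+    logits = model(torch.randn(1, 3, 224, 224))  # -> (1, 21843), the ImageNet-21k head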
+
+Additionally, supports non pre-activation bottleneck for use as a backbone for Vision Transformers (ViT) and
+extra padding support to allow porting of official Hybrid ResNet pretrained weights from
+https://github.com/google-research/vision_transformer
+
+Thanks to the Google team for the above two repositories and associated papers:
+* Big Transfer (BiT): General Visual Representation Learning - https://arxiv.org/abs/1912.11370
+* An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale - https://arxiv.org/abs/2010.11929
+* Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237
+
+Original copyright of Google code below, modifications by Ross Wightman, Copyright 2020.
+"""
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import OrderedDict  # pylint: disable=g-importing-member
+from functools import partial
+from typing import Optional
+
+import torch
+import torch.nn as nn
+
+from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
+from timm.layers import GroupNormAct, BatchNormAct2d, EvoNorm2dS0, FilterResponseNormTlu2d, ClassifierHead, \
+    DropPath, AvgPool2dSame, create_pool2d, StdConv2d, create_conv2d, get_act_layer, get_norm_act_layer, make_divisible
+from ._builder import build_model_with_cfg
+from ._manipulate import checkpoint_seq, named_apply, adapt_input_conv
+from ._registry import generate_default_cfgs, register_model, register_model_deprecations
+
+__all__ = ['ResNetV2']  # model_registry will add each entrypoint fn to this
+
+
+class PreActBasic(nn.Module):
+    """ Pre-activation basic block (not in typical 'v2' implementations)
+    """
+
+    def __init__(
+            self,
+            in_chs,
+            out_chs=None,
+            bottle_ratio=1.0,
+            stride=1,
+            dilation=1,
+            first_dilation=None,
+            groups=1,
+            act_layer=None,
+            conv_layer=None,
+            norm_layer=None,
+            proj_layer=None,
+            drop_path_rate=0.,
+    ):
+        super().__init__()
+        first_dilation = first_dilation or dilation
+        conv_layer = conv_layer or StdConv2d
+        norm_layer = norm_layer or partial(GroupNormAct, num_groups=32)
+        out_chs = out_chs or in_chs
+        mid_chs = make_divisible(out_chs * bottle_ratio)
+
+        if proj_layer is not None and (stride != 1 or first_dilation != dilation or in_chs != out_chs):
+            self.downsample = proj_layer(
+                in_chs,
+                out_chs,
+                stride=stride,
+                dilation=dilation,
+                first_dilation=first_dilation,
+                preact=True,
+                conv_layer=conv_layer,
+                norm_layer=norm_layer,
+            )
+        else:
+            self.downsample = None
+
+        self.norm1 = norm_layer(in_chs)
+        self.conv1 = conv_layer(in_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)
+        self.norm2 = norm_layer(mid_chs)
+        self.conv2 = conv_layer(mid_chs, out_chs, 3, dilation=dilation, groups=groups)
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
+
+    def zero_init_last(self):
+        # the basic block has no conv3; zero-init the last conv in its residual branch (conv2)
+        nn.init.zeros_(self.conv2.weight)
+
+    def forward(self, x):
+        x_preact = self.norm1(x)
+
+        # shortcut branch
+        shortcut = x
+        if self.downsample is not None:
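+            # NOTE: the projection shortcut consumes the pre-activated input (x_preact), not the raw
+            # input, following the 'Identity Mappings' / BiT pre-activation formulation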
shortcut = self.downsample(x_preact) + + # residual branch + x = self.conv1(x_preact) + x = self.conv2(self.norm2(x)) + x = self.drop_path(x) + return x + shortcut + + +class PreActBottleneck(nn.Module): + """Pre-activation (v2) bottleneck block. + + Follows the implementation of "Identity Mappings in Deep Residual Networks": + https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua + + Except it puts the stride on 3x3 conv when available. + """ + + def __init__( + self, + in_chs, + out_chs=None, + bottle_ratio=0.25, + stride=1, + dilation=1, + first_dilation=None, + groups=1, + act_layer=None, + conv_layer=None, + norm_layer=None, + proj_layer=None, + drop_path_rate=0., + ): + super().__init__() + first_dilation = first_dilation or dilation + conv_layer = conv_layer or StdConv2d + norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) + out_chs = out_chs or in_chs + mid_chs = make_divisible(out_chs * bottle_ratio) + + if proj_layer is not None: + self.downsample = proj_layer( + in_chs, + out_chs, + stride=stride, + dilation=dilation, + first_dilation=first_dilation, + preact=True, + conv_layer=conv_layer, + norm_layer=norm_layer, + ) + else: + self.downsample = None + + self.norm1 = norm_layer(in_chs) + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.norm2 = norm_layer(mid_chs) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + self.norm3 = norm_layer(mid_chs) + self.conv3 = conv_layer(mid_chs, out_chs, 1) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + + def zero_init_last(self): + nn.init.zeros_(self.conv3.weight) + + def forward(self, x): + x_preact = self.norm1(x) + + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x_preact) + + # residual branch + x = self.conv1(x_preact) + x = self.conv2(self.norm2(x)) + x = self.conv3(self.norm3(x)) + x = self.drop_path(x) + return x + shortcut + + +class Bottleneck(nn.Module): + """Non Pre-activation bottleneck block, equiv to V1.5/V1b Bottleneck. Used for ViT. 
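+
+    Rough layout, for contrast with the pre-activation blocks above (norm1/norm2 are norm+act
+    layers, norm3 is built with apply_act=False and the final activation runs only after the
+    residual add; drop_path omitted):
+
+        out = act3(norm3(conv3(norm2(conv2(norm1(conv1(x)))))) + shortcut(x))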
+ """ + def __init__( + self, + in_chs, + out_chs=None, + bottle_ratio=0.25, + stride=1, + dilation=1, + first_dilation=None, + groups=1, + act_layer=None, + conv_layer=None, + norm_layer=None, + proj_layer=None, + drop_path_rate=0., + ): + super().__init__() + first_dilation = first_dilation or dilation + act_layer = act_layer or nn.ReLU + conv_layer = conv_layer or StdConv2d + norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) + out_chs = out_chs or in_chs + mid_chs = make_divisible(out_chs * bottle_ratio) + + if proj_layer is not None: + self.downsample = proj_layer( + in_chs, + out_chs, + stride=stride, + dilation=dilation, + preact=False, + conv_layer=conv_layer, + norm_layer=norm_layer, + ) + else: + self.downsample = None + + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.norm1 = norm_layer(mid_chs) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + self.norm2 = norm_layer(mid_chs) + self.conv3 = conv_layer(mid_chs, out_chs, 1) + self.norm3 = norm_layer(out_chs, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.act3 = act_layer(inplace=True) + + def zero_init_last(self): + if getattr(self.norm3, 'weight', None) is not None: + nn.init.zeros_(self.norm3.weight) + + def forward(self, x): + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x) + + # residual + x = self.conv1(x) + x = self.norm1(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.conv3(x) + x = self.norm3(x) + x = self.drop_path(x) + x = self.act3(x + shortcut) + return x + + +class DownsampleConv(nn.Module): + def __init__( + self, + in_chs, + out_chs, + stride=1, + dilation=1, + first_dilation=None, + preact=True, + conv_layer=None, + norm_layer=None, + ): + super(DownsampleConv, self).__init__() + self.conv = conv_layer(in_chs, out_chs, 1, stride=stride) + self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) + + def forward(self, x): + return self.norm(self.conv(x)) + + +class DownsampleAvg(nn.Module): + def __init__( + self, + in_chs, + out_chs, + stride=1, + dilation=1, + first_dilation=None, + preact=True, + conv_layer=None, + norm_layer=None, + ): + """ AvgPool Downsampling as in 'D' ResNet variants. 
This is not in RegNet space but I might experiment.""" + super(DownsampleAvg, self).__init__() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = conv_layer(in_chs, out_chs, 1, stride=1) + self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) + + def forward(self, x): + return self.norm(self.conv(self.pool(x))) + + +class ResNetStage(nn.Module): + """ResNet Stage.""" + def __init__( + self, + in_chs, + out_chs, + stride, + dilation, + depth, + bottle_ratio=0.25, + groups=1, + avg_down=False, + block_dpr=None, + block_fn=PreActBottleneck, + act_layer=None, + conv_layer=None, + norm_layer=None, + **block_kwargs, + ): + super(ResNetStage, self).__init__() + first_dilation = 1 if dilation in (1, 2) else 2 + layer_kwargs = dict(act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer) + proj_layer = DownsampleAvg if avg_down else DownsampleConv + prev_chs = in_chs + self.blocks = nn.Sequential() + for block_idx in range(depth): + drop_path_rate = block_dpr[block_idx] if block_dpr else 0. + stride = stride if block_idx == 0 else 1 + self.blocks.add_module(str(block_idx), block_fn( + prev_chs, + out_chs, + stride=stride, + dilation=dilation, + bottle_ratio=bottle_ratio, + groups=groups, + first_dilation=first_dilation, + proj_layer=proj_layer, + drop_path_rate=drop_path_rate, + **layer_kwargs, + **block_kwargs, + )) + prev_chs = out_chs + first_dilation = dilation + proj_layer = None + + def forward(self, x): + x = self.blocks(x) + return x + + +def is_stem_deep(stem_type): + return any([s in stem_type for s in ('deep', 'tiered')]) + + +def create_resnetv2_stem( + in_chs, + out_chs=64, + stem_type='', + preact=True, + conv_layer=StdConv2d, + norm_layer=partial(GroupNormAct, num_groups=32), +): + stem = OrderedDict() + assert stem_type in ('', 'fixed', 'same', 'deep', 'deep_fixed', 'deep_same', 'tiered') + + # NOTE conv padding mode can be changed by overriding the conv_layer def + if is_stem_deep(stem_type): + # A 3 deep 3x3 conv stack as in ResNet V1D models + if 'tiered' in stem_type: + stem_chs = (3 * out_chs // 8, out_chs // 2) # 'T' resnets in resnet.py + else: + stem_chs = (out_chs // 2, out_chs // 2) # 'D' ResNets + stem['conv1'] = conv_layer(in_chs, stem_chs[0], kernel_size=3, stride=2) + stem['norm1'] = norm_layer(stem_chs[0]) + stem['conv2'] = conv_layer(stem_chs[0], stem_chs[1], kernel_size=3, stride=1) + stem['norm2'] = norm_layer(stem_chs[1]) + stem['conv3'] = conv_layer(stem_chs[1], out_chs, kernel_size=3, stride=1) + if not preact: + stem['norm3'] = norm_layer(out_chs) + else: + # The usual 7x7 stem conv + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) + if not preact: + stem['norm'] = norm_layer(out_chs) + + if 'fixed' in stem_type: + # 'fixed' SAME padding approximation that is used in BiT models + stem['pad'] = nn.ConstantPad2d(1, 0.) 
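+        # an explicit zero-pad followed by an unpadded pool (rather than MaxPool2d(..., padding=1))
+        # approximates the TF 'SAME' padding these BiT weights were trained with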
+        stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
+    elif 'same' in stem_type:
+        # full, input size based 'SAME' padding, used in ViT Hybrid model
+        stem['pool'] = create_pool2d('max', kernel_size=3, stride=2, padding='same')
+    else:
+        # the usual PyTorch symmetric padding
+        stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+    return nn.Sequential(stem)
+
+
+class ResNetV2(nn.Module):
+    """Implementation of Pre-activation (v2) ResNet models.
+    """
+
+    def __init__(
+            self,
+            layers,
+            channels=(256, 512, 1024, 2048),
+            num_classes=1000,
+            in_chans=3,
+            global_pool='avg',
+            output_stride=32,
+            width_factor=1,
+            stem_chs=64,
+            stem_type='',
+            avg_down=False,
+            preact=True,
+            basic=False,
+            bottle_ratio=0.25,
+            act_layer=nn.ReLU,
+            norm_layer=partial(GroupNormAct, num_groups=32),
+            conv_layer=StdConv2d,
+            drop_rate=0.,
+            drop_path_rate=0.,
+            zero_init_last=False,
+    ):
+        """
+        Args:
+            layers (List[int]): number of blocks in each stage
+            channels (List[int]): number of output channels for each stage
+            num_classes (int): number of classification classes (default 1000)
+            in_chans (int): number of input (color) channels. (default 3)
+            global_pool (str): Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' (default 'avg')
+            output_stride (int): output stride of the network, 32, 16, or 8. (default 32)
+            width_factor (int): channel (width) multiplication factor
+            stem_chs (int): stem width (default: 64)
+            stem_type (str): stem type (default: '' == 7x7)
+            avg_down (bool): average pooling in residual downsampling (default: False)
+            preact (bool): pre-activation (default: True)
+            act_layer (Union[str, nn.Module]): activation layer
+            norm_layer (Union[str, nn.Module]): normalization layer
+            conv_layer (nn.Module): convolution module
+            drop_rate: classifier dropout rate (default: 0.)
+            drop_path_rate: stochastic depth rate (default: 0.)
+ zero_init_last: zero-init last weight in residual path (default: False) + """ + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + wf = width_factor + norm_layer = get_norm_act_layer(norm_layer, act_layer=act_layer) + act_layer = get_act_layer(act_layer) + + self.feature_info = [] + stem_chs = make_divisible(stem_chs * wf) + self.stem = create_resnetv2_stem( + in_chans, + stem_chs, + stem_type, + preact, + conv_layer=conv_layer, + norm_layer=norm_layer, + ) + stem_feat = ('stem.conv3' if is_stem_deep(stem_type) else 'stem.conv') if preact else 'stem.norm' + self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=stem_feat)) + + prev_chs = stem_chs + curr_stride = 4 + dilation = 1 + block_dprs = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] + if preact: + block_fn = PreActBasic if basic else PreActBottleneck + else: + assert not basic + block_fn = Bottleneck + self.stages = nn.Sequential() + for stage_idx, (d, c, bdpr) in enumerate(zip(layers, channels, block_dprs)): + out_chs = make_divisible(c * wf) + stride = 1 if stage_idx == 0 else 2 + if curr_stride >= output_stride: + dilation *= stride + stride = 1 + stage = ResNetStage( + prev_chs, + out_chs, + stride=stride, + dilation=dilation, + depth=d, + bottle_ratio=bottle_ratio, + avg_down=avg_down, + act_layer=act_layer, + conv_layer=conv_layer, + norm_layer=norm_layer, + block_dpr=bdpr, + block_fn=block_fn, + ) + prev_chs = out_chs + curr_stride *= stride + self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{stage_idx}')] + self.stages.add_module(str(stage_idx), stage) + + self.num_features = self.head_hidden_size = prev_chs + self.norm = norm_layer(self.num_features) if preact else nn.Identity() + self.head = ClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=self.drop_rate, + use_conv=True, + ) + + self.init_weights(zero_init_last=zero_init_last) + self.grad_checkpointing = False + + @torch.jit.ignore + def init_weights(self, zero_init_last=True): + named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path, prefix='resnet/'): + _load_weights(self, checkpoint_path, prefix) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+)\.blocks\.(\d+)', None), + (r'^norm', (99999,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x, flatten=True) + else: + x = self.stages(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _init_weights(module: nn.Module, name: str = '', zero_init_last=True): + if isinstance(module, nn.Linear) or ('head.fc' in name and isinstance(module, nn.Conv2d)): + nn.init.normal_(module.weight, 
mean=0.0, std=0.01) + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu') + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.BatchNorm2d, nn.LayerNorm, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif zero_init_last and hasattr(module, 'zero_init_last'): + module.zero_init_last() + + +@torch.no_grad() +def _load_weights(model: nn.Module, checkpoint_path: str, prefix: str = 'resnet/'): + import numpy as np + + def t2p(conv_weights): + """Possibly convert HWIO to OIHW.""" + if conv_weights.ndim == 4: + conv_weights = conv_weights.transpose([3, 2, 0, 1]) + return torch.from_numpy(conv_weights) + + weights = np.load(checkpoint_path) + stem_conv_w = adapt_input_conv( + model.stem.conv.weight.shape[1], t2p(weights[f'{prefix}root_block/standardized_conv2d/kernel'])) + model.stem.conv.weight.copy_(stem_conv_w) + model.norm.weight.copy_(t2p(weights[f'{prefix}group_norm/gamma'])) + model.norm.bias.copy_(t2p(weights[f'{prefix}group_norm/beta'])) + if isinstance(getattr(model.head, 'fc', None), nn.Conv2d) and \ + model.head.fc.weight.shape[0] == weights[f'{prefix}head/conv2d/kernel'].shape[-1]: + model.head.fc.weight.copy_(t2p(weights[f'{prefix}head/conv2d/kernel'])) + model.head.fc.bias.copy_(t2p(weights[f'{prefix}head/conv2d/bias'])) + for i, (sname, stage) in enumerate(model.stages.named_children()): + for j, (bname, block) in enumerate(stage.blocks.named_children()): + cname = 'standardized_conv2d' + block_prefix = f'{prefix}block{i + 1}/unit{j + 1:02d}/' + block.conv1.weight.copy_(t2p(weights[f'{block_prefix}a/{cname}/kernel'])) + block.conv2.weight.copy_(t2p(weights[f'{block_prefix}b/{cname}/kernel'])) + block.conv3.weight.copy_(t2p(weights[f'{block_prefix}c/{cname}/kernel'])) + block.norm1.weight.copy_(t2p(weights[f'{block_prefix}a/group_norm/gamma'])) + block.norm2.weight.copy_(t2p(weights[f'{block_prefix}b/group_norm/gamma'])) + block.norm3.weight.copy_(t2p(weights[f'{block_prefix}c/group_norm/gamma'])) + block.norm1.bias.copy_(t2p(weights[f'{block_prefix}a/group_norm/beta'])) + block.norm2.bias.copy_(t2p(weights[f'{block_prefix}b/group_norm/beta'])) + block.norm3.bias.copy_(t2p(weights[f'{block_prefix}c/group_norm/beta'])) + if block.downsample is not None: + w = weights[f'{block_prefix}a/proj/{cname}/kernel'] + block.downsample.conv.weight.copy_(t2p(w)) + + +def _create_resnetv2(variant, pretrained=False, **kwargs): + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + ResNetV2, variant, pretrained, + feature_cfg=feature_cfg, + **kwargs, + ) + + +def _create_resnetv2_bit(variant, pretrained=False, **kwargs): + return _create_resnetv2( + variant, + pretrained=pretrained, + stem_type='fixed', + conv_layer=partial(StdConv2d, eps=1e-8), + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + # Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + 'resnetv2_50x1_bit.goog_distilled_in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic', custom_load=True), + 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + 
interpolation='bicubic', custom_load=True), + 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic', custom_load=True), + + # pretrained on imagenet21k, finetuned on imagenet1k + 'resnetv2_50x1_bit.goog_in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), + 'resnetv2_50x3_bit.goog_in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), + 'resnetv2_101x1_bit.goog_in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), + 'resnetv2_101x3_bit.goog_in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), + 'resnetv2_152x2_bit.goog_in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), + 'resnetv2_152x4_bit.goog_in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 480, 480), pool_size=(15, 15), crop_pct=1.0, custom_load=True), # only one at 480x480? + + # trained on imagenet-21k + 'resnetv2_50x1_bit.goog_in21k': _cfg( + hf_hub_id='timm/', + num_classes=21843, custom_load=True), + 'resnetv2_50x3_bit.goog_in21k': _cfg( + hf_hub_id='timm/', + num_classes=21843, custom_load=True), + 'resnetv2_101x1_bit.goog_in21k': _cfg( + hf_hub_id='timm/', + num_classes=21843, custom_load=True), + 'resnetv2_101x3_bit.goog_in21k': _cfg( + hf_hub_id='timm/', + num_classes=21843, custom_load=True), + 'resnetv2_152x2_bit.goog_in21k': _cfg( + hf_hub_id='timm/', + num_classes=21843, custom_load=True), + 'resnetv2_152x4_bit.goog_in21k': _cfg( + hf_hub_id='timm/', + num_classes=21843, custom_load=True), + + 'resnetv2_18.ra4_e3600_r224_in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic', crop_pct=0.9, test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'resnetv2_18d.ra4_e3600_r224_in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic', crop_pct=0.9, test_input_size=(3, 288, 288), test_crop_pct=1.0, + first_conv='stem.conv1'), + 'resnetv2_34.ra4_e3600_r224_in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic', crop_pct=0.9, test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'resnetv2_34d.ra4_e3600_r224_in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic', crop_pct=0.9, test_input_size=(3, 288, 288), test_crop_pct=1.0, + first_conv='stem.conv1'), + 'resnetv2_34d.ra4_e3600_r384_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=1.0, input_size=(3, 384, 384), pool_size=(12, 12), test_input_size=(3, 448, 448), + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_50.a1h_in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'resnetv2_50d.untrained': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_50t.untrained': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_101.a1h_in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'resnetv2_101d.untrained': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_152.untrained': _cfg( + interpolation='bicubic'), + 'resnetv2_152d.untrained': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + + 'resnetv2_50d_gn.ah_in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic', 
first_conv='stem.conv1', + crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'resnetv2_50d_evos.ah_in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic', first_conv='stem.conv1', + crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'resnetv2_50d_frn.untrained': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), +}) + + +@register_model +def resnetv2_50x1_bit(pretrained=False, **kwargs) -> ResNetV2: + return _create_resnetv2_bit( + 'resnetv2_50x1_bit', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_50x3_bit(pretrained=False, **kwargs) -> ResNetV2: + return _create_resnetv2_bit( + 'resnetv2_50x3_bit', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_101x1_bit(pretrained=False, **kwargs) -> ResNetV2: + return _create_resnetv2_bit( + 'resnetv2_101x1_bit', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_101x3_bit(pretrained=False, **kwargs) -> ResNetV2: + return _create_resnetv2_bit( + 'resnetv2_101x3_bit', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_152x2_bit(pretrained=False, **kwargs) -> ResNetV2: + return _create_resnetv2_bit( + 'resnetv2_152x2_bit', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_152x4_bit(pretrained=False, **kwargs) -> ResNetV2: + return _create_resnetv2_bit( + 'resnetv2_152x4_bit', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=4, **kwargs) + + +@register_model +def resnetv2_18(pretrained=False, **kwargs) -> ResNetV2: + model_args = dict( + layers=[2, 2, 2, 2], channels=(64, 128, 256, 512), basic=True, bottle_ratio=1.0, + conv_layer=create_conv2d, norm_layer=BatchNormAct2d + ) + return _create_resnetv2('resnetv2_18', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnetv2_18d(pretrained=False, **kwargs) -> ResNetV2: + model_args = dict( + layers=[2, 2, 2, 2], channels=(64, 128, 256, 512), basic=True, bottle_ratio=1.0, + conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='deep', avg_down=True + ) + return _create_resnetv2('resnetv2_18d', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnetv2_34(pretrained=False, **kwargs) -> ResNetV2: + model_args = dict( + layers=(3, 4, 6, 3), channels=(64, 128, 256, 512), basic=True, bottle_ratio=1.0, + conv_layer=create_conv2d, norm_layer=BatchNormAct2d + ) + return _create_resnetv2('resnetv2_34', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnetv2_34d(pretrained=False, **kwargs) -> ResNetV2: + model_args = dict( + layers=(3, 4, 6, 3), channels=(64, 128, 256, 512), basic=True, bottle_ratio=1.0, + conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='deep', avg_down=True + ) + return _create_resnetv2('resnetv2_34d', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnetv2_50(pretrained=False, **kwargs) -> ResNetV2: + model_args = dict(layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d) + return _create_resnetv2('resnetv2_50', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnetv2_50d(pretrained=False, **kwargs) -> ResNetV2: + model_args = dict( + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True) + return 
_create_resnetv2('resnetv2_50d', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnetv2_50t(pretrained=False, **kwargs) -> ResNetV2: + model_args = dict( + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='tiered', avg_down=True) + return _create_resnetv2('resnetv2_50t', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnetv2_101(pretrained=False, **kwargs) -> ResNetV2: + model_args = dict(layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d) + return _create_resnetv2('resnetv2_101', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnetv2_101d(pretrained=False, **kwargs) -> ResNetV2: + model_args = dict( + layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True) + return _create_resnetv2('resnetv2_101d', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnetv2_152(pretrained=False, **kwargs) -> ResNetV2: + model_args = dict(layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d) + return _create_resnetv2('resnetv2_152', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnetv2_152d(pretrained=False, **kwargs) -> ResNetV2: + model_args = dict( + layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True) + return _create_resnetv2('resnetv2_152d', pretrained=pretrained, **dict(model_args, **kwargs)) + + +# Experimental configs (may change / be removed) + +@register_model +def resnetv2_50d_gn(pretrained=False, **kwargs) -> ResNetV2: + model_args = dict( + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=GroupNormAct, + stem_type='deep', avg_down=True) + return _create_resnetv2('resnetv2_50d_gn', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnetv2_50d_evos(pretrained=False, **kwargs) -> ResNetV2: + model_args = dict( + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNorm2dS0, + stem_type='deep', avg_down=True) + return _create_resnetv2('resnetv2_50d_evos', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def resnetv2_50d_frn(pretrained=False, **kwargs) -> ResNetV2: + model_args = dict( + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=FilterResponseNormTlu2d, + stem_type='deep', avg_down=True) + return _create_resnetv2('resnetv2_50d_frn', pretrained=pretrained, **dict(model_args, **kwargs)) + + +register_model_deprecations(__name__, { + 'resnetv2_50x1_bitm': 'resnetv2_50x1_bit.goog_in21k_ft_in1k', + 'resnetv2_50x3_bitm': 'resnetv2_50x3_bit.goog_in21k_ft_in1k', + 'resnetv2_101x1_bitm': 'resnetv2_101x1_bit.goog_in21k_ft_in1k', + 'resnetv2_101x3_bitm': 'resnetv2_101x3_bit.goog_in21k_ft_in1k', + 'resnetv2_152x2_bitm': 'resnetv2_152x2_bit.goog_in21k_ft_in1k', + 'resnetv2_152x4_bitm': 'resnetv2_152x4_bit.goog_in21k_ft_in1k', + 'resnetv2_50x1_bitm_in21k': 'resnetv2_50x1_bit.goog_in21k', + 'resnetv2_50x3_bitm_in21k': 'resnetv2_50x3_bit.goog_in21k', + 'resnetv2_101x1_bitm_in21k': 'resnetv2_101x1_bit.goog_in21k', + 'resnetv2_101x3_bitm_in21k': 'resnetv2_101x3_bit.goog_in21k', + 'resnetv2_152x2_bitm_in21k': 'resnetv2_152x2_bit.goog_in21k', + 'resnetv2_152x4_bitm_in21k': 'resnetv2_152x4_bit.goog_in21k', + 'resnetv2_50x1_bit_distilled': 'resnetv2_50x1_bit.goog_distilled_in1k', + 'resnetv2_152x2_bit_teacher': 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k', + 
'resnetv2_152x2_bit_teacher_384': 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384', +}) diff --git a/pytorch-image-models/timm/models/rexnet.py b/pytorch-image-models/timm/models/rexnet.py new file mode 100644 index 0000000000000000000000000000000000000000..9971728c242f9faa74a92d4c4297be265fa72270 --- /dev/null +++ b/pytorch-image-models/timm/models/rexnet.py @@ -0,0 +1,358 @@ +""" ReXNet + +A PyTorch impl of `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` - +https://arxiv.org/abs/2007.00992 + +Adapted from original impl at https://github.com/clovaai/rexnet +Copyright (c) 2020-present NAVER Corp. MIT license + +Changes for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman +Copyright 2020 Ross Wightman +""" + +from functools import partial +from math import ceil +from typing import Optional + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import ClassifierHead, create_act_layer, ConvNormAct, DropPath, make_divisible, SEModule +from ._builder import build_model_with_cfg +from ._efficientnet_builder import efficientnet_init_weights +from ._manipulate import checkpoint_seq +from ._registry import generate_default_cfgs, register_model + +__all__ = ['RexNet'] # model_registry will add each entrypoint fn to this + + +SEWithNorm = partial(SEModule, norm_layer=nn.BatchNorm2d) + + +class LinearBottleneck(nn.Module): + def __init__( + self, + in_chs, + out_chs, + stride, + dilation=(1, 1), + exp_ratio=1.0, + se_ratio=0., + ch_div=1, + act_layer='swish', + dw_act_layer='relu6', + drop_path=None, + ): + super(LinearBottleneck, self).__init__() + self.use_shortcut = stride == 1 and dilation[0] == dilation[1] and in_chs <= out_chs + self.in_channels = in_chs + self.out_channels = out_chs + + if exp_ratio != 1.: + dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div) + self.conv_exp = ConvNormAct(in_chs, dw_chs, act_layer=act_layer) + else: + dw_chs = in_chs + self.conv_exp = None + + self.conv_dw = ConvNormAct( + dw_chs, + dw_chs, + kernel_size=3, + stride=stride, + dilation=dilation[0], + groups=dw_chs, + apply_act=False, + ) + if se_ratio > 0: + self.se = SEWithNorm(dw_chs, rd_channels=make_divisible(int(dw_chs * se_ratio), ch_div)) + else: + self.se = None + self.act_dw = create_act_layer(dw_act_layer) + + self.conv_pwl = ConvNormAct(dw_chs, out_chs, 1, apply_act=False) + self.drop_path = drop_path + + def feat_channels(self, exp=False): + return self.conv_dw.out_channels if exp else self.out_channels + + def forward(self, x): + shortcut = x + if self.conv_exp is not None: + x = self.conv_exp(x) + x = self.conv_dw(x) + if self.se is not None: + x = self.se(x) + x = self.act_dw(x) + x = self.conv_pwl(x) + if self.use_shortcut: + if self.drop_path is not None: + x = self.drop_path(x) + x = torch.cat([x[:, 0:self.in_channels] + shortcut, x[:, self.in_channels:]], dim=1) + return x + + +def _block_cfg( + width_mult=1.0, + depth_mult=1.0, + initial_chs=16, + final_chs=180, + se_ratio=0., + ch_div=1, +): + layers = [1, 2, 2, 3, 3, 5] + strides = [1, 2, 2, 2, 1, 2] + layers = [ceil(element * depth_mult) for element in layers] + strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], []) + exp_ratios = [1] * layers[0] + [6] * sum(layers[1:]) + depth = sum(layers[:]) * 3 + base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs + + # The following channel configuration is a simple instance to make 
each layer become an expand layer. + out_chs_list = [] + for i in range(depth // 3): + out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div)) + base_chs += final_chs / (depth // 3 * 1.0) + + se_ratios = [0.] * (layers[0] + layers[1]) + [se_ratio] * sum(layers[2:]) + + return list(zip(out_chs_list, exp_ratios, strides, se_ratios)) + + +def _build_blocks( + block_cfg, + prev_chs, + width_mult, + ch_div=1, + output_stride=32, + act_layer='swish', + dw_act_layer='relu6', + drop_path_rate=0., +): + feat_chs = [prev_chs] + feature_info = [] + curr_stride = 2 + dilation = 1 + features = [] + num_blocks = len(block_cfg) + for block_idx, (chs, exp_ratio, stride, se_ratio) in enumerate(block_cfg): + next_dilation = dilation + if stride > 1: + fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}' + feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)] + if curr_stride >= output_stride: + next_dilation = dilation * stride + stride = 1 + block_dpr = drop_path_rate * block_idx / (num_blocks - 1) # stochastic depth linear decay rule + drop_path = DropPath(block_dpr) if block_dpr > 0. else None + features.append(LinearBottleneck( + in_chs=prev_chs, + out_chs=chs, + exp_ratio=exp_ratio, + stride=stride, + dilation=(dilation, next_dilation), + se_ratio=se_ratio, + ch_div=ch_div, + act_layer=act_layer, + dw_act_layer=dw_act_layer, + drop_path=drop_path, + )) + curr_stride *= stride + dilation = next_dilation + prev_chs = chs + feat_chs += [features[-1].feat_channels()] + pen_chs = make_divisible(1280 * width_mult, divisor=ch_div) + feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=f'features.{len(features) - 1}')] + features.append(ConvNormAct(prev_chs, pen_chs, act_layer=act_layer)) + return features, feature_info + + +class RexNet(nn.Module): + def __init__( + self, + in_chans=3, + num_classes=1000, + global_pool='avg', + output_stride=32, + initial_chs=16, + final_chs=180, + width_mult=1.0, + depth_mult=1.0, + se_ratio=1/12., + ch_div=1, + act_layer='swish', + dw_act_layer='relu6', + drop_rate=0.2, + drop_path_rate=0., + ): + super(RexNet, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + + assert output_stride in (32, 16, 8) + stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32 + stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div) + self.stem = ConvNormAct(in_chans, stem_chs, 3, stride=2, act_layer=act_layer) + + block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, se_ratio, ch_div) + features, self.feature_info = _build_blocks( + block_cfg, + stem_chs, + width_mult, + ch_div, + output_stride, + act_layer, + dw_act_layer, + drop_path_rate, + ) + self.num_features = self.head_hidden_size = features[-1].out_channels + self.features = nn.Sequential(*features) + + self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate) + + efficientnet_init_weights(self) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', + blocks=r'^features\.(\d+)', + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + def forward_features(self, 
x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.features, x, flatten=True) + else: + x = self.features(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_rexnet(variant, pretrained, **kwargs): + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + RexNet, + variant, + pretrained, + feature_cfg=feature_cfg, + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + 'license': 'mit', **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'rexnet_100.nav_in1k': _cfg(hf_hub_id='timm/'), + 'rexnet_130.nav_in1k': _cfg(hf_hub_id='timm/'), + 'rexnet_150.nav_in1k': _cfg(hf_hub_id='timm/'), + 'rexnet_200.nav_in1k': _cfg(hf_hub_id='timm/'), + 'rexnet_300.nav_in1k': _cfg(hf_hub_id='timm/'), + 'rexnetr_100.untrained': _cfg(), + 'rexnetr_130.untrained': _cfg(), + 'rexnetr_150.untrained': _cfg(), + 'rexnetr_200.sw_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), + 'rexnetr_300.sw_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), + 'rexnetr_200.sw_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), + 'rexnetr_300.sw_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), +}) + + +@register_model +def rexnet_100(pretrained=False, **kwargs) -> RexNet: + """ReXNet V1 1.0x""" + return _create_rexnet('rexnet_100', pretrained, **kwargs) + + +@register_model +def rexnet_130(pretrained=False, **kwargs) -> RexNet: + """ReXNet V1 1.3x""" + return _create_rexnet('rexnet_130', pretrained, width_mult=1.3, **kwargs) + + +@register_model +def rexnet_150(pretrained=False, **kwargs) -> RexNet: + """ReXNet V1 1.5x""" + return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs) + + +@register_model +def rexnet_200(pretrained=False, **kwargs) -> RexNet: + """ReXNet V1 2.0x""" + return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs) + + +@register_model +def rexnet_300(pretrained=False, **kwargs) -> RexNet: + """ReXNet V1 3.0x""" + return _create_rexnet('rexnet_300', pretrained, width_mult=3.0, **kwargs) + + +@register_model +def rexnetr_100(pretrained=False, **kwargs) -> RexNet: + """ReXNet V1 1.0x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs) + + +@register_model +def rexnetr_130(pretrained=False, **kwargs) -> RexNet: + """ReXNet V1 1.3x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs) + + +@register_model +def rexnetr_150(pretrained=False, **kwargs) -> RexNet: + """ReXNet V1 1.5x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs) + + +@register_model +def rexnetr_200(pretrained=False, **kwargs) -> RexNet: + 
"""ReXNet V1 2.0x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs) + + +@register_model +def rexnetr_300(pretrained=False, **kwargs) -> RexNet: + """ReXNet V1 3.0x w/ rounded (mod 16) channels""" + return _create_rexnet('rexnetr_300', pretrained, width_mult=3.0, ch_div=16, **kwargs) diff --git a/pytorch-image-models/timm/models/sknet.py b/pytorch-image-models/timm/models/sknet.py new file mode 100644 index 0000000000000000000000000000000000000000..01565875cb5a6284c04b57ab557169bfe6ea8a60 --- /dev/null +++ b/pytorch-image-models/timm/models/sknet.py @@ -0,0 +1,240 @@ +""" Selective Kernel Networks (ResNet base) + +Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) + +This was inspired by reading 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268) +and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer +to the original paper with some modifications of my own to better balance param count vs accuracy. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math + +from torch import nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import SelectiveKernel, ConvNormAct, create_attn +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs +from .resnet import ResNet + + +class SelectiveKernelBasic(nn.Module): + expansion = 1 + + def __init__( + self, + inplanes, + planes, + stride=1, + downsample=None, + cardinality=1, + base_width=64, + sk_kwargs=None, + reduce_first=1, + dilation=1, + first_dilation=None, + act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, + attn_layer=None, + aa_layer=None, + drop_block=None, + drop_path=None, + ): + super(SelectiveKernelBasic, self).__init__() + + sk_kwargs = sk_kwargs or {} + conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer) + assert cardinality == 1, 'BasicBlock only supports cardinality of 1' + assert base_width == 64, 'BasicBlock doest not support changing base width' + first_planes = planes // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = SelectiveKernel( + inplanes, first_planes, stride=stride, dilation=first_dilation, + aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs) + self.conv2 = ConvNormAct( + first_planes, outplanes, kernel_size=3, dilation=dilation, apply_act=False, **conv_kwargs) + self.se = create_attn(attn_layer, outplanes) + self.act = act_layer(inplace=True) + self.downsample = downsample + self.drop_path = drop_path + + def zero_init_last(self): + if getattr(self.conv2.bn, 'weight', None) is not None: + nn.init.zeros_(self.conv2.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.se is not None: + x = self.se(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act(x) + return x + + +class SelectiveKernelBottleneck(nn.Module): + expansion = 4 + + def __init__( + self, + inplanes, + planes, + stride=1, + downsample=None, + cardinality=1, + base_width=64, + sk_kwargs=None, + reduce_first=1, + dilation=1, + first_dilation=None, + act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, + attn_layer=None, + aa_layer=None, + drop_block=None, + drop_path=None, + ): + super(SelectiveKernelBottleneck, self).__init__() + + sk_kwargs = 
sk_kwargs or {} + conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer) + width = int(math.floor(planes * (base_width / 64)) * cardinality) + first_planes = width // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = ConvNormAct(inplanes, first_planes, kernel_size=1, **conv_kwargs) + self.conv2 = SelectiveKernel( + first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality, + aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs) + self.conv3 = ConvNormAct(width, outplanes, kernel_size=1, apply_act=False, **conv_kwargs) + self.se = create_attn(attn_layer, outplanes) + self.act = act_layer(inplace=True) + self.downsample = downsample + self.drop_path = drop_path + + def zero_init_last(self): + if getattr(self.conv3.bn, 'weight', None) is not None: + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + if self.se is not None: + x = self.se(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act(x) + return x + + +def _create_skresnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, + variant, + pretrained, + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'skresnet18.ra_in1k': _cfg(hf_hub_id='timm/'), + 'skresnet34.ra_in1k': _cfg(hf_hub_id='timm/'), + 'skresnet50.untrained': _cfg(), + 'skresnet50d.untrained': _cfg( + first_conv='conv1.0'), + 'skresnext50_32x4d.ra_in1k': _cfg(hf_hub_id='timm/'), +}) + + +@register_model +def skresnet18(pretrained=False, **kwargs) -> ResNet: + """Constructs a Selective Kernel ResNet-18 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) + model_args = dict( + block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last=False, **kwargs) + return _create_skresnet('skresnet18', pretrained, **model_args) + + +@register_model +def skresnet34(pretrained=False, **kwargs) -> ResNet: + """Constructs a Selective Kernel ResNet-34 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) + model_args = dict( + block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last=False, **kwargs) + return _create_skresnet('skresnet34', pretrained, **model_args) + + +@register_model +def skresnet50(pretrained=False, **kwargs) -> ResNet: + """Constructs a Select Kernel ResNet-50 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. 
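+
+    Example (editor's illustrative sketch, not from the original source; assumes the timm
+    factory API and randomly initialized weights):
+
+        >>> import torch, timm
+        >>> model = timm.create_model('skresnet50', pretrained=False)
+        >>> model(torch.randn(1, 3, 224, 224)).shape
+        torch.Size([1, 1000])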
+ """ + sk_kwargs = dict(split_input=True) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last=False, **kwargs) + return _create_skresnet('skresnet50', pretrained, **model_args) + + +@register_model +def skresnet50d(pretrained=False, **kwargs) -> ResNet: + """Constructs a Select Kernel ResNet-50-D model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(split_input=True) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) + return _create_skresnet('skresnet50d', pretrained, **model_args) + + +@register_model +def skresnext50_32x4d(pretrained=False, **kwargs) -> ResNet: + """Constructs a Select Kernel ResNeXt50-32x4d model. This should be equivalent to + the SKNet-50 model in the Select Kernel Paper + """ + sk_kwargs = dict(rd_ratio=1/16, rd_divisor=32, split_input=False) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) + return _create_skresnet('skresnext50_32x4d', pretrained, **model_args) + diff --git a/pytorch-image-models/timm/models/swin_transformer_v2.py b/pytorch-image-models/timm/models/swin_transformer_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..2174b4840f23069cb8b993e9f3d670e9185fad40 --- /dev/null +++ b/pytorch-image-models/timm/models/swin_transformer_v2.py @@ -0,0 +1,1088 @@ +""" Swin Transformer V2 +A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` + - https://arxiv.org/abs/2111.09883 + +Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below + +Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman +""" +# -------------------------------------------------------- +# Swin Transformer V2 +# Copyright (c) 2022 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# -------------------------------------------------------- +import math +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_, _assert, ClassifierHead,\ + resample_patch_embed, ndgrid, get_act_layer, LayerType +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._features_fx import register_notrace_function +from ._registry import generate_default_cfgs, register_model, register_model_deprecations + +__all__ = ['SwinTransformerV2'] # model_registry will add each entrypoint fn to this + +_int_or_tuple_2_t = Union[int, Tuple[int, int]] + + +def window_partition(x: torch.Tensor, window_size: Tuple[int, int]) -> torch.Tensor: + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) + windows = x.permute(0, 1, 3, 2, 4, 
5).contiguous().view(-1, window_size[0], window_size[1], C) + return windows + + +@register_notrace_function # reason: int argument is a Proxy +def window_reverse(windows: torch.Tensor, window_size: Tuple[int, int], img_size: Tuple[int, int]) -> torch.Tensor: + """ + Args: + windows: (num_windows * B, window_size[0], window_size[1], C) + window_size (Tuple[int, int]): Window size + img_size (Tuple[int, int]): Image size + + Returns: + x: (B, H, W, C) + """ + H, W = img_size + C = windows.shape[-1] + x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) + return x + + +class WindowAttention(nn.Module): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. Default: 0.0 + pretrained_window_size (tuple[int]): The height and width of the window in pre-training. + """ + + def __init__( + self, + dim: int, + window_size: Tuple[int, int], + num_heads: int, + qkv_bias: bool = True, + qkv_bias_separate: bool = False, + attn_drop: float = 0., + proj_drop: float = 0., + pretrained_window_size: Tuple[int, int] = (0, 0), + ) -> None: + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.pretrained_window_size = to_2tuple(pretrained_window_size) + self.num_heads = num_heads + self.qkv_bias_separate = qkv_bias_separate + + self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1)))) + + # mlp to generate continuous relative position bias + self.cpb_mlp = nn.Sequential( + nn.Linear(2, 512, bias=True), + nn.ReLU(inplace=True), + nn.Linear(512, num_heads, bias=False) + ) + + self.qkv = nn.Linear(dim, dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(dim)) + self.register_buffer('k_bias', torch.zeros(dim), persistent=False) + self.v_bias = nn.Parameter(torch.zeros(dim)) + else: + self.q_bias = None + self.k_bias = None + self.v_bias = None + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.softmax = nn.Softmax(dim=-1) + + self._make_pair_wise_relative_positions() + + def _make_pair_wise_relative_positions(self): + # get relative_coords_table + relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0]).to(torch.float32) + relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1]).to(torch.float32) + relative_coords_table = torch.stack(ndgrid(relative_coords_h, relative_coords_w)) + relative_coords_table = relative_coords_table.permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2 + if self.pretrained_window_size[0] > 0: + relative_coords_table[:, :, :, 0] /= (self.pretrained_window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= (self.pretrained_window_size[1] - 1) + else: + relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1) + relative_coords_table *= 8 # normalize to -8, 8 + relative_coords_table = torch.sign(relative_coords_table) * torch.log2( + 
torch.abs(relative_coords_table) + 1.0) / math.log2(8) + self.register_buffer("relative_coords_table", relative_coords_table, persistent=False) + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(ndgrid(coords_h, coords_w)) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index, persistent=False) + + def set_window_size(self, window_size: Tuple[int, int]) -> None: + """Update window size & interpolate position embeddings + Args: + window_size (int): New window size + """ + window_size = to_2tuple(window_size) + if window_size != self.window_size: + self.window_size = window_size + self._make_pair_wise_relative_positions() + + def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor: + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + + if self.q_bias is None: + qkv = self.qkv(x) + else: + qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) + if self.qkv_bias_separate: + qkv = self.qkv(x) + qkv += qkv_bias + else: + qkv = F.linear(x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + + # cosine attention + attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) + logit_scale = torch.clamp(self.logit_scale, max=math.log(1. / 0.01)).exp() + attn = attn * logit_scale + + relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads) + relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + relative_position_bias = 16 * torch.sigmoid(relative_position_bias) + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + num_win = mask.shape[0] + attn = attn.view(-1, num_win, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerV2Block(nn.Module): + """ Swin Transformer Block. 
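+
+    Operates on NHWC feature maps of shape (B, H, W, C). Example (editor's illustrative
+    sketch, not part of the original source; uses the default window_size=7 and random init):
+
+        >>> import torch
+        >>> from timm.models.swin_transformer_v2 import SwinTransformerV2Block
+        >>> blk = SwinTransformerV2Block(dim=96, input_resolution=(56, 56), num_heads=3)
+        >>> blk(torch.randn(2, 56, 56, 96)).shape   # (B, H, W, C) in and out
+        torch.Size([2, 56, 56, 96])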
+ """ + + def __init__( + self, + dim: int, + input_resolution: _int_or_tuple_2_t, + num_heads: int, + window_size: _int_or_tuple_2_t = 7, + shift_size: _int_or_tuple_2_t = 0, + always_partition: bool = False, + dynamic_mask: bool = False, + mlp_ratio: float = 4., + qkv_bias: bool = True, + proj_drop: float = 0., + attn_drop: float = 0., + drop_path: float = 0., + act_layer: LayerType = "gelu", + norm_layer: nn.Module = nn.LayerNorm, + pretrained_window_size: _int_or_tuple_2_t = 0, + ): + """ + Args: + dim: Number of input channels. + input_resolution: Input resolution. + num_heads: Number of attention heads. + window_size: Window size. + shift_size: Shift size for SW-MSA. + always_partition: Always partition into full windows and shift + mlp_ratio: Ratio of mlp hidden dim to embedding dim. + qkv_bias: If True, add a learnable bias to query, key, value. + proj_drop: Dropout rate. + attn_drop: Attention dropout rate. + drop_path: Stochastic depth rate. + act_layer: Activation layer. + norm_layer: Normalization layer. + pretrained_window_size: Window size in pretraining. + """ + super().__init__() + self.dim = dim + self.input_resolution = to_2tuple(input_resolution) + self.num_heads = num_heads + self.target_shift_size = to_2tuple(shift_size) # store for later resize + self.always_partition = always_partition + self.dynamic_mask = dynamic_mask + self.window_size, self.shift_size = self._calc_window_shift(window_size, shift_size) + self.window_area = self.window_size[0] * self.window_size[1] + self.mlp_ratio = mlp_ratio + act_layer = get_act_layer(act_layer) + + self.attn = WindowAttention( + dim, + window_size=to_2tuple(self.window_size), + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + pretrained_window_size=to_2tuple(pretrained_window_size), + ) + self.norm1 = norm_layer(dim) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + ) + self.norm2 = norm_layer(dim) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.register_buffer( + "attn_mask", + None if self.dynamic_mask else self.get_attn_mask(), + persistent=False, + ) + + def get_attn_mask(self, x: Optional[torch.Tensor] = None) -> Optional[torch.Tensor]: + if any(self.shift_size): + # calculate attention mask for SW-MSA + if x is None: + img_mask = torch.zeros((1, *self.input_resolution, 1)) # 1 H W 1 + else: + img_mask = torch.zeros((1, x.shape[1], x.shape[2], 1), dtype=x.dtype, device=x.device) # 1 H W 1 + cnt = 0 + for h in ( + (0, -self.window_size[0]), + (-self.window_size[0], -self.shift_size[0]), + (-self.shift_size[0], None), + ): + for w in ( + (0, -self.window_size[1]), + (-self.window_size[1], -self.shift_size[1]), + (-self.shift_size[1], None), + ): + img_mask[:, h[0]:h[1], w[0]:w[1], :] = cnt + cnt += 1 + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_area) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + return attn_mask + + def _calc_window_shift( + self, + target_window_size: _int_or_tuple_2_t, + target_shift_size: Optional[_int_or_tuple_2_t] = None, + ) -> Tuple[Tuple[int, int], Tuple[int, int]]: + target_window_size = to_2tuple(target_window_size) + if target_shift_size is None: + # if passed value is None, recalculate from default window_size // 2 if it was active + target_shift_size = self.target_shift_size + if any(target_shift_size): + # if there was previously a non-zero shift, recalculate based on current window_size + target_shift_size = (target_window_size[0] // 2, target_window_size[1] // 2) + else: + target_shift_size = to_2tuple(target_shift_size) + + if self.always_partition: + return target_window_size, target_shift_size + + target_window_size = to_2tuple(target_window_size) + target_shift_size = to_2tuple(target_shift_size) + window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)] + shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)] + return tuple(window_size), tuple(shift_size) + + def set_input_size( + self, + feat_size: Tuple[int, int], + window_size: Tuple[int, int], + always_partition: Optional[bool] = None, + ): + """ Updates the input resolution, window size. 
+ + Args: + feat_size (Tuple[int, int]): New input resolution + window_size (int): New window size + always_partition: Change always_partition attribute if not None + """ + # Update input resolution + self.input_resolution = feat_size + if always_partition is not None: + self.always_partition = always_partition + self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(window_size)) + self.window_area = self.window_size[0] * self.window_size[1] + self.attn.set_window_size(self.window_size) + self.register_buffer( + "attn_mask", + None if self.dynamic_mask else self.get_attn_mask(), + persistent=False, + ) + + def _attn(self, x: torch.Tensor) -> torch.Tensor: + B, H, W, C = x.shape + + # cyclic shift + has_shift = any(self.shift_size) + if has_shift: + shifted_x = torch.roll(x, shifts=(-self.shift_size[0], -self.shift_size[1]), dims=(1, 2)) + else: + shifted_x = x + + pad_h = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0] + pad_w = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1] + shifted_x = torch.nn.functional.pad(shifted_x, (0, 0, 0, pad_w, 0, pad_h)) + _, Hp, Wp, _ = shifted_x.shape + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_area, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + if getattr(self, 'dynamic_mask', False): + attn_mask = self.get_attn_mask(shifted_x) + else: + attn_mask = self.attn_mask + attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) + shifted_x = window_reverse(attn_windows, self.window_size, (Hp, Wp)) # B H' W' C + shifted_x = shifted_x[:, :H, :W, :].contiguous() + + # reverse cyclic shift + if has_shift: + x = torch.roll(shifted_x, shifts=self.shift_size, dims=(1, 2)) + else: + x = shifted_x + return x + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, H, W, C = x.shape + x = x + self.drop_path1(self.norm1(self._attn(x))) + x = x.reshape(B, -1, C) + x = x + self.drop_path2(self.norm2(self.mlp(x))) + x = x.reshape(B, H, W, C) + return x + + +class PatchMerging(nn.Module): + """ Patch Merging Layer. + """ + + def __init__( + self, + dim: int, + out_dim: Optional[int] = None, + norm_layer: nn.Module = nn.LayerNorm + ): + """ + Args: + dim (int): Number of input channels. + out_dim (int): Number of output channels (or 2 * dim if None) + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + super().__init__() + self.dim = dim + self.out_dim = out_dim or 2 * dim + self.reduction = nn.Linear(4 * dim, self.out_dim, bias=False) + self.norm = norm_layer(self.out_dim) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, H, W, C = x.shape + + pad_values = (0, 0, 0, W % 2, 0, H % 2) + x = nn.functional.pad(x, pad_values) + _, H, W, _ = x.shape + + x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3) + x = self.reduction(x) + x = self.norm(x) + return x + + +class SwinTransformerV2Stage(nn.Module): + """ A Swin Transformer V2 Stage. 
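+
+    Optionally downsamples with PatchMerging at the start, then applies `depth` blocks that
+    alternate regular and shifted windows. Example (editor's illustrative sketch, not part of
+    the original source; random init):
+
+        >>> import torch
+        >>> from timm.models.swin_transformer_v2 import SwinTransformerV2Stage
+        >>> stage = SwinTransformerV2Stage(
+        ...     dim=96, out_dim=192, input_resolution=(64, 64), depth=2,
+        ...     num_heads=6, window_size=8, downsample=True)
+        >>> stage(torch.randn(1, 64, 64, 96)).shape   # NHWC in, NHWC out at half resolution
+        torch.Size([1, 32, 32, 192])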
+ """ + + def __init__( + self, + dim: int, + out_dim: int, + input_resolution: _int_or_tuple_2_t, + depth: int, + num_heads: int, + window_size: _int_or_tuple_2_t, + always_partition: bool = False, + dynamic_mask: bool = False, + downsample: bool = False, + mlp_ratio: float = 4., + qkv_bias: bool = True, + proj_drop: float = 0., + attn_drop: float = 0., + drop_path: float = 0., + act_layer: Union[str, Callable] = 'gelu', + norm_layer: nn.Module = nn.LayerNorm, + pretrained_window_size: _int_or_tuple_2_t = 0, + output_nchw: bool = False, + ) -> None: + """ + Args: + dim: Number of input channels. + out_dim: Number of output channels. + input_resolution: Input resolution. + depth: Number of blocks. + num_heads: Number of attention heads. + window_size: Local window size. + always_partition: Always partition into full windows and shift + dynamic_mask: Create attention mask in forward based on current input size + downsample: Use downsample layer at start of the block. + mlp_ratio: Ratio of mlp hidden dim to embedding dim. + qkv_bias: If True, add a learnable bias to query, key, value. + proj_drop: Projection dropout rate + attn_drop: Attention dropout rate. + drop_path: Stochastic depth rate. + act_layer: Activation layer type. + norm_layer: Normalization layer. + pretrained_window_size: Local window size in pretraining. + output_nchw: Output tensors on NCHW format instead of NHWC. + """ + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.output_resolution = tuple(i // 2 for i in input_resolution) if downsample else input_resolution + self.depth = depth + self.output_nchw = output_nchw + self.grad_checkpointing = False + window_size = to_2tuple(window_size) + shift_size = tuple([w // 2 for w in window_size]) + + # patch merging / downsample layer + if downsample: + self.downsample = PatchMerging(dim=dim, out_dim=out_dim, norm_layer=norm_layer) + else: + assert dim == out_dim + self.downsample = nn.Identity() + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerV2Block( + dim=out_dim, + input_resolution=self.output_resolution, + num_heads=num_heads, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else shift_size, + always_partition=always_partition, + dynamic_mask=dynamic_mask, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_drop=proj_drop, + attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + act_layer=act_layer, + norm_layer=norm_layer, + pretrained_window_size=pretrained_window_size, + ) + for i in range(depth)]) + + def set_input_size( + self, + feat_size: Tuple[int, int], + window_size: int, + always_partition: Optional[bool] = None, + ): + """ Updates the resolution, window size and so the pair-wise relative positions. 
+ + Args: + feat_size: New input (feature) resolution + window_size: New window size + always_partition: Always partition / shift the window + """ + self.input_resolution = feat_size + if isinstance(self.downsample, nn.Identity): + self.output_resolution = feat_size + else: + assert isinstance(self.downsample, PatchMerging) + self.output_resolution = tuple(i // 2 for i in feat_size) + for block in self.blocks: + block.set_input_size( + feat_size=self.output_resolution, + window_size=window_size, + always_partition=always_partition, + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.downsample(x) + + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + return x + + def _init_respostnorm(self) -> None: + for blk in self.blocks: + nn.init.constant_(blk.norm1.bias, 0) + nn.init.constant_(blk.norm1.weight, 0) + nn.init.constant_(blk.norm2.bias, 0) + nn.init.constant_(blk.norm2.weight, 0) + + +class SwinTransformerV2(nn.Module): + """ Swin Transformer V2 + + A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` + - https://arxiv.org/abs/2111.09883 + """ + + def __init__( + self, + img_size: _int_or_tuple_2_t = 224, + patch_size: int = 4, + in_chans: int = 3, + num_classes: int = 1000, + global_pool: str = 'avg', + embed_dim: int = 96, + depths: Tuple[int, ...] = (2, 2, 6, 2), + num_heads: Tuple[int, ...] = (3, 6, 12, 24), + window_size: _int_or_tuple_2_t = 7, + always_partition: bool = False, + strict_img_size: bool = True, + mlp_ratio: float = 4., + qkv_bias: bool = True, + drop_rate: float = 0., + proj_drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0.1, + act_layer: Union[str, Callable] = 'gelu', + norm_layer: Callable = nn.LayerNorm, + pretrained_window_sizes: Tuple[int, ...] = (0, 0, 0, 0), + **kwargs, + ): + """ + Args: + img_size: Input image size. + patch_size: Patch size. + in_chans: Number of input image channels. + num_classes: Number of classes for classification head. + embed_dim: Patch embedding dimension. + depths: Depth of each Swin Transformer stage (layer). + num_heads: Number of attention heads in different layers. + window_size: Window size. + mlp_ratio: Ratio of mlp hidden dim to embedding dim. + qkv_bias: If True, add a learnable bias to query, key, value. + drop_rate: Head dropout rate. + proj_drop_rate: Projection dropout rate. + attn_drop_rate: Attention dropout rate. + drop_path_rate: Stochastic depth rate. + norm_layer: Normalization layer. + act_layer: Activation layer type. + patch_norm: If True, add normalization after patch embedding. + pretrained_window_sizes: Pretrained window sizes of each layer. + output_fmt: Output tensor format if not None, otherwise output 'NHWC' by default. 
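+
+        Example (editor's illustrative sketch, not part of the original source; assumes the
+        timm factory API and randomly initialized weights):
+
+            >>> import torch, timm
+            >>> model = timm.create_model('swinv2_tiny_window8_256', pretrained=False)
+            >>> model(torch.randn(1, 3, 256, 256)).shape
+            torch.Size([1, 1000])
+            >>> model.forward_features(torch.randn(1, 3, 256, 256)).shape   # NHWC feature map
+            torch.Size([1, 8, 8, 768])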
+ """ + super().__init__() + + self.num_classes = num_classes + assert global_pool in ('', 'avg') + self.global_pool = global_pool + self.output_fmt = 'NHWC' + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (self.num_layers - 1)) + self.feature_info = [] + + if not isinstance(embed_dim, (tuple, list)): + embed_dim = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim[0], + norm_layer=norm_layer, + strict_img_size=strict_img_size, + output_fmt='NHWC', + ) + grid_size = self.patch_embed.grid_size + + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + layers = [] + in_dim = embed_dim[0] + scale = 1 + for i in range(self.num_layers): + out_dim = embed_dim[i] + layers += [SwinTransformerV2Stage( + dim=in_dim, + out_dim=out_dim, + input_resolution=(grid_size[0] // scale, grid_size[1] // scale), + depth=depths[i], + downsample=i > 0, + num_heads=num_heads[i], + window_size=window_size, + always_partition=always_partition, + dynamic_mask=not strict_img_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + act_layer=act_layer, + norm_layer=norm_layer, + pretrained_window_size=pretrained_window_sizes[i], + )] + in_dim = out_dim + if i > 0: + scale *= 2 + self.feature_info += [dict(num_chs=out_dim, reduction=4 * scale, module=f'layers.{i}')] + + self.layers = nn.Sequential(*layers) + self.norm = norm_layer(self.num_features) + self.head = ClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + input_fmt=self.output_fmt, + ) + + self.apply(self._init_weights) + for bly in self.layers: + bly._init_respostnorm() + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + + def set_input_size( + self, + img_size: Optional[Tuple[int, int]] = None, + patch_size: Optional[Tuple[int, int]] = None, + window_size: Optional[Tuple[int, int]] = None, + window_ratio: Optional[int] = 8, + always_partition: Optional[bool] = None, + ): + """Updates the image resolution, window size, and so the pair-wise relative positions. 
+ + Args: + img_size (Optional[Tuple[int, int]]): New input resolution, if None current resolution is used + patch_size (Optional[Tuple[int, int]): New patch size, if None use current patch size + window_size (Optional[int]): New window size, if None based on new_img_size // window_div + window_ratio (int): divisor for calculating window size from patch grid size + always_partition: always partition / shift windows even if feat size is < window + """ + if img_size is not None or patch_size is not None: + self.patch_embed.set_input_size(img_size=img_size, patch_size=patch_size) + grid_size = self.patch_embed.grid_size + + if window_size is None and window_ratio is not None: + window_size = tuple([s // window_ratio for s in grid_size]) + + for index, stage in enumerate(self.layers): + stage_scale = 2 ** max(index - 1, 0) + stage.set_input_size( + feat_size=(grid_size[0] // stage_scale, grid_size[1] // stage_scale), + window_size=window_size, + always_partition=always_partition, + ) + + @torch.jit.ignore + def no_weight_decay(self): + nod = set() + for n, m in self.named_modules(): + if any([kw in n for kw in ("cpb_mlp", "logit_scale")]): + nod.add(n) + return nod + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^absolute_pos_embed|patch_embed', # stem and embed + blocks=r'^layers\.(\d+)' if coarse else [ + (r'^layers\.(\d+).downsample', (0,)), + (r'^layers\.(\d+)\.\w+\.(\d+)', None), + (r'^norm', (99999,)), + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for l in self.layers: + l.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to compatible intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.layers), indices) + + # forward pass + x = self.patch_embed(x) + + num_stages = len(self.layers) + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + stages = self.layers + else: + stages = self.layers[:max_index + 1] + for i, stage in enumerate(stages): + x = stage(x) + if i in take_indices: + if norm and i == num_stages - 1: + x_inter = self.norm(x) # applying final norm last intermediate + else: + x_inter = x + x_inter = x_inter.permute(0, 3, 1, 2).contiguous() + intermediates.append(x_inter) + + if intermediates_only: + return intermediates + + x = self.norm(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
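+
+        Typically used together with forward_intermediates() above. Example (editor's
+        illustrative sketch, not part of the original source; random init):
+
+            >>> import torch, timm
+            >>> m = timm.create_model('swinv2_tiny_window8_256')
+            >>> feats = m.forward_intermediates(
+            ...     torch.randn(1, 3, 256, 256), indices=(0, 1, 2, 3), intermediates_only=True)
+            >>> [tuple(f.shape) for f in feats]   # NCHW maps at stride 4, 8, 16, 32
+            [(1, 96, 64, 64), (1, 192, 32, 32), (1, 384, 16, 16), (1, 768, 8, 8)]
+            >>> _ = m.prune_intermediate_layers(indices=(0, 1), prune_head=True)   # keep first two stages only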
+ """ + take_indices, max_index = feature_take_indices(len(self.layers), indices) + self.layers = self.layers[:max_index + 1] # truncate blocks + if prune_norm: + self.norm = nn.Identity() + if prune_head: + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.patch_embed(x) + x = self.layers(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=True) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + state_dict = state_dict.get('model', state_dict) + state_dict = state_dict.get('state_dict', state_dict) + native_checkpoint = 'head.fc.weight' in state_dict + out_dict = {} + import re + for k, v in state_dict.items(): + if any([n in k for n in ('relative_position_index', 'relative_coords_table', 'attn_mask')]): + continue # skip buffers that should not be persistent + + if 'patch_embed.proj.weight' in k: + _, _, H, W = model.patch_embed.proj.weight.shape + if v.shape[-2] != H or v.shape[-1] != W: + v = resample_patch_embed( + v, + (H, W), + interpolation='bicubic', + antialias=True, + verbose=True, + ) + + if not native_checkpoint: + # skip layer remapping for updated checkpoints + k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k) + k = k.replace('head.', 'head.fc.') + out_dict[k] = v + + return out_dict + + +def _create_swin_transformer_v2(variant, pretrained=False, **kwargs): + default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 1, 1)))) + out_indices = kwargs.pop('out_indices', default_out_indices) + + model = build_model_with_cfg( + SwinTransformerV2, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', + 'license': 'mit', **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'swinv2_base_window12to16_192to256.ms_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to16_192to256_22kto1k_ft.pth', + ), + 'swinv2_base_window12to24_192to384.ms_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to24_192to384_22kto1k_ft.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, + ), + 'swinv2_large_window12to16_192to256.ms_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to16_192to256_22kto1k_ft.pth', + ), + 'swinv2_large_window12to24_192to384.ms_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to24_192to384_22kto1k_ft.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, + ), + + 'swinv2_tiny_window8_256.ms_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window8_256.pth', + ), 
+ 'swinv2_tiny_window16_256.ms_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window16_256.pth', + ), + 'swinv2_small_window8_256.ms_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window8_256.pth', + ), + 'swinv2_small_window16_256.ms_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window16_256.pth', + ), + 'swinv2_base_window8_256.ms_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window8_256.pth', + ), + 'swinv2_base_window16_256.ms_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window16_256.pth', + ), + + 'swinv2_base_window12_192.ms_in22k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12_192_22k.pth', + num_classes=21841, input_size=(3, 192, 192), pool_size=(6, 6) + ), + 'swinv2_large_window12_192.ms_in22k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12_192_22k.pth', + num_classes=21841, input_size=(3, 192, 192), pool_size=(6, 6) + ), +}) + + +@register_model +def swinv2_tiny_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2: + """ + """ + model_args = dict(window_size=16, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) + return _create_swin_transformer_v2( + 'swinv2_tiny_window16_256', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_tiny_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2: + """ + """ + model_args = dict(window_size=8, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) + return _create_swin_transformer_v2( + 'swinv2_tiny_window8_256', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_small_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2: + """ + """ + model_args = dict(window_size=16, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) + return _create_swin_transformer_v2( + 'swinv2_small_window16_256', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_small_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2: + """ + """ + model_args = dict(window_size=8, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) + return _create_swin_transformer_v2( + 'swinv2_small_window8_256', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_base_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2: + """ + """ + model_args = dict(window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) + return _create_swin_transformer_v2( + 'swinv2_base_window16_256', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_base_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2: + """ + """ + model_args = dict(window_size=8, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) + return _create_swin_transformer_v2( + 'swinv2_base_window8_256', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_base_window12_192(pretrained=False, **kwargs) -> SwinTransformerV2: + """ + """ + model_args = 
dict(window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) + return _create_swin_transformer_v2( + 'swinv2_base_window12_192', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_base_window12to16_192to256(pretrained=False, **kwargs) -> SwinTransformerV2: + """ + """ + model_args = dict( + window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), + pretrained_window_sizes=(12, 12, 12, 6)) + return _create_swin_transformer_v2( + 'swinv2_base_window12to16_192to256', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_base_window12to24_192to384(pretrained=False, **kwargs) -> SwinTransformerV2: + """ + """ + model_args = dict( + window_size=24, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), + pretrained_window_sizes=(12, 12, 12, 6)) + return _create_swin_transformer_v2( + 'swinv2_base_window12to24_192to384', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_large_window12_192(pretrained=False, **kwargs) -> SwinTransformerV2: + """ + """ + model_args = dict(window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48)) + return _create_swin_transformer_v2( + 'swinv2_large_window12_192', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_large_window12to16_192to256(pretrained=False, **kwargs) -> SwinTransformerV2: + """ + """ + model_args = dict( + window_size=16, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), + pretrained_window_sizes=(12, 12, 12, 6)) + return _create_swin_transformer_v2( + 'swinv2_large_window12to16_192to256', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_large_window12to24_192to384(pretrained=False, **kwargs) -> SwinTransformerV2: + """ + """ + model_args = dict( + window_size=24, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), + pretrained_window_sizes=(12, 12, 12, 6)) + return _create_swin_transformer_v2( + 'swinv2_large_window12to24_192to384', pretrained=pretrained, **dict(model_args, **kwargs)) + + +register_model_deprecations(__name__, { + 'swinv2_base_window12_192_22k': 'swinv2_base_window12_192.ms_in22k', + 'swinv2_base_window12to16_192to256_22kft1k': 'swinv2_base_window12to16_192to256.ms_in22k_ft_in1k', + 'swinv2_base_window12to24_192to384_22kft1k': 'swinv2_base_window12to24_192to384.ms_in22k_ft_in1k', + 'swinv2_large_window12_192_22k': 'swinv2_large_window12_192.ms_in22k', + 'swinv2_large_window12to16_192to256_22kft1k': 'swinv2_large_window12to16_192to256.ms_in22k_ft_in1k', + 'swinv2_large_window12to24_192to384_22kft1k': 'swinv2_large_window12to24_192to384.ms_in22k_ft_in1k', +}) diff --git a/pytorch-image-models/timm/models/swin_transformer_v2_cr.py b/pytorch-image-models/timm/models/swin_transformer_v2_cr.py new file mode 100644 index 0000000000000000000000000000000000000000..dceb3d50401766588b8416eaadc8c7b924ebb49e --- /dev/null +++ b/pytorch-image-models/timm/models/swin_transformer_v2_cr.py @@ -0,0 +1,1153 @@ +""" Swin Transformer V2 + +A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` + - https://arxiv.org/pdf/2111.09883 + +Code adapted from https://github.com/ChristophReich1996/Swin-Transformer-V2, original copyright/license info below + +This implementation is experimental and subject to change in manners that will break weight compat: +* Size of the pos embed MLP are not spelled out in paper in terms of dim, fixed for all models? 
vary with num_heads? + * currently dim is fixed, I feel it may make sense to scale with num_heads (dim per head) +* The specifics of the memory saving 'sequential attention' are not detailed, Christoph Reich has an impl at + GitHub link above. It needs further investigation as throughput vs mem tradeoff doesn't appear beneficial. +* num_heads per stage is not detailed for Huge and Giant model variants +* 'Giant' is 3B params in paper but ~2.6B here despite matching paper dim + block counts +* experiments are ongoing wrt to 'main branch' norm layer use and weight init scheme + +Noteworthy additions over official Swin v1: +* MLP relative position embedding is looking promising and adapts to different image/window sizes +* This impl has been designed to allow easy change of image size with matching window size changes +* Non-square image size and window size are supported + +Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman +""" +# -------------------------------------------------------- +# Swin Transformer V2 reimplementation +# Copyright (c) 2021 Christoph Reich +# Licensed under The MIT License [see LICENSE for details] +# Written by Christoph Reich +# -------------------------------------------------------- +import logging +import math +from typing import Tuple, Optional, List, Union, Any, Type + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import DropPath, Mlp, ClassifierHead, to_2tuple, _assert, ndgrid +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._features_fx import register_notrace_function +from ._manipulate import named_apply +from ._registry import generate_default_cfgs, register_model + +__all__ = ['SwinTransformerV2Cr'] # model_registry will add each entrypoint fn to this + +_logger = logging.getLogger(__name__) + + +def bchw_to_bhwc(x: torch.Tensor) -> torch.Tensor: + """Permutes a tensor from the shape (B, C, H, W) to (B, H, W, C). """ + return x.permute(0, 2, 3, 1) + + +def bhwc_to_bchw(x: torch.Tensor) -> torch.Tensor: + """Permutes a tensor from the shape (B, H, W, C) to (B, C, H, W). """ + return x.permute(0, 3, 1, 2) + + +def window_partition(x, window_size: Tuple[int, int]): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) + return windows + + +@register_notrace_function # reason: int argument is a Proxy +def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]): + """ + Args: + windows: (num_windows * B, window_size[0], window_size[1], C) + window_size (Tuple[int, int]): Window size + img_size (Tuple[int, int]): Image size + + Returns: + x: (B, H, W, C) + """ + H, W = img_size + C = windows.shape[-1] + x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) + return x + + +class WindowMultiHeadAttention(nn.Module): + r"""This class implements window-based Multi-Head-Attention with log-spaced continuous position bias. 
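A standalone shape check for the window_partition / window_reverse pair defined above; the same view/permute steps are inlined so the snippet runs on its own (non-padded case, H and W multiples of the window size):

import torch

B, H, W, C = 2, 8, 12, 16
ws = (4, 4)
x = torch.randn(B, H, W, C)

# partition: (B, H, W, C) -> (B * num_windows, ws[0], ws[1], C)
win = x.view(B, H // ws[0], ws[0], W // ws[1], ws[1], C)
win = win.permute(0, 1, 3, 2, 4, 5).reshape(-1, ws[0], ws[1], C)

# reverse: back to (B, H, W, C)
back = win.view(-1, H // ws[0], W // ws[1], ws[0], ws[1], C)
back = back.permute(0, 1, 3, 2, 4, 5).reshape(-1, H, W, C)

assert back.shape == x.shape and torch.equal(back, x)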
+ + Args: + dim (int): Number of input features + window_size (int): Window size + num_heads (int): Number of attention heads + drop_attn (float): Dropout rate of attention map + drop_proj (float): Dropout rate after projection + meta_hidden_dim (int): Number of hidden features in the two layer MLP meta network + sequential_attn (bool): If true sequential self-attention is performed + """ + + def __init__( + self, + dim: int, + num_heads: int, + window_size: Tuple[int, int], + drop_attn: float = 0.0, + drop_proj: float = 0.0, + meta_hidden_dim: int = 384, # FIXME what's the optimal value? + sequential_attn: bool = False, + ) -> None: + super(WindowMultiHeadAttention, self).__init__() + assert dim % num_heads == 0, \ + "The number of input features (in_features) are not divisible by the number of heads (num_heads)." + self.in_features: int = dim + self.window_size: Tuple[int, int] = to_2tuple(window_size) + self.num_heads: int = num_heads + self.sequential_attn: bool = sequential_attn + + self.qkv = nn.Linear(in_features=dim, out_features=dim * 3, bias=True) + self.attn_drop = nn.Dropout(drop_attn) + self.proj = nn.Linear(in_features=dim, out_features=dim, bias=True) + self.proj_drop = nn.Dropout(drop_proj) + # meta network for positional encodings + self.meta_mlp = Mlp( + 2, # x, y + hidden_features=meta_hidden_dim, + out_features=num_heads, + act_layer=nn.ReLU, + drop=(0.125, 0.) # FIXME should there be stochasticity, appears to 'overfit' without? + ) + # NOTE old checkpoints used inverse of logit_scale ('tau') following the paper, see conversion fn + self.logit_scale = nn.Parameter(torch.log(10 * torch.ones(num_heads))) + self._make_pair_wise_relative_positions() + + def _make_pair_wise_relative_positions(self) -> None: + """Method initializes the pair-wise relative positions to compute the positional biases.""" + device = self.logit_scale.device + coordinates = torch.stack(ndgrid( + torch.arange(self.window_size[0], device=device), + torch.arange(self.window_size[1], device=device) + ), dim=0).flatten(1) + relative_coordinates = coordinates[:, :, None] - coordinates[:, None, :] + relative_coordinates = relative_coordinates.permute(1, 2, 0).reshape(-1, 2).float() + relative_coordinates_log = torch.sign(relative_coordinates) * torch.log( + 1.0 + relative_coordinates.abs()) + self.register_buffer("relative_coordinates_log", relative_coordinates_log, persistent=False) + + def set_window_size(self, window_size: Tuple[int, int]) -> None: + """Update window size & interpolate position embeddings + Args: + window_size (int): New window size + """ + window_size = to_2tuple(window_size) + if window_size != self.window_size: + self.window_size = window_size + self._make_pair_wise_relative_positions() + + def _relative_positional_encodings(self) -> torch.Tensor: + """Method computes the relative positional encodings + + Returns: + relative_position_bias (torch.Tensor): Relative positional encodings + (1, number of heads, window size ** 2, window size ** 2) + """ + window_area = self.window_size[0] * self.window_size[1] + relative_position_bias = self.meta_mlp(self.relative_coordinates_log) + relative_position_bias = relative_position_bias.transpose(1, 0).reshape( + self.num_heads, window_area, window_area + ) + relative_position_bias = relative_position_bias.unsqueeze(0) + return relative_position_bias + + def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor: + """ Forward pass. 
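A small sketch of the log-spaced relative coordinates built in _make_pair_wise_relative_positions above, with torch.meshgrid(..., indexing='ij') standing in for the ndgrid helper; for a 2x2 window the raw offsets {-1, 0, 1} become {-log 2, 0, log 2} before the meta MLP maps each (dy, dx) pair to per-head biases:

import torch

wh, ww = 2, 2
coords = torch.stack(torch.meshgrid(torch.arange(wh), torch.arange(ww), indexing='ij')).flatten(1)  # (2, 4)
rel = coords[:, :, None] - coords[:, None, :]            # (2, 4, 4)
rel = rel.permute(1, 2, 0).reshape(-1, 2).float()        # (16, 2), offsets in {-1, 0, 1}
rel_log = torch.sign(rel) * torch.log(1.0 + rel.abs())   # log-spaced coordinates
print(rel_log.unique())                                  # tensor([-0.6931, 0.0000, 0.6931])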
+ Args: + x (torch.Tensor): Input tensor of the shape (B * windows, N, C) + mask (Optional[torch.Tensor]): Attention mask for the shift case + + Returns: + Output tensor of the shape [B * windows, N, C] + """ + Bw, L, C = x.shape + + qkv = self.qkv(x).view(Bw, L, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + query, key, value = qkv.unbind(0) + + # compute attention map with scaled cosine attention + attn = (F.normalize(query, dim=-1) @ F.normalize(key, dim=-1).transpose(-2, -1)) + logit_scale = torch.clamp(self.logit_scale.reshape(1, self.num_heads, 1, 1), max=math.log(1. / 0.01)).exp() + attn = attn * logit_scale + attn = attn + self._relative_positional_encodings() + + if mask is not None: + # Apply mask if utilized + num_win: int = mask.shape[0] + attn = attn.view(Bw // num_win, num_win, self.num_heads, L, L) + attn = attn + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, L, L) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ value).transpose(1, 2).reshape(Bw, L, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerV2CrBlock(nn.Module): + r"""This class implements the Swin transformer block. + + Args: + dim (int): Number of input channels + num_heads (int): Number of attention heads to be utilized + feat_size (Tuple[int, int]): Input resolution + window_size (Tuple[int, int]): Window size to be utilized + shift_size (int): Shifting size to be used + mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels + proj_drop (float): Dropout in input mapping + drop_attn (float): Dropout rate of attention map + drop_path (float): Dropout in main path + extra_norm (bool): Insert extra norm on 'main' branch if True + sequential_attn (bool): If true sequential self-attention is performed + norm_layer (Type[nn.Module]): Type of normalization layer to be utilized + """ + + def __init__( + self, + dim: int, + num_heads: int, + feat_size: Tuple[int, int], + window_size: Tuple[int, int], + shift_size: Tuple[int, int] = (0, 0), + always_partition: bool = False, + dynamic_mask: bool = False, + mlp_ratio: float = 4.0, + init_values: Optional[float] = 0, + proj_drop: float = 0.0, + drop_attn: float = 0.0, + drop_path: float = 0.0, + extra_norm: bool = False, + sequential_attn: bool = False, + norm_layer: Type[nn.Module] = nn.LayerNorm, + ): + super(SwinTransformerV2CrBlock, self).__init__() + self.dim: int = dim + self.feat_size: Tuple[int, int] = feat_size + self.target_shift_size: Tuple[int, int] = to_2tuple(shift_size) + self.always_partition = always_partition + self.dynamic_mask = dynamic_mask + self.window_size, self.shift_size = self._calc_window_shift(window_size) + self.window_area = self.window_size[0] * self.window_size[1] + self.init_values: Optional[float] = init_values + + # attn branch + self.attn = WindowMultiHeadAttention( + dim=dim, + num_heads=num_heads, + window_size=self.window_size, + drop_attn=drop_attn, + drop_proj=proj_drop, + sequential_attn=sequential_attn, + ) + self.norm1 = norm_layer(dim) + self.drop_path1 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity() + + # mlp branch + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + drop=proj_drop, + out_features=dim, + ) + self.norm2 = norm_layer(dim) + self.drop_path2 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity() + + # Extra main branch norm layer mentioned for Huge/Giant models in V2 paper. 
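A single-head toy sketch of the scaled cosine attention used in the forward pass above: logits are cosine similarities scaled by a learned temperature whose log is clamped at log(100):

import math
import torch
import torch.nn.functional as F

B, N, D = 1, 4, 8
q, k, v = torch.randn(3, B, N, D).unbind(0)

sim = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)   # cosine similarity, in [-1, 1]
logit_scale = torch.clamp(torch.log(10 * torch.ones(1)), max=math.log(1. / 0.01)).exp()
attn = (sim * logit_scale).softmax(dim=-1)
out = attn @ v                                                             # (B, N, D)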
+ # Also being used as final network norm and optional stage ending norm while still in a C-last format. + self.norm3 = norm_layer(dim) if extra_norm else nn.Identity() + + self.register_buffer( + "attn_mask", + None if self.dynamic_mask else self.get_attn_mask(), + persistent=False, + ) + self.init_weights() + + def _calc_window_shift( + self, + target_window_size: Tuple[int, int], + ) -> Tuple[Tuple[int, int], Tuple[int, int]]: + target_window_size = to_2tuple(target_window_size) + target_shift_size = self.target_shift_size + if any(target_shift_size): + # if non-zero, recalculate shift from current window size in case window size has changed + target_shift_size = (target_window_size[0] // 2, target_window_size[1] // 2) + + if self.always_partition: + return target_window_size, target_shift_size + + window_size = [f if f <= w else w for f, w in zip(self.feat_size, target_window_size)] + shift_size = [0 if f <= w else s for f, w, s in zip(self.feat_size, window_size, target_shift_size)] + return tuple(window_size), tuple(shift_size) + + def get_attn_mask(self, x: Optional[torch.Tensor] = None) -> Optional[torch.Tensor]: + """Method generates the attention mask used in shift case.""" + # Make masks for shift case + if any(self.shift_size): + # calculate attention mask for SW-MSA + if x is None: + img_mask = torch.zeros((1, *self.feat_size, 1)) # 1 H W 1 + else: + img_mask = torch.zeros((1, x.shape[1], x.shape[2], 1), dtype=x.dtype, device=x.device) # 1 H W 1 + cnt = 0 + for h in ( + (0, -self.window_size[0]), + (-self.window_size[0], -self.shift_size[0]), + (-self.shift_size[0], None), + ): + for w in ( + (0, -self.window_size[1]), + (-self.window_size[1], -self.shift_size[1]), + (-self.shift_size[1], None), + ): + img_mask[:, h[0]:h[1], w[0]:w[1], :] = cnt + cnt += 1 + mask_windows = window_partition(img_mask, self.window_size) # num_windows, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_area) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + return attn_mask + + def init_weights(self): + # extra, module specific weight init + if self.init_values is not None: + nn.init.constant_(self.norm1.weight, self.init_values) + nn.init.constant_(self.norm2.weight, self.init_values) + + def set_input_size(self, feat_size: Tuple[int, int], window_size: Tuple[int, int]) -> None: + """Method updates the image resolution to be processed and window size and so the pair-wise relative positions. 
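A worked example of the shifted-window attention mask assembled in get_attn_mask above, for a 4x4 feature map with a 2x2 window and shift 1; the window-partition reshape is inlined so it runs standalone:

import torch

H = W = 4
ws, ss = (2, 2), (1, 1)
img_mask = torch.zeros(1, H, W, 1)
cnt = 0
for h in ((0, -ws[0]), (-ws[0], -ss[0]), (-ss[0], None)):
    for w in ((0, -ws[1]), (-ws[1], -ss[1]), (-ss[1], None)):
        img_mask[:, h[0]:h[1], w[0]:w[1], :] = cnt
        cnt += 1
# partition the region-label map into windows, then turn label mismatches into -100 biases
mask_windows = img_mask.view(1, H // ws[0], ws[0], W // ws[1], ws[1], 1)
mask_windows = mask_windows.permute(0, 1, 3, 2, 4, 5).reshape(-1, ws[0] * ws[1])
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0)
print(attn_mask.shape)   # torch.Size([4, 4, 4]); only same-region tokens attend freely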
+ + Args: + feat_size (Tuple[int, int]): New input resolution + window_size (int): New window size + """ + # Update input resolution + self.feat_size: Tuple[int, int] = feat_size + self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(window_size)) + self.window_area = self.window_size[0] * self.window_size[1] + self.attn.set_window_size(self.window_size) + self.register_buffer( + "attn_mask", + None if self.dynamic_mask else self.get_attn_mask(), + persistent=False, + ) + + def _shifted_window_attn(self, x): + B, H, W, C = x.shape + + # cyclic shift + sh, sw = self.shift_size + do_shift: bool = any(self.shift_size) + if do_shift: + # FIXME PyTorch XLA needs cat impl, roll not lowered + # x = torch.cat([x[:, sh:], x[:, :sh]], dim=1) + # x = torch.cat([x[:, :, sw:], x[:, :, :sw]], dim=2) + x = torch.roll(x, shifts=(-sh, -sw), dims=(1, 2)) + + pad_h = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0] + pad_w = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1] + x = torch.nn.functional.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + _, Hp, Wp, _ = x.shape + + # partition windows + x_windows = window_partition(x, self.window_size) # num_windows * B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C) + + # W-MSA/SW-MSA + if getattr(self, 'dynamic_mask', False): + attn_mask = self.get_attn_mask(x) + else: + attn_mask = self.attn_mask + attn_windows = self.attn(x_windows, mask=attn_mask) # num_windows * B, window_size * window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) + x = window_reverse(attn_windows, self.window_size, (Hp, Wp)) # B H' W' C + x = x[:, :H, :W, :].contiguous() + + # reverse cyclic shift + if do_shift: + # FIXME PyTorch XLA needs cat impl, roll not lowered + # x = torch.cat([x[:, -sh:], x[:, :-sh]], dim=1) + # x = torch.cat([x[:, :, -sw:], x[:, :, :-sw]], dim=2) + x = torch.roll(x, shifts=(sh, sw), dims=(1, 2)) + + return x + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward pass. + + Args: + x (torch.Tensor): Input tensor of the shape [B, C, H, W] + + Returns: + output (torch.Tensor): Output tensor of the shape [B, C, H, W] + """ + # post-norm branches (op -> norm -> drop) + x = x + self.drop_path1(self.norm1(self._shifted_window_attn(x))) + + B, H, W, C = x.shape + x = x.reshape(B, -1, C) + x = x + self.drop_path2(self.norm2(self.mlp(x))) + x = self.norm3(x) # main-branch norm enabled for some blocks / stages (every 6 for Huge/Giant) + x = x.reshape(B, H, W, C) + return x + + +class PatchMerging(nn.Module): + """ This class implements the patch merging as a strided convolution with a normalization before. + Args: + dim (int): Number of input channels + norm_layer (Type[nn.Module]): Type of normalization layer to be utilized. + """ + + def __init__(self, dim: int, norm_layer: Type[nn.Module] = nn.LayerNorm) -> None: + super(PatchMerging, self).__init__() + self.norm = norm_layer(4 * dim) + self.reduction = nn.Linear(in_features=4 * dim, out_features=2 * dim, bias=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ Forward pass. 
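A shape-only sketch of the 2x2 patch merging performed in the forward method that follows: each 2x2 neighborhood is folded into the channel dimension, normalized, then projected from 4*C down to 2*C (layers freshly initialized here, purely to show shapes):

import torch
import torch.nn as nn

B, H, W, C = 1, 8, 8, 96
x = torch.randn(B, H, W, C)
x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3)   # (B, H/2, W/2, 4*C)
x = nn.LayerNorm(4 * C)(x)
x = nn.Linear(4 * C, 2 * C, bias=False)(x)
print(x.shape)   # torch.Size([1, 4, 4, 192])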
+ Args: + x (torch.Tensor): Input tensor of the shape [B, C, H, W] + Returns: + output (torch.Tensor): Output tensor of the shape [B, 2 * C, H // 2, W // 2] + """ + B, H, W, C = x.shape + + pad_values = (0, 0, 0, W % 2, 0, H % 2) + x = nn.functional.pad(x, pad_values) + _, H, W, _ = x.shape + + x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3) + x = self.norm(x) + x = self.reduction(x) + return x + + +class PatchEmbed(nn.Module): + """ 2D Image to Patch Embedding """ + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + norm_layer=None, + strict_img_size=True, + ): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.strict_img_size = strict_img_size + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def set_input_size(self, img_size: Tuple[int, int]): + img_size = to_2tuple(img_size) + if img_size != self.img_size: + self.img_size = img_size + self.grid_size = (img_size[0] // self.patch_size[0], img_size[1] // self.patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + + def forward(self, x): + B, C, H, W = x.shape + if self.strict_img_size: + _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).") + _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).") + x = self.proj(x) + x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + return x + + +class SwinTransformerV2CrStage(nn.Module): + r"""This class implements a stage of the Swin transformer including multiple layers. + + Args: + embed_dim (int): Number of input channels + depth (int): Depth of the stage (number of layers) + downscale (bool): If true input is downsampled (see Fig. 3 or V1 paper) + feat_size (Tuple[int, int]): input feature map size (H, W) + num_heads (int): Number of attention heads to be utilized + window_size (int): Window size to be utilized + mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels + proj_drop (float): Dropout in input mapping + drop_attn (float): Dropout rate of attention map + drop_path (float): Dropout in main path + norm_layer (Type[nn.Module]): Type of normalization layer to be utilized. 
Default: nn.LayerNorm + extra_norm_period (int): Insert extra norm layer on main branch every N (period) blocks + extra_norm_stage (bool): End each stage with an extra norm layer in main branch + sequential_attn (bool): If true sequential self-attention is performed + """ + + def __init__( + self, + embed_dim: int, + depth: int, + downscale: bool, + num_heads: int, + feat_size: Tuple[int, int], + window_size: Tuple[int, int], + always_partition: bool = False, + dynamic_mask: bool = False, + mlp_ratio: float = 4.0, + init_values: Optional[float] = 0.0, + proj_drop: float = 0.0, + drop_attn: float = 0.0, + drop_path: Union[List[float], float] = 0.0, + norm_layer: Type[nn.Module] = nn.LayerNorm, + extra_norm_period: int = 0, + extra_norm_stage: bool = False, + sequential_attn: bool = False, + ): + super(SwinTransformerV2CrStage, self).__init__() + self.downscale: bool = downscale + self.grad_checkpointing: bool = False + self.feat_size: Tuple[int, int] = (feat_size[0] // 2, feat_size[1] // 2) if downscale else feat_size + + if downscale: + self.downsample = PatchMerging(embed_dim, norm_layer=norm_layer) + embed_dim = embed_dim * 2 + else: + self.downsample = nn.Identity() + + def _extra_norm(index): + i = index + 1 + if extra_norm_period and i % extra_norm_period == 0: + return True + return i == depth if extra_norm_stage else False + + self.blocks = nn.Sequential(*[ + SwinTransformerV2CrBlock( + dim=embed_dim, + num_heads=num_heads, + feat_size=self.feat_size, + window_size=window_size, + always_partition=always_partition, + dynamic_mask=dynamic_mask, + shift_size=tuple([0 if ((index % 2) == 0) else w // 2 for w in window_size]), + mlp_ratio=mlp_ratio, + init_values=init_values, + proj_drop=proj_drop, + drop_attn=drop_attn, + drop_path=drop_path[index] if isinstance(drop_path, list) else drop_path, + extra_norm=_extra_norm(index), + sequential_attn=sequential_attn, + norm_layer=norm_layer, + ) + for index in range(depth)] + ) + + def set_input_size( + self, + feat_size: Tuple[int, int], + window_size: int, + always_partition: Optional[bool] = None, + ): + """ Updates the resolution to utilize and the window size and so the pair-wise relative positions. + + Args: + window_size (int): New window size + feat_size (Tuple[int, int]): New input resolution + """ + self.feat_size = (feat_size[0] // 2, feat_size[1] // 2) if self.downscale else feat_size + for block in self.blocks: + block.set_input_size( + feat_size=self.feat_size, + window_size=window_size, + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward pass. + Args: + x (torch.Tensor): Input tensor of the shape [B, C, H, W] or [B, L, C] + Returns: + output (torch.Tensor): Output tensor of the shape [B, 2 * C, H // 2, W // 2] + """ + x = bchw_to_bhwc(x) + x = self.downsample(x) + for block in self.blocks: + # Perform checkpointing if utilized + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint.checkpoint(block, x) + else: + x = block(x) + x = bhwc_to_bchw(x) + return x + + +class SwinTransformerV2Cr(nn.Module): + r""" Swin Transformer V2 + A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` - + https://arxiv.org/pdf/2111.09883 + + Args: + img_size: Input resolution. + window_size: Window size. If None, grid_size // window_div + window_ratio: Window size to patch grid ratio. + patch_size: Patch size. + in_chans: Number of input channels. + depths: Depth of the stage (number of layers). + num_heads: Number of attention heads to be utilized. 
+ embed_dim: Patch embedding dimension. + num_classes: Number of output classes. + mlp_ratio: Ratio of the hidden dimension in the FFN to the input channels. + drop_rate: Dropout rate. + proj_drop_rate: Projection dropout rate. + attn_drop_rate: Dropout rate of attention map. + drop_path_rate: Stochastic depth rate. + norm_layer: Type of normalization layer to be utilized. + extra_norm_period: Insert extra norm layer on main branch every N (period) blocks in stage + extra_norm_stage: End each stage with an extra norm layer in main branch + sequential_attn: If true sequential self-attention is performed. + """ + + def __init__( + self, + img_size: Tuple[int, int] = (224, 224), + patch_size: int = 4, + window_size: Optional[int] = None, + window_ratio: int = 8, + always_partition: bool = False, + strict_img_size: bool = True, + in_chans: int = 3, + num_classes: int = 1000, + embed_dim: int = 96, + depths: Tuple[int, ...] = (2, 2, 6, 2), + num_heads: Tuple[int, ...] = (3, 6, 12, 24), + mlp_ratio: float = 4.0, + init_values: Optional[float] = 0., + drop_rate: float = 0.0, + proj_drop_rate: float = 0.0, + attn_drop_rate: float = 0.0, + drop_path_rate: float = 0.0, + norm_layer: Type[nn.Module] = nn.LayerNorm, + extra_norm_period: int = 0, + extra_norm_stage: bool = False, + sequential_attn: bool = False, + global_pool: str = 'avg', + weight_init='skip', + **kwargs: Any + ) -> None: + super(SwinTransformerV2Cr, self).__init__() + img_size = to_2tuple(img_size) + self.num_classes: int = num_classes + self.patch_size: int = patch_size + self.img_size: Tuple[int, int] = img_size + self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) + self.feature_info = [] + + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + norm_layer=norm_layer, + strict_img_size=strict_img_size, + ) + grid_size = self.patch_embed.grid_size + if window_size is None: + self.window_size = tuple([s // window_ratio for s in grid_size]) + else: + self.window_size = to_2tuple(window_size) + + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + stages = [] + in_dim = embed_dim + in_scale = 1 + for stage_idx, (depth, num_heads) in enumerate(zip(depths, num_heads)): + stages += [SwinTransformerV2CrStage( + embed_dim=in_dim, + depth=depth, + downscale=stage_idx != 0, + feat_size=(grid_size[0] // in_scale, grid_size[1] // in_scale), + num_heads=num_heads, + window_size=self.window_size, + always_partition=always_partition, + dynamic_mask=not strict_img_size, + mlp_ratio=mlp_ratio, + init_values=init_values, + proj_drop=proj_drop_rate, + drop_attn=attn_drop_rate, + drop_path=dpr[stage_idx], + extra_norm_period=extra_norm_period, + extra_norm_stage=extra_norm_stage or (stage_idx + 1) == len(depths), # last stage ends w/ norm + sequential_attn=sequential_attn, + norm_layer=norm_layer, + )] + if stage_idx != 0: + in_dim *= 2 + in_scale *= 2 + self.feature_info += [dict(num_chs=in_dim, reduction=4 * in_scale, module=f'stages.{stage_idx}')] + self.stages = nn.Sequential(*stages) + + self.head = ClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + ) + + # current weight init skips custom init and uses pytorch layer defaults, seems to work well + # FIXME more experiments needed + if weight_init != 'skip': + named_apply(init_weights, self) + + def set_input_size( + self, + img_size: Optional[Tuple[int, int]] = None, + window_size: Optional[Tuple[int, int]] = 
None, + window_ratio: int = 8, + always_partition: Optional[bool] = None, + ) -> None: + """Updates the image resolution, window size and so the pair-wise relative positions. + + Args: + img_size (Optional[Tuple[int, int]]): New input resolution, if None current resolution is used + window_size (Optional[int]): New window size, if None based on new_img_size // window_div + window_ratio (int): divisor for calculating window size from patch grid size + always_partition: always partition / shift windows even if feat size is < window + """ + if img_size is not None: + self.patch_embed.set_input_size(img_size=img_size) + grid_size = self.patch_embed.grid_size + + if window_size is None and window_ratio is not None: + window_size = tuple([s // window_ratio for s in grid_size]) + + for index, stage in enumerate(self.stages): + stage_scale = 2 ** max(index - 1, 0) + stage.set_input_size( + feat_size=(grid_size[0] // stage_scale, grid_size[1] // stage_scale), + window_size=window_size, + always_partition=always_partition, + ) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^patch_embed', # stem and embed + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+).downsample', (0,)), + (r'^stages\.(\d+)\.\w+\.(\d+)', None), + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore() + def get_classifier(self) -> nn.Module: + """Method returns the classification head of the model. + Returns: + head (nn.Module): Current classification head + """ + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None: + """Method results the classification head + + Args: + num_classes (int): Number of classes to be predicted + global_pool (str): Unused + """ + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to compatible intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.stages), indices) + + # forward pass + x = self.patch_embed(x) + + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + stages = self.stages + else: + stages = self.stages[:max_index + 1] + for i, stage in enumerate(stages): + x = stage(x) + if i in take_indices: + intermediates.append(x) + + if intermediates_only: + return intermediates + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
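A hedged usage sketch for set_input_size as defined above, assuming a timm install that carries this file: a randomly initialized CR model built at 224x224 is adapted to 384x384, with window sizes and attention masks rebuilt from the new patch grid:

import timm
import torch

model = timm.create_model('swinv2_cr_tiny_224', pretrained=False)
model.set_input_size(img_size=(384, 384))    # window size re-derived as grid_size // window_ratio
out = model(torch.randn(1, 3, 384, 384))
print(out.shape)   # torch.Size([1, 1000])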
+ """ + take_indices, max_index = feature_take_indices(len(self.stages), indices) + self.stages = self.stages[:max_index + 1] # truncate blocks + if prune_head: + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + x = self.patch_embed(x) + x = self.stages(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=True) if pre_logits else self.head(x) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def init_weights(module: nn.Module, name: str = ''): + # FIXME WIP determining if there's a better weight init + if isinstance(module, nn.Linear): + if 'qkv' in name: + # treat the weights of Q, K, V separately + val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1])) + nn.init.uniform_(module.weight, -val, val) + elif 'head' in name: + nn.init.zeros_(module.weight) + else: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def checkpoint_filter_fn(state_dict, model): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + state_dict = state_dict.get('model', state_dict) + state_dict = state_dict.get('state_dict', state_dict) + if 'head.fc.weight' in state_dict: + return state_dict + out_dict = {} + for k, v in state_dict.items(): + if 'tau' in k: + # convert old tau based checkpoints -> logit_scale (inverse) + v = torch.log(1 / v) + k = k.replace('tau', 'logit_scale') + k = k.replace('head.', 'head.fc.') + out_dict[k] = v + return out_dict + + +def _create_swin_transformer_v2_cr(variant, pretrained=False, **kwargs): + default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 1, 1)))) + out_indices = kwargs.pop('out_indices', default_out_indices) + + model = build_model_with_cfg( + SwinTransformerV2Cr, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs + ) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, + 'input_size': (3, 224, 224), + 'pool_size': (7, 7), + 'crop_pct': 0.9, + 'interpolation': 'bicubic', + 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, + 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', + 'classifier': 'head.fc', + **kwargs, + } + + +default_cfgs = generate_default_cfgs({ + 'swinv2_cr_tiny_384.untrained': _cfg( + url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), + 'swinv2_cr_tiny_224.untrained': _cfg( + url="", input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_tiny_ns_224.sw_in1k': _cfg( + hf_hub_id='timm/', + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_tiny_ns_224-ba8166c6.pth", + input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_small_384.untrained': _cfg( + url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), + 'swinv2_cr_small_224.sw_in1k': _cfg( + hf_hub_id='timm/', + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_224-0813c165.pth", + input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_small_ns_224.sw_in1k': _cfg( + hf_hub_id='timm/', + 
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_ns_224_iv-2ce90f8e.pth", + input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_small_ns_256.untrained': _cfg( + url="", input_size=(3, 256, 256), crop_pct=1.0, pool_size=(8, 8)), + 'swinv2_cr_base_384.untrained': _cfg( + url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), + 'swinv2_cr_base_224.untrained': _cfg( + url="", input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_base_ns_224.untrained': _cfg( + url="", input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_large_384.untrained': _cfg( + url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), + 'swinv2_cr_large_224.untrained': _cfg( + url="", input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_huge_384.untrained': _cfg( + url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), + 'swinv2_cr_huge_224.untrained': _cfg( + url="", input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_giant_384.untrained': _cfg( + url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), + 'swinv2_cr_giant_224.untrained': _cfg( + url="", input_size=(3, 224, 224), crop_pct=0.9), +}) + + +@register_model +def swinv2_cr_tiny_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-T V2 CR @ 384x384, trained ImageNet-1k""" + model_args = dict( + embed_dim=96, + depths=(2, 2, 6, 2), + num_heads=(3, 6, 12, 24), + ) + return _create_swin_transformer_v2_cr('swinv2_cr_tiny_384', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_tiny_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-T V2 CR @ 224x224, trained ImageNet-1k""" + model_args = dict( + embed_dim=96, + depths=(2, 2, 6, 2), + num_heads=(3, 6, 12, 24), + ) + return _create_swin_transformer_v2_cr('swinv2_cr_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_tiny_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-T V2 CR @ 224x224, trained ImageNet-1k w/ extra stage norms. + ** Experimental, may make default if results are improved. 
** + """ + model_args = dict( + embed_dim=96, + depths=(2, 2, 6, 2), + num_heads=(3, 6, 12, 24), + extra_norm_stage=True, + ) + return _create_swin_transformer_v2_cr('swinv2_cr_tiny_ns_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_small_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-S V2 CR @ 384x384, trained ImageNet-1k""" + model_args = dict( + embed_dim=96, + depths=(2, 2, 18, 2), + num_heads=(3, 6, 12, 24), + ) + return _create_swin_transformer_v2_cr('swinv2_cr_small_384', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_small_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-S V2 CR @ 224x224, trained ImageNet-1k""" + model_args = dict( + embed_dim=96, + depths=(2, 2, 18, 2), + num_heads=(3, 6, 12, 24), + ) + return _create_swin_transformer_v2_cr('swinv2_cr_small_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_small_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-S V2 CR @ 224x224, trained ImageNet-1k""" + model_args = dict( + embed_dim=96, + depths=(2, 2, 18, 2), + num_heads=(3, 6, 12, 24), + extra_norm_stage=True, + ) + return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_small_ns_256(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-S V2 CR @ 256x256, trained ImageNet-1k""" + model_args = dict( + embed_dim=96, + depths=(2, 2, 18, 2), + num_heads=(3, 6, 12, 24), + extra_norm_stage=True, + ) + return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_256', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_base_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-B V2 CR @ 384x384, trained ImageNet-1k""" + model_args = dict( + embed_dim=128, + depths=(2, 2, 18, 2), + num_heads=(4, 8, 16, 32), + ) + return _create_swin_transformer_v2_cr('swinv2_cr_base_384', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_base_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-B V2 CR @ 224x224, trained ImageNet-1k""" + model_args = dict( + embed_dim=128, + depths=(2, 2, 18, 2), + num_heads=(4, 8, 16, 32), + ) + return _create_swin_transformer_v2_cr('swinv2_cr_base_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_base_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-B V2 CR @ 224x224, trained ImageNet-1k""" + model_args = dict( + embed_dim=128, + depths=(2, 2, 18, 2), + num_heads=(4, 8, 16, 32), + extra_norm_stage=True, + ) + return _create_swin_transformer_v2_cr('swinv2_cr_base_ns_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_large_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-L V2 CR @ 384x384, trained ImageNet-1k""" + model_args = dict( + embed_dim=192, + depths=(2, 2, 18, 2), + num_heads=(6, 12, 24, 48), + ) + return _create_swin_transformer_v2_cr('swinv2_cr_large_384', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_large_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-L V2 CR @ 224x224, trained ImageNet-1k""" + model_args = dict( + embed_dim=192, + depths=(2, 2, 18, 2), + num_heads=(6, 12, 24, 48), + ) + return _create_swin_transformer_v2_cr('swinv2_cr_large_224', pretrained=pretrained, 
**dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_huge_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-H V2 CR @ 384x384, trained ImageNet-1k""" + model_args = dict( + embed_dim=352, + depths=(2, 2, 18, 2), + num_heads=(11, 22, 44, 88), # head count not certain for Huge, 384 & 224 trying diff values + extra_norm_period=6, + ) + return _create_swin_transformer_v2_cr('swinv2_cr_huge_384', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_huge_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-H V2 CR @ 224x224, trained ImageNet-1k""" + model_args = dict( + embed_dim=352, + depths=(2, 2, 18, 2), + num_heads=(8, 16, 32, 64), # head count not certain for Huge, 384 & 224 trying diff values + extra_norm_period=6, + ) + return _create_swin_transformer_v2_cr('swinv2_cr_huge_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_giant_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-G V2 CR @ 384x384, trained ImageNet-1k""" + model_args = dict( + embed_dim=512, + depths=(2, 2, 42, 2), + num_heads=(16, 32, 64, 128), + extra_norm_period=6, + ) + return _create_swin_transformer_v2_cr('swinv2_cr_giant_384', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swinv2_cr_giant_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: + """Swin-G V2 CR @ 224x224, trained ImageNet-1k""" + model_args = dict( + embed_dim=512, + depths=(2, 2, 42, 2), + num_heads=(16, 32, 64, 128), + extra_norm_period=6, + ) + return _create_swin_transformer_v2_cr('swinv2_cr_giant_224', pretrained=pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/tiny_vit.py b/pytorch-image-models/timm/models/tiny_vit.py new file mode 100644 index 0000000000000000000000000000000000000000..12a5ef2f166c65ece628b95fc72445b410fec3fb --- /dev/null +++ b/pytorch-image-models/timm/models/tiny_vit.py @@ -0,0 +1,715 @@ +""" TinyViT + +Paper: `TinyViT: Fast Pretraining Distillation for Small Vision Transformers` + - https://arxiv.org/abs/2207.10666 + +Adapted from official impl at https://github.com/microsoft/Cream/tree/main/TinyViT +""" + +__all__ = ['TinyVit'] + +import itertools +from functools import partial +from typing import Dict, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import LayerNorm2d, NormMlpClassifierHead, DropPath,\ + trunc_normal_, resize_rel_pos_bias_table_levit, use_fused_attn +from ._builder import build_model_with_cfg +from ._features_fx import register_notrace_module +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs + + +class ConvNorm(torch.nn.Sequential): + def __init__(self, in_chs, out_chs, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1): + super().__init__() + self.conv = nn.Conv2d(in_chs, out_chs, ks, stride, pad, dilation, groups, bias=False) + self.bn = nn.BatchNorm2d(out_chs) + torch.nn.init.constant_(self.bn.weight, bn_weight_init) + torch.nn.init.constant_(self.bn.bias, 0) + + @torch.no_grad() + def fuse(self): + c, bn = self.conv, self.bn + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / \ + (bn.running_var + bn.eps) ** 0.5 + m = torch.nn.Conv2d( + w.size(1) * self.conv.groups, w.size(0), w.shape[2:], + stride=self.conv.stride, 
padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +class PatchEmbed(nn.Module): + def __init__(self, in_chs, out_chs, act_layer): + super().__init__() + self.stride = 4 + self.conv1 = ConvNorm(in_chs, out_chs // 2, 3, 2, 1) + self.act = act_layer() + self.conv2 = ConvNorm(out_chs // 2, out_chs, 3, 2, 1) + + def forward(self, x): + x = self.conv1(x) + x = self.act(x) + x = self.conv2(x) + return x + + +class MBConv(nn.Module): + def __init__(self, in_chs, out_chs, expand_ratio, act_layer, drop_path): + super().__init__() + mid_chs = int(in_chs * expand_ratio) + self.conv1 = ConvNorm(in_chs, mid_chs, ks=1) + self.act1 = act_layer() + self.conv2 = ConvNorm(mid_chs, mid_chs, ks=3, stride=1, pad=1, groups=mid_chs) + self.act2 = act_layer() + self.conv3 = ConvNorm(mid_chs, out_chs, ks=1, bn_weight_init=0.0) + self.act3 = act_layer() + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.act1(x) + x = self.conv2(x) + x = self.act2(x) + x = self.conv3(x) + x = self.drop_path(x) + x += shortcut + x = self.act3(x) + return x + + +class PatchMerging(nn.Module): + def __init__(self, dim, out_dim, act_layer): + super().__init__() + self.conv1 = ConvNorm(dim, out_dim, 1, 1, 0) + self.act1 = act_layer() + self.conv2 = ConvNorm(out_dim, out_dim, 3, 2, 1, groups=out_dim) + self.act2 = act_layer() + self.conv3 = ConvNorm(out_dim, out_dim, 1, 1, 0) + + def forward(self, x): + x = self.conv1(x) + x = self.act1(x) + x = self.conv2(x) + x = self.act2(x) + x = self.conv3(x) + return x + + +class ConvLayer(nn.Module): + def __init__( + self, + dim, + depth, + act_layer, + drop_path=0., + conv_expand_ratio=4., + ): + super().__init__() + self.dim = dim + self.depth = depth + self.blocks = nn.Sequential(*[ + MBConv( + dim, dim, conv_expand_ratio, act_layer, + drop_path[i] if isinstance(drop_path, list) else drop_path, + ) + for i in range(depth) + ]) + + def forward(self, x): + x = self.blocks(x) + return x + + +class NormMlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + norm_layer=nn.LayerNorm, + act_layer=nn.GELU, + drop=0., + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.norm = norm_layer(in_features) + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.drop1 = nn.Dropout(drop) + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop2 = nn.Dropout(drop) + + def forward(self, x): + x = self.norm(x) + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class Attention(torch.nn.Module): + fused_attn: torch.jit.Final[bool] + attention_bias_cache: Dict[str, torch.Tensor] + + def __init__( + self, + dim, + key_dim, + num_heads=8, + attn_ratio=4, + resolution=(14, 14), + ): + super().__init__() + assert isinstance(resolution, tuple) and len(resolution) == 2 + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.val_dim = int(attn_ratio * key_dim) + self.out_dim = self.val_dim * num_heads + self.attn_ratio = attn_ratio + self.resolution = resolution + self.fused_attn = use_fused_attn() + + self.norm = nn.LayerNorm(dim) + self.qkv = nn.Linear(dim, num_heads * (self.val_dim + 2 * key_dim)) + self.proj = nn.Linear(self.out_dim, dim) + + points = 
list(itertools.product(range(resolution[0]), range(resolution[1]))) + N = len(points) + attention_offsets = {} + idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False) + self.attention_bias_cache = {} + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.attention_bias_cache: + self.attention_bias_cache = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if torch.jit.is_tracing() or self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.attention_bias_cache: + self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.attention_bias_cache[device_key] + + def forward(self, x): + attn_bias = self.get_attention_biases(x.device) + B, N, _ = x.shape + # Normalization + x = self.norm(x) + qkv = self.qkv(x) + # (B, N, num_heads, d) + q, k, v = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.val_dim], dim=3) + # (B, num_heads, N, d) + q = q.permute(0, 2, 1, 3) + k = k.permute(0, 2, 1, 3) + v = v.permute(0, 2, 1, 3) + + if self.fused_attn: + x = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_bias) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + attn = attn + attn_bias + attn = attn.softmax(dim=-1) + x = attn @ v + x = x.transpose(1, 2).reshape(B, N, self.out_dim) + x = self.proj(x) + return x + + +class TinyVitBlock(nn.Module): + """ TinyViT Block. + + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): Window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + drop (float, optional): Dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + local_conv_size (int): the kernel size of the convolution between + Attention and MLP. Default: 3 + act_layer: the activation function. Default: nn.GELU + """ + + def __init__( + self, + dim, + num_heads, + window_size=7, + mlp_ratio=4., + drop=0., + drop_path=0., + local_conv_size=3, + act_layer=nn.GELU + ): + super().__init__() + self.dim = dim + self.num_heads = num_heads + assert window_size > 0, 'window_size must be greater than 0' + self.window_size = window_size + self.mlp_ratio = mlp_ratio + + assert dim % num_heads == 0, 'dim must be divisible by num_heads' + head_dim = dim // num_heads + + window_resolution = (window_size, window_size) + self.attn = Attention(dim, head_dim, num_heads, attn_ratio=1, resolution=window_resolution) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + + self.mlp = NormMlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=drop, + ) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + pad = local_conv_size // 2 + self.local_conv = ConvNorm(dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim) + + def forward(self, x): + B, H, W, C = x.shape + L = H * W + + shortcut = x + if H == self.window_size and W == self.window_size: + x = x.reshape(B, L, C) + x = self.attn(x) + x = x.view(B, H, W, C) + else: + pad_b = (self.window_size - H % self.window_size) % self.window_size + pad_r = (self.window_size - W % self.window_size) % self.window_size + padding = pad_b > 0 or pad_r > 0 + if padding: + x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b)) + + # window partition + pH, pW = H + pad_b, W + pad_r + nH = pH // self.window_size + nW = pW // self.window_size + x = x.view(B, nH, self.window_size, nW, self.window_size, C).transpose(2, 3).reshape( + B * nH * nW, self.window_size * self.window_size, C + ) + + x = self.attn(x) + + # window reverse + x = x.view(B, nH, nW, self.window_size, self.window_size, C).transpose(2, 3).reshape(B, pH, pW, C) + + if padding: + x = x[:, :H, :W].contiguous() + x = shortcut + self.drop_path1(x) + + x = x.permute(0, 3, 1, 2) + x = self.local_conv(x) + x = x.reshape(B, C, L).transpose(1, 2) + + x = x + self.drop_path2(self.mlp(x)) + return x.view(B, H, W, C) + + def extra_repr(self) -> str: + return f"dim={self.dim}, num_heads={self.num_heads}, " \ + f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}" + + +register_notrace_module(TinyVitBlock) + + +class TinyVitStage(nn.Module): + """ A basic TinyViT layer for one stage. + + Args: + dim (int): Number of input channels. + out_dim: the output dimension of the layer + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + drop (float, optional): Dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + local_conv_size: the kernel size of the depthwise convolution between attention and MLP. Default: 3 + act_layer: the activation function. 
Default: nn.GELU + """ + + def __init__( + self, + dim, + out_dim, + depth, + num_heads, + window_size, + mlp_ratio=4., + drop=0., + drop_path=0., + downsample=None, + local_conv_size=3, + act_layer=nn.GELU, + ): + + super().__init__() + self.depth = depth + self.out_dim = out_dim + + # patch merging layer + if downsample is not None: + self.downsample = downsample( + dim=dim, + out_dim=out_dim, + act_layer=act_layer, + ) + else: + self.downsample = nn.Identity() + assert dim == out_dim + + # build blocks + self.blocks = nn.Sequential(*[ + TinyVitBlock( + dim=out_dim, + num_heads=num_heads, + window_size=window_size, + mlp_ratio=mlp_ratio, + drop=drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + local_conv_size=local_conv_size, + act_layer=act_layer, + ) + for i in range(depth)]) + + def forward(self, x): + x = self.downsample(x) + x = x.permute(0, 2, 3, 1) # BCHW -> BHWC + x = self.blocks(x) + x = x.permute(0, 3, 1, 2) # BHWC -> BCHW + return x + + def extra_repr(self) -> str: + return f"dim={self.out_dim}, depth={self.depth}" + + +class TinyVit(nn.Module): + def __init__( + self, + in_chans=3, + num_classes=1000, + global_pool='avg', + embed_dims=(96, 192, 384, 768), + depths=(2, 2, 6, 2), + num_heads=(3, 6, 12, 24), + window_sizes=(7, 7, 14, 7), + mlp_ratio=4., + drop_rate=0., + drop_path_rate=0.1, + use_checkpoint=False, + mbconv_expand_ratio=4.0, + local_conv_size=3, + act_layer=nn.GELU, + ): + super().__init__() + + self.num_classes = num_classes + self.depths = depths + self.num_stages = len(depths) + self.mlp_ratio = mlp_ratio + self.grad_checkpointing = use_checkpoint + + self.patch_embed = PatchEmbed( + in_chs=in_chans, + out_chs=embed_dims[0], + act_layer=act_layer, + ) + + # stochastic depth rate rule + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] + + # build stages + self.stages = nn.Sequential() + stride = self.patch_embed.stride + prev_dim = embed_dims[0] + self.feature_info = [] + for stage_idx in range(self.num_stages): + if stage_idx == 0: + stage = ConvLayer( + dim=prev_dim, + depth=depths[stage_idx], + act_layer=act_layer, + drop_path=dpr[:depths[stage_idx]], + conv_expand_ratio=mbconv_expand_ratio, + ) + else: + out_dim = embed_dims[stage_idx] + drop_path_rate = dpr[sum(depths[:stage_idx]):sum(depths[:stage_idx + 1])] + stage = TinyVitStage( + dim=embed_dims[stage_idx - 1], + out_dim=out_dim, + depth=depths[stage_idx], + num_heads=num_heads[stage_idx], + window_size=window_sizes[stage_idx], + mlp_ratio=self.mlp_ratio, + drop=drop_rate, + local_conv_size=local_conv_size, + drop_path=drop_path_rate, + downsample=PatchMerging, + act_layer=act_layer, + ) + prev_dim = out_dim + stride *= 2 + self.stages.append(stage) + self.feature_info += [dict(num_chs=prev_dim, reduction=stride, module=f'stages.{stage_idx}')] + + # Classifier head + self.num_features = self.head_hidden_size = embed_dims[-1] + + norm_layer_cf = partial(LayerNorm2d, eps=1e-5) + self.head = NormMlpClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + norm_layer=norm_layer_cf, + ) + + # init weights + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def no_weight_decay_keywords(self): + return {'attention_biases'} + + @torch.jit.ignore + def no_weight_decay(self): + return {x for x in self.state_dict().keys() if 'attention_biases' in x} + + 
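+ # Parameter grouping used for layer-wise LR decay: 'patch_embed' forms the stem group; blocks are grouped per stage, with each stage's downsample assigned to that stage's first group.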
@torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^patch_embed', + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+).downsample', (0,)), + (r'^stages\.(\d+)\.\w+\.(\d+)', None), + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.patch_embed(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x) + else: + x = self.stages(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + if 'model' in state_dict.keys(): + state_dict = state_dict['model'] + target_sd = model.state_dict() + out_dict = {} + for k, v in state_dict.items(): + if k.endswith('attention_bias_idxs'): + continue + if 'attention_biases' in k: + # TODO: whether move this func into model for dynamic input resolution? (high risk) + v = resize_rel_pos_bias_table_levit(v.T, target_sd[k].shape[::-1]).T + out_dict[k] = v + return out_dict + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, + 'mean': IMAGENET_DEFAULT_MEAN, + 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.conv1.conv', + 'classifier': 'head.fc', + 'pool_size': (7, 7), + 'input_size': (3, 224, 224), + 'crop_pct': 0.95, + **kwargs, + } + + +default_cfgs = generate_default_cfgs({ + 'tiny_vit_5m_224.dist_in22k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_22k_distill.pth', + num_classes=21841 + ), + 'tiny_vit_5m_224.dist_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_22kto1k_distill.pth' + ), + 'tiny_vit_5m_224.in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_1k.pth' + ), + 'tiny_vit_11m_224.dist_in22k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_22k_distill.pth', + num_classes=21841 + ), + 'tiny_vit_11m_224.dist_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_22kto1k_distill.pth' + ), + 'tiny_vit_11m_224.in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_1k.pth' + ), + 'tiny_vit_21m_224.dist_in22k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22k_distill.pth', + num_classes=21841 + ), + 'tiny_vit_21m_224.dist_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_distill.pth' + ), + 'tiny_vit_21m_224.in1k': _cfg( + hf_hub_id='timm/', + #url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_1k.pth' + ), + 'tiny_vit_21m_384.dist_in22k_ft_in1k': _cfg( 
+ hf_hub_id='timm/', + # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_384_distill.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, + ), + 'tiny_vit_21m_512.dist_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_512_distill.pth', + input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash', + ), +}) + + +def _create_tiny_vit(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) + model = build_model_with_cfg( + TinyVit, + variant, + pretrained, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs + ) + return model + + +@register_model +def tiny_vit_5m_224(pretrained=False, **kwargs): + model_kwargs = dict( + embed_dims=[64, 128, 160, 320], + depths=[2, 2, 6, 2], + num_heads=[2, 4, 5, 10], + window_sizes=[7, 7, 14, 7], + drop_path_rate=0.0, + ) + model_kwargs.update(kwargs) + return _create_tiny_vit('tiny_vit_5m_224', pretrained, **model_kwargs) + + +@register_model +def tiny_vit_11m_224(pretrained=False, **kwargs): + model_kwargs = dict( + embed_dims=[64, 128, 256, 448], + depths=[2, 2, 6, 2], + num_heads=[2, 4, 8, 14], + window_sizes=[7, 7, 14, 7], + drop_path_rate=0.1, + ) + model_kwargs.update(kwargs) + return _create_tiny_vit('tiny_vit_11m_224', pretrained, **model_kwargs) + + +@register_model +def tiny_vit_21m_224(pretrained=False, **kwargs): + model_kwargs = dict( + embed_dims=[96, 192, 384, 576], + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 18], + window_sizes=[7, 7, 14, 7], + drop_path_rate=0.2, + ) + model_kwargs.update(kwargs) + return _create_tiny_vit('tiny_vit_21m_224', pretrained, **model_kwargs) + + +@register_model +def tiny_vit_21m_384(pretrained=False, **kwargs): + model_kwargs = dict( + embed_dims=[96, 192, 384, 576], + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 18], + window_sizes=[12, 12, 24, 12], + drop_path_rate=0.1, + ) + model_kwargs.update(kwargs) + return _create_tiny_vit('tiny_vit_21m_384', pretrained, **model_kwargs) + + +@register_model +def tiny_vit_21m_512(pretrained=False, **kwargs): + model_kwargs = dict( + embed_dims=[96, 192, 384, 576], + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 18], + window_sizes=[16, 16, 32, 16], + drop_path_rate=0.1, + ) + model_kwargs.update(kwargs) + return _create_tiny_vit('tiny_vit_21m_512', pretrained, **model_kwargs) diff --git a/pytorch-image-models/timm/models/tnt.py b/pytorch-image-models/timm/models/tnt.py new file mode 100644 index 0000000000000000000000000000000000000000..9e37770ac7adecb697ce3a7f53b3664da5c743ef --- /dev/null +++ b/pytorch-image-models/timm/models/tnt.py @@ -0,0 +1,374 @@ +""" Transformer in Transformer (TNT) in PyTorch + +A PyTorch implement of TNT as described in +'Transformer in Transformer' - https://arxiv.org/abs/2103.00112 + +The official mindspore code is released and available at +https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT +""" +import math +from typing import Optional + +import torch +import torch.nn as nn +from torch.utils.checkpoint import checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import Mlp, DropPath, trunc_normal_, _assert, to_2tuple +from ._builder import build_model_with_cfg +from ._registry import register_model +from .vision_transformer import resize_pos_embed + +__all__ = ['TNT'] # model_registry will 
add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'pixel_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'tnt_s_patch16_224': _cfg( + url='https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + ), + 'tnt_b_patch16_224': _cfg( + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + ), +} + + +class Attention(nn.Module): + """ Multi-Head Attention + """ + def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.hidden_dim = hidden_dim + self.num_heads = num_heads + head_dim = hidden_dim // num_heads + self.head_dim = head_dim + self.scale = head_dim ** -0.5 + + self.qk = nn.Linear(dim, hidden_dim * 2, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop, inplace=True) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop, inplace=True) + + def forward(self, x): + B, N, C = x.shape + qk = self.qk(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k = qk.unbind(0) # make torchscript happy (cannot use tensor as tuple) + v = self.v(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + """ TNT Block + """ + def __init__( + self, + dim, + dim_out, + num_pixel, + num_heads_in=4, + num_heads_out=12, + mlp_ratio=4., + qkv_bias=False, + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ): + super().__init__() + # Inner transformer + self.norm_in = norm_layer(dim) + self.attn_in = Attention( + dim, + dim, + num_heads=num_heads_in, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + ) + + self.norm_mlp_in = norm_layer(dim) + self.mlp_in = Mlp( + in_features=dim, + hidden_features=int(dim * 4), + out_features=dim, + act_layer=act_layer, + drop=proj_drop, + ) + + self.norm1_proj = norm_layer(dim) + self.proj = nn.Linear(dim * num_pixel, dim_out, bias=True) + + # Outer transformer + self.norm_out = norm_layer(dim_out) + self.attn_out = Attention( + dim_out, + dim_out, + num_heads=num_heads_out, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.norm_mlp = norm_layer(dim_out) + self.mlp = Mlp( + in_features=dim_out, + hidden_features=int(dim_out * mlp_ratio), + out_features=dim_out, + act_layer=act_layer, + drop=proj_drop, + ) + + def forward(self, pixel_embed, patch_embed): + # inner + pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed))) + pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed))) + # outer + B, N, C = patch_embed.size() + patch_embed = torch.cat( + [patch_embed[:, 0:1], patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape(B, N - 1, -1))], + dim=1) + patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed))) + patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed))) + return pixel_embed, patch_embed + + +class PixelEmbed(nn.Module): + """ Image to Pixel Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + # grid_size property necessary for resizing positional embedding + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + num_patches = (self.grid_size[0]) * (self.grid_size[1]) + self.img_size = img_size + self.num_patches = num_patches + self.in_dim = in_dim + new_patch_size = [math.ceil(ps / stride) for ps in patch_size] + self.new_patch_size = new_patch_size + + self.proj = nn.Conv2d(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride) + self.unfold = nn.Unfold(kernel_size=new_patch_size, stride=new_patch_size) + + def forward(self, x, pixel_pos): + B, C, H, W = x.shape + _assert(H == self.img_size[0], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + _assert(W == self.img_size[1], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + x = self.proj(x) + x = self.unfold(x) + x = x.transpose(1, 2).reshape(B * self.num_patches, self.in_dim, self.new_patch_size[0], self.new_patch_size[1]) + x = x + pixel_pos + x = x.reshape(B * self.num_patches, self.in_dim, -1).transpose(1, 2) + return x + + +class TNT(nn.Module): + """ Transformer in Transformer - https://arxiv.org/abs/2103.00112 + """ + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + global_pool='token', + embed_dim=768, + inner_dim=48, + depth=12, + num_heads_inner=4, + num_heads_outer=12, + mlp_ratio=4., + qkv_bias=False, + drop_rate=0., + pos_drop_rate=0., + proj_drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + first_stride=4, + ): + super().__init__() + assert global_pool in ('', 'token', 'avg') + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models + self.grad_checkpointing = False + + self.pixel_embed = PixelEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + in_dim=inner_dim, + stride=first_stride, + ) + num_patches = self.pixel_embed.num_patches + self.num_patches = num_patches + new_patch_size = self.pixel_embed.new_patch_size + num_pixel = new_patch_size[0] * new_patch_size[1] + + self.norm1_proj = norm_layer(num_pixel * inner_dim) + self.proj = nn.Linear(num_pixel * inner_dim, embed_dim) + self.norm2_proj = norm_layer(embed_dim) + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.patch_pos = 
nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + self.pixel_pos = nn.Parameter(torch.zeros(1, inner_dim, new_patch_size[0], new_patch_size[1])) + self.pos_drop = nn.Dropout(p=pos_drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + blocks = [] + for i in range(depth): + blocks.append(Block( + dim=inner_dim, + dim_out=embed_dim, + num_pixel=num_pixel, + num_heads_in=num_heads_inner, + num_heads_out=num_heads_outer, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + )) + self.blocks = nn.ModuleList(blocks) + self.norm = norm_layer(embed_dim) + + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.cls_token, std=.02) + trunc_normal_(self.patch_pos, std=.02) + trunc_normal_(self.pixel_pos, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'patch_pos', 'pixel_pos', 'cls_token'} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^cls_token|patch_pos|pixel_pos|pixel_embed|norm[12]_proj|proj', # stem and embed / pos + blocks=[ + (r'^blocks\.(\d+)', None), + (r'^norm', (99999,)), + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'token', 'avg') + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + pixel_embed = self.pixel_embed(x, self.pixel_pos) + + patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1)))) + patch_embed = torch.cat((self.cls_token.expand(B, -1, -1), patch_embed), dim=1) + patch_embed = patch_embed + self.patch_pos + patch_embed = self.pos_drop(patch_embed) + + if self.grad_checkpointing and not torch.jit.is_scripting(): + for blk in self.blocks: + pixel_embed, patch_embed = checkpoint(blk, pixel_embed, patch_embed) + else: + for blk in self.blocks: + pixel_embed, patch_embed = blk(pixel_embed, patch_embed) + + patch_embed = self.norm(patch_embed) + return patch_embed + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + if state_dict['patch_pos'].shape != model.patch_pos.shape: + state_dict['patch_pos'] = resize_pos_embed(state_dict['patch_pos'], + model.patch_pos, getattr(model, 'num_tokens', 1), model.pixel_embed.grid_size) + return state_dict + + +def 
_create_tnt(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + TNT, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def tnt_s_patch16_224(pretrained=False, **kwargs) -> TNT: + model_cfg = dict( + patch_size=16, embed_dim=384, inner_dim=24, depth=12, num_heads_outer=6, + qkv_bias=False) + model = _create_tnt('tnt_s_patch16_224', pretrained=pretrained, **dict(model_cfg, **kwargs)) + return model + + +@register_model +def tnt_b_patch16_224(pretrained=False, **kwargs) -> TNT: + model_cfg = dict( + patch_size=16, embed_dim=640, inner_dim=40, depth=12, num_heads_outer=10, + qkv_bias=False) + model = _create_tnt('tnt_b_patch16_224', pretrained=pretrained, **dict(model_cfg, **kwargs)) + return model diff --git a/pytorch-image-models/timm/models/tresnet.py b/pytorch-image-models/timm/models/tresnet.py new file mode 100644 index 0000000000000000000000000000000000000000..dec24c1f879744d9cb4f59c87ffde02d4c85437d --- /dev/null +++ b/pytorch-image-models/timm/models/tresnet.py @@ -0,0 +1,346 @@ +""" +TResNet: High Performance GPU-Dedicated Architecture +https://arxiv.org/pdf/2003.13630.pdf + +Original model: https://github.com/mrT23/TResNet + +""" +from collections import OrderedDict +from functools import partial +from typing import Optional + +import torch +import torch.nn as nn + +from timm.layers import SpaceToDepth, BlurPool2d, ClassifierHead, SEModule, ConvNormAct, DropPath +from ._builder import build_model_with_cfg +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs, register_model_deprecations + +__all__ = ['TResNet'] # model_registry will add each entrypoint fn to this + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__( + self, + inplanes, + planes, + stride=1, + downsample=None, + use_se=True, + aa_layer=None, + drop_path_rate=0. 
+ ): + super(BasicBlock, self).__init__() + self.downsample = downsample + self.stride = stride + act_layer = partial(nn.LeakyReLU, negative_slope=1e-3) + + self.conv1 = ConvNormAct(inplanes, planes, kernel_size=3, stride=stride, act_layer=act_layer, aa_layer=aa_layer) + self.conv2 = ConvNormAct(planes, planes, kernel_size=3, stride=1, apply_act=False) + self.act = nn.ReLU(inplace=True) + + rd_chs = max(planes * self.expansion // 4, 64) + self.se = SEModule(planes * self.expansion, rd_channels=rd_chs) if use_se else None + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + + def forward(self, x): + if self.downsample is not None: + shortcut = self.downsample(x) + else: + shortcut = x + out = self.conv1(x) + out = self.conv2(out) + if self.se is not None: + out = self.se(out) + out = self.drop_path(out) + shortcut + out = self.act(out) + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__( + self, + inplanes, + planes, + stride=1, + downsample=None, + use_se=True, + act_layer=None, + aa_layer=None, + drop_path_rate=0., + ): + super(Bottleneck, self).__init__() + self.downsample = downsample + self.stride = stride + act_layer = act_layer or partial(nn.LeakyReLU, negative_slope=1e-3) + + self.conv1 = ConvNormAct( + inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer) + self.conv2 = ConvNormAct( + planes, planes, kernel_size=3, stride=stride, act_layer=act_layer, aa_layer=aa_layer) + + reduction_chs = max(planes * self.expansion // 8, 64) + self.se = SEModule(planes, rd_channels=reduction_chs) if use_se else None + + self.conv3 = ConvNormAct( + planes, planes * self.expansion, kernel_size=1, stride=1, apply_act=False) + + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.act = nn.ReLU(inplace=True) + + def forward(self, x): + if self.downsample is not None: + shortcut = self.downsample(x) + else: + shortcut = x + out = self.conv1(x) + out = self.conv2(out) + if self.se is not None: + out = self.se(out) + out = self.conv3(out) + out = self.drop_path(out) + shortcut + out = self.act(out) + return out + + +class TResNet(nn.Module): + def __init__( + self, + layers, + in_chans=3, + num_classes=1000, + width_factor=1.0, + v2=False, + global_pool='fast', + drop_rate=0., + drop_path_rate=0., + ): + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + super(TResNet, self).__init__() + + aa_layer = BlurPool2d + act_layer = nn.LeakyReLU + + # TResnet stages + self.inplanes = int(64 * width_factor) + self.planes = int(64 * width_factor) + if v2: + self.inplanes = self.inplanes // 8 * 8 + self.planes = self.planes // 8 * 8 + + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] + conv1 = ConvNormAct(in_chans * 16, self.planes, stride=1, kernel_size=3, act_layer=act_layer) + layer1 = self._make_layer( + Bottleneck if v2 else BasicBlock, + self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[0]) + layer2 = self._make_layer( + Bottleneck if v2 else BasicBlock, + self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[1]) + layer3 = self._make_layer( + Bottleneck, + self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[2]) + layer4 = self._make_layer( + Bottleneck, + self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer, drop_path_rate=dpr[3]) + + # body + self.body = nn.Sequential(OrderedDict([ + ('s2d', 
SpaceToDepth()), + ('conv1', conv1), + ('layer1', layer1), + ('layer2', layer2), + ('layer3', layer3), + ('layer4', layer4), + ])) + + self.feature_info = [ + dict(num_chs=self.planes, reduction=2, module=''), # Not with S2D? + dict(num_chs=self.planes * (Bottleneck.expansion if v2 else 1), reduction=4, module='body.layer1'), + dict(num_chs=self.planes * 2 * (Bottleneck.expansion if v2 else 1), reduction=8, module='body.layer2'), + dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'), + dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4'), + ] + + # head + self.num_features = self.head_hidden_size = (self.planes * 8) * Bottleneck.expansion + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + # model initialization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu') + if isinstance(m, nn.Linear): + m.weight.data.normal_(0, 0.01) + + # residual connections special initialization + for m in self.modules(): + if isinstance(m, BasicBlock): + nn.init.zeros_(m.conv2.bn.weight) + if isinstance(m, Bottleneck): + nn.init.zeros_(m.conv3.bn.weight) + + def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None, drop_path_rate=0.): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + layers = [] + if stride == 2: + # avg pooling before 1x1 conv + layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False)) + layers += [ConvNormAct( + self.inplanes, planes * block.expansion, kernel_size=1, stride=1, apply_act=False)] + downsample = nn.Sequential(*layers) + + layers = [] + for i in range(blocks): + layers.append(block( + self.inplanes, + planes, + stride=stride if i == 0 else 1, + downsample=downsample if i == 0 else None, + use_se=use_se, + aa_layer=aa_layer, + drop_path_rate=drop_path_rate[i] if isinstance(drop_path_rate, list) else drop_path_rate, + )) + self.inplanes = planes * block.expansion + return nn.Sequential(*layers) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict(stem=r'^body\.conv1', blocks=r'^body\.layer(\d+)' if coarse else r'^body\.layer(\d+)\.(\d+)') + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.head.reset(num_classes, pool_type=global_pool) + + def forward_features(self, x): + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = self.body.s2d(x) + x = self.body.conv1(x) + x = checkpoint_seq([ + self.body.layer1, + self.body.layer2, + self.body.layer3, + self.body.layer4], + x, flatten=True) + else: + x = self.body(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + if 'body.conv1.conv.weight' in state_dict: + return state_dict + + import re + state_dict = state_dict.get('model', state_dict) + state_dict = state_dict.get('state_dict', state_dict) + out_dict = {} + for k, v in state_dict.items(): + k = re.sub(r'conv(\d+)\.0.0', lambda x: 
f'conv{int(x.group(1))}.conv', k) + k = re.sub(r'conv(\d+)\.0.1', lambda x: f'conv{int(x.group(1))}.bn', k) + k = re.sub(r'conv(\d+)\.0', lambda x: f'conv{int(x.group(1))}.conv', k) + k = re.sub(r'conv(\d+)\.1', lambda x: f'conv{int(x.group(1))}.bn', k) + k = re.sub(r'downsample\.(\d+)\.0', lambda x: f'downsample.{int(x.group(1))}.conv', k) + k = re.sub(r'downsample\.(\d+)\.1', lambda x: f'downsample.{int(x.group(1))}.bn', k) + if k.endswith('bn.weight'): + # convert weight from inplace_abn to batchnorm + v = v.abs().add(1e-5) + out_dict[k] = v + return out_dict + + +def _create_tresnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + TResNet, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True), + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': (0., 0., 0.), 'std': (1., 1., 1.), + 'first_conv': 'body.conv1.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'tresnet_m.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'), + 'tresnet_m.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221), + 'tresnet_m.miil_in1k': _cfg(hf_hub_id='timm/'), + 'tresnet_l.miil_in1k': _cfg(hf_hub_id='timm/'), + 'tresnet_xl.miil_in1k': _cfg(hf_hub_id='timm/'), + 'tresnet_m.miil_in1k_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + hf_hub_id='timm/'), + 'tresnet_l.miil_in1k_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + hf_hub_id='timm/'), + 'tresnet_xl.miil_in1k_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + hf_hub_id='timm/'), + + 'tresnet_v2_l.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'), + 'tresnet_v2_l.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221), +}) + + +@register_model +def tresnet_m(pretrained=False, **kwargs) -> TResNet: + model_args = dict(layers=[3, 4, 11, 3]) + return _create_tresnet('tresnet_m', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def tresnet_l(pretrained=False, **kwargs) -> TResNet: + model_args = dict(layers=[4, 5, 18, 3], width_factor=1.2) + return _create_tresnet('tresnet_l', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def tresnet_xl(pretrained=False, **kwargs) -> TResNet: + model_args = dict(layers=[4, 5, 24, 3], width_factor=1.3) + return _create_tresnet('tresnet_xl', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def tresnet_v2_l(pretrained=False, **kwargs) -> TResNet: + model_args = dict(layers=[3, 4, 23, 3], width_factor=1.0, v2=True) + return _create_tresnet('tresnet_v2_l', pretrained=pretrained, **dict(model_args, **kwargs)) + + +register_model_deprecations(__name__, { + 'tresnet_m_miil_in21k': 'tresnet_m.miil_in21k', + 'tresnet_m_448': 'tresnet_m.miil_in1k_448', + 'tresnet_l_448': 'tresnet_l.miil_in1k_448', + 'tresnet_xl_448': 'tresnet_xl.miil_in1k_448', +}) \ No newline at end of file diff --git a/pytorch-image-models/timm/models/twins.py b/pytorch-image-models/timm/models/twins.py new file mode 100644 index 0000000000000000000000000000000000000000..62029b94993df5fa86901cbf3c553f3444bb024a --- /dev/null +++ b/pytorch-image-models/timm/models/twins.py @@ -0,0 +1,581 @@ +""" Twins +A PyTorch impl of : `Twins: Revisiting the Design of Spatial Attention in Vision Transformers` + - https://arxiv.org/pdf/2104.13840.pdf + +Code/weights from 
https://github.com/Meituan-AutoML/Twins, original copyright/license info below + +""" +# -------------------------------------------------------- +# Twins +# Copyright (c) 2021 Meituan +# Licensed under The Apache 2.0 License [see LICENSE for details] +# Written by Xinjie Li, Xiangxiang Chu +# -------------------------------------------------------- +import math +from functools import partial +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import Mlp, DropPath, to_2tuple, trunc_normal_, use_fused_attn +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._features_fx import register_notrace_module +from ._registry import register_model, generate_default_cfgs +from .vision_transformer import Attention + +__all__ = ['Twins'] # model_registry will add each entrypoint fn to this + +Size_ = Tuple[int, int] + + +@register_notrace_module # reason: FX can't symbolically trace control flow in forward method +class LocallyGroupedAttn(nn.Module): + """ LSA: self attention within a group + """ + fused_attn: torch.jit.Final[bool] + + def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., ws=1): + assert ws != 1 + super(LocallyGroupedAttn, self).__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." + + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.qkv = nn.Linear(dim, dim * 3, bias=True) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.ws = ws + + def forward(self, x, size: Size_): + # There are two implementations for this function, zero padding or mask. We don't observe obvious difference for + # both. You can choose any one, we recommend forward_padding because it's neat. However, + # the masking implementation is more reasonable and accurate. 
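+ # In short: pad H and W up to multiples of self.ws, reshape into non-overlapping ws x ws windows, attend within each window, then reverse the partition and crop off the padding.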
+ B, N, C = x.shape + H, W = size + x = x.view(B, H, W, C) + pad_l = pad_t = 0 + pad_r = (self.ws - W % self.ws) % self.ws + pad_b = (self.ws - H % self.ws) % self.ws + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + _h, _w = Hp // self.ws, Wp // self.ws + x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) + qkv = self.qkv(x).reshape( + B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) + q, k, v = qkv.unbind(0) + + if self.fused_attn: + x = F.scaled_dot_product_attention( + q, k, v, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) + x = x.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + x = x.reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + # def forward_mask(self, x, size: Size_): + # B, N, C = x.shape + # H, W = size + # x = x.view(B, H, W, C) + # pad_l = pad_t = 0 + # pad_r = (self.ws - W % self.ws) % self.ws + # pad_b = (self.ws - H % self.ws) % self.ws + # x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + # _, Hp, Wp, _ = x.shape + # _h, _w = Hp // self.ws, Wp // self.ws + # mask = torch.zeros((1, Hp, Wp), device=x.device) + # mask[:, -pad_b:, :].fill_(1) + # mask[:, :, -pad_r:].fill_(1) + # + # x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) # B, _h, _w, ws, ws, C + # mask = mask.reshape(1, _h, self.ws, _w, self.ws).transpose(2, 3).reshape(1, _h * _w, self.ws * self.ws) + # attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) # 1, _h*_w, ws*ws, ws*ws + # attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-1000.0)).masked_fill(attn_mask == 0, float(0.0)) + # qkv = self.qkv(x).reshape( + # B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) + # # n_h, B, _w*_h, nhead, ws*ws, dim + # q, k, v = qkv[0], qkv[1], qkv[2] # B, _h*_w, n_head, ws*ws, dim_head + # attn = (q @ k.transpose(-2, -1)) * self.scale # B, _h*_w, n_head, ws*ws, ws*ws + # attn = attn + attn_mask.unsqueeze(2) + # attn = attn.softmax(dim=-1) + # attn = self.attn_drop(attn) # attn @v -> B, _h*_w, n_head, ws*ws, dim_head + # attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) + # x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) + # if pad_r > 0 or pad_b > 0: + # x = x[:, :H, :W, :].contiguous() + # x = x.reshape(B, N, C) + # x = self.proj(x) + # x = self.proj_drop(x) + # return x + + +class GlobalSubSampleAttn(nn.Module): + """ GSA: using a key to summarize the information for a group to be efficient. + """ + fused_attn: torch.jit.Final[bool] + + def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., sr_ratio=1): + super().__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
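+ # Queries are computed from all tokens; when sr_ratio > 1, keys/values come from the input after spatial sub-sampling by an sr_ratio-strided conv (self.sr), which reduces attention cost at high resolution.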
+ + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.q = nn.Linear(dim, dim, bias=True) + self.kv = nn.Linear(dim, dim * 2, bias=True) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + self.norm = nn.LayerNorm(dim) + else: + self.sr = None + self.norm = None + + def forward(self, x, size: Size_): + B, N, C = x.shape + q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + if self.sr is not None: + x = x.permute(0, 2, 1).reshape(B, C, *size) + x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1) + x = self.norm(x) + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + k, v = kv.unbind(0) + + if self.fused_attn: + x = torch.nn.functional.scaled_dot_product_attention( + q, k, v, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class Block(nn.Module): + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + sr_ratio=1, + ws=None, + ): + super().__init__() + self.norm1 = norm_layer(dim) + if ws is None: + self.attn = Attention(dim, num_heads, False, None, attn_drop, proj_drop) + elif ws == 1: + self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, proj_drop, sr_ratio) + else: + self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, proj_drop, ws) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + ) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x, size: Size_): + x = x + self.drop_path1(self.attn(self.norm1(x), size)) + x = x + self.drop_path2(self.mlp(self.norm2(x))) + return x + + +class PosConv(nn.Module): + # PEG from https://arxiv.org/abs/2102.10882 + def __init__(self, in_chans, embed_dim=768, stride=1): + super(PosConv, self).__init__() + self.proj = nn.Sequential( + nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim), + ) + self.stride = stride + + def forward(self, x, size: Size_): + B, N, C = x.shape + cnn_feat_token = x.transpose(1, 2).view(B, C, *size) + x = self.proj(cnn_feat_token) + if self.stride == 1: + x += cnn_feat_token + x = x.flatten(2).transpose(1, 2) + return x + + def no_weight_decay(self): + return ['proj.%d.weight' % i for i in range(4)] + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + self.img_size = img_size + self.patch_size = patch_size + assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \ + f"img_size {img_size} should be divided by patch_size {patch_size}." 
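+ # Non-overlapping patch grid: self.H and self.W count patches along each spatial dimension.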
+ self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1] + self.num_patches = self.H * self.W + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + self.norm = nn.LayerNorm(embed_dim) + + def forward(self, x) -> Tuple[torch.Tensor, Size_]: + B, C, H, W = x.shape + + x = self.proj(x).flatten(2).transpose(1, 2) + x = self.norm(x) + out_size = (H // self.patch_size[0], W // self.patch_size[1]) + + return x, out_size + + +class Twins(nn.Module): + """ Twins Vision Transfomer (Revisiting Spatial Attention) + + Adapted from PVT (PyramidVisionTransformer) class at https://github.com/whai362/PVT.git + """ + def __init__( + self, + img_size=224, + patch_size=4, + in_chans=3, + num_classes=1000, + global_pool='avg', + embed_dims=(64, 128, 256, 512), + num_heads=(1, 2, 4, 8), + mlp_ratios=(4, 4, 4, 4), + depths=(3, 4, 6, 3), + sr_ratios=(8, 4, 2, 1), + wss=None, + drop_rate=0., + pos_drop_rate=0., + proj_drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), + block_cls=Block, + ): + super().__init__() + self.num_classes = num_classes + self.global_pool = global_pool + self.depths = depths + self.embed_dims = embed_dims + self.num_features = self.head_hidden_size = embed_dims[-1] + self.grad_checkpointing = False + + img_size = to_2tuple(img_size) + prev_chs = in_chans + self.patch_embeds = nn.ModuleList() + self.pos_drops = nn.ModuleList() + for i in range(len(depths)): + self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i])) + self.pos_drops.append(nn.Dropout(p=pos_drop_rate)) + prev_chs = embed_dims[i] + img_size = tuple(t // patch_size for t in img_size) + patch_size = 2 + + self.blocks = nn.ModuleList() + self.feature_info = [] + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + cur = 0 + for k in range(len(depths)): + _block = nn.ModuleList([block_cls( + dim=embed_dims[k], + num_heads=num_heads[k], + mlp_ratio=mlp_ratios[k], + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[cur + i], + norm_layer=norm_layer, + sr_ratio=sr_ratios[k], + ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])], + ) + self.blocks.append(_block) + self.feature_info += [dict(module=f'block.{k}', num_chs=embed_dims[k], reduction=2**(2+k))] + cur += depths[k] + + self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for embed_dim in embed_dims]) + + self.norm = norm_layer(self.num_features) + + # classification head + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + # init weights + self.apply(self._init_weights) + + @torch.jit.ignore + def no_weight_decay(self): + return set(['pos_block.' 
+ n for n, p in self.pos_block.named_parameters()]) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^patch_embeds.0', # stem and embed + blocks=[ + (r'^(?:blocks|patch_embeds|pos_block)\.(\d+)', None), + ('^norm', (99999,)) + ] if coarse else [ + (r'^blocks\.(\d+)\.(\d+)', None), + (r'^(?:patch_embeds|pos_block)\.(\d+)', (0,)), + (r'^norm', (99999,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'avg') + self.global_pool = global_pool + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to all intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt == 'NCHW', 'Output shape for Twins must be NCHW.' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + + # FIXME slice block/pos_block if < max + + # forward pass + B, _, height, width = x.shape + for i, (embed, drop, blocks, pos_blk) in enumerate(zip( + self.patch_embeds, self.pos_drops, self.blocks, self.pos_block) + ): + x, size = embed(x) + x = drop(x) + for j, blk in enumerate(blocks): + x = blk(x, size) + if j == 0: + x = pos_blk(x, size) # PEG here + + if i < len(self.depths) - 1: + x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous() + if i in take_indices: + intermediates.append(x) + else: + if i in take_indices: + # only last feature can be normed + x_feat = self.norm(x) if norm else x + intermediates.append(x_feat.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous()) + + if intermediates_only: + return intermediates + + x = self.norm(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
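+ Replaces the final norm and/or the classifier head with nn.Identity as requested and returns the indices of the retained blocks.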
+ """ + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + # FIXME add block pruning + if prune_norm: + self.norm = nn.Identity() + if prune_head: + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + B = x.shape[0] + for i, (embed, drop, blocks, pos_blk) in enumerate( + zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)): + x, size = embed(x) + x = drop(x) + for j, blk in enumerate(blocks): + x = blk(x, size) + if j == 0: + x = pos_blk(x, size) # PEG here + if i < len(self.depths) - 1: + x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous() + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + x = x.mean(dim=1) + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_twins(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', 4) + model = build_model_with_cfg( + Twins, variant, pretrained, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embeds.0.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'twins_pcpvt_small.in1k': _cfg(hf_hub_id='timm/'), + 'twins_pcpvt_base.in1k': _cfg(hf_hub_id='timm/'), + 'twins_pcpvt_large.in1k': _cfg(hf_hub_id='timm/'), + 'twins_svt_small.in1k': _cfg(hf_hub_id='timm/'), + 'twins_svt_base.in1k': _cfg(hf_hub_id='timm/'), + 'twins_svt_large.in1k': _cfg(hf_hub_id='timm/'), +}) + + +@register_model +def twins_pcpvt_small(pretrained=False, **kwargs) -> Twins: + model_args = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]) + return _create_twins('twins_pcpvt_small', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def twins_pcpvt_base(pretrained=False, **kwargs) -> Twins: + model_args = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1]) + return _create_twins('twins_pcpvt_base', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def twins_pcpvt_large(pretrained=False, **kwargs) -> Twins: + model_args = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1]) + return _create_twins('twins_pcpvt_large', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def twins_svt_small(pretrained=False, **kwargs) -> Twins: + model_args = dict( + patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1]) + return _create_twins('twins_svt_small', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def twins_svt_base(pretrained=False, **kwargs) -> Twins: + model_args = dict( + patch_size=4, embed_dims=[96, 192, 384, 768], num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1]) 
+ return _create_twins('twins_svt_base', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def twins_svt_large(pretrained=False, **kwargs) -> Twins: + model_args = dict( + patch_size=4, embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1]) + return _create_twins('twins_svt_large', pretrained=pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/vgg.py b/pytorch-image-models/timm/models/vgg.py new file mode 100644 index 0000000000000000000000000000000000000000..4136b12c7c116bc3edf84df4d6f17a4011e301f0 --- /dev/null +++ b/pytorch-image-models/timm/models/vgg.py @@ -0,0 +1,298 @@ +"""VGG + +Adapted from https://github.com/pytorch/vision 'vgg.py' (BSD-3-Clause) with a few changes for +timm functionality. + +Copyright 2021 Ross Wightman +""" +from typing import Any, Dict, List, Optional, Union, cast + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import ClassifierHead +from ._builder import build_model_with_cfg +from ._features_fx import register_notrace_module +from ._registry import register_model, generate_default_cfgs + +__all__ = ['VGG'] + + +cfgs: Dict[str, List[Union[str, int]]] = { + 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], + 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], +} + + +@register_notrace_module # reason: FX can't symbolically trace control flow in forward method +class ConvMlp(nn.Module): + + def __init__( + self, + in_features=512, + out_features=4096, + kernel_size=7, + mlp_ratio=1.0, + drop_rate: float = 0.2, + act_layer: nn.Module = None, + conv_layer: nn.Module = None, + ): + super(ConvMlp, self).__init__() + self.input_kernel_size = kernel_size + mid_features = int(out_features * mlp_ratio) + self.fc1 = conv_layer(in_features, mid_features, kernel_size, bias=True) + self.act1 = act_layer(True) + self.drop = nn.Dropout(drop_rate) + self.fc2 = conv_layer(mid_features, out_features, 1, bias=True) + self.act2 = act_layer(True) + + def forward(self, x): + if x.shape[-2] < self.input_kernel_size or x.shape[-1] < self.input_kernel_size: + # keep the input size >= 7x7 + output_size = (max(self.input_kernel_size, x.shape[-2]), max(self.input_kernel_size, x.shape[-1])) + x = F.adaptive_avg_pool2d(x, output_size) + x = self.fc1(x) + x = self.act1(x) + x = self.drop(x) + x = self.fc2(x) + x = self.act2(x) + return x + + +class VGG(nn.Module): + + def __init__( + self, + cfg: List[Any], + num_classes: int = 1000, + in_chans: int = 3, + output_stride: int = 32, + mlp_ratio: float = 1.0, + act_layer: nn.Module = nn.ReLU, + conv_layer: nn.Module = nn.Conv2d, + norm_layer: nn.Module = None, + global_pool: str = 'avg', + drop_rate: float = 0., + ) -> None: + super(VGG, self).__init__() + assert output_stride == 32 + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + self.use_norm = norm_layer is not None + self.feature_info = [] + + prev_chs = in_chans + net_stride = 1 + pool_layer = nn.MaxPool2d + layers: List[nn.Module] = [] + for v in cfg: + last_idx = len(layers) - 1 + if v == 'M': + 
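+ # 'M' marks a downsampling point: record feature info for the preceding conv output, append a 2x2 max-pool, and double the running stride.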
self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{last_idx}')) + layers += [pool_layer(kernel_size=2, stride=2)] + net_stride *= 2 + else: + v = cast(int, v) + conv2d = conv_layer(prev_chs, v, kernel_size=3, padding=1) + if norm_layer is not None: + layers += [conv2d, norm_layer(v), act_layer(inplace=True)] + else: + layers += [conv2d, act_layer(inplace=True)] + prev_chs = v + self.features = nn.Sequential(*layers) + self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{len(layers) - 1}')) + + self.num_features = prev_chs + self.head_hidden_size = 4096 + self.pre_logits = ConvMlp( + prev_chs, + self.head_hidden_size, + 7, + mlp_ratio=mlp_ratio, + drop_rate=drop_rate, + act_layer=act_layer, + conv_layer=conv_layer, + ) + self.head = ClassifierHead( + self.head_hidden_size, + num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + ) + + self._initialize_weights() + + @torch.jit.ignore + def group_matcher(self, coarse=False): + # this treats BN layers as separate groups for bn variants, a lot of effort to fix that + return dict(stem=r'^features\.0', blocks=r'^features\.(\d+)') + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + x = self.features(x) + return x + + def forward_head(self, x: torch.Tensor, pre_logits: bool = False): + x = self.pre_logits(x) + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.forward_features(x) + x = self.forward_head(x) + return x + + def _initialize_weights(self) -> None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.constant_(m.bias, 0) + + +def _filter_fn(state_dict): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + for k, v in state_dict.items(): + k_r = k + k_r = k_r.replace('classifier.0', 'pre_logits.fc1') + k_r = k_r.replace('classifier.3', 'pre_logits.fc2') + k_r = k_r.replace('classifier.6', 'head.fc') + if 'classifier.0.weight' in k: + v = v.reshape(-1, 512, 7, 7) + if 'classifier.3.weight' in k: + v = v.reshape(-1, 4096, 1, 1) + out_dict[k_r] = v + return out_dict + + +def _create_vgg(variant: str, pretrained: bool, **kwargs: Any) -> VGG: + cfg = variant.split('_')[0] + # NOTE: VGG is one of few models with stride==1 features w/ 6 out_indices [0..5] + out_indices = kwargs.pop('out_indices', (0, 1, 2, 3, 4, 5)) + model = build_model_with_cfg( + VGG, + variant, + pretrained, + model_cfg=cfgs[cfg], + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + pretrained_filter_fn=_filter_fn, + **kwargs, + ) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': 
IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'features.0', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'vgg11.tv_in1k': _cfg(hf_hub_id='timm/'), + 'vgg13.tv_in1k': _cfg(hf_hub_id='timm/'), + 'vgg16.tv_in1k': _cfg(hf_hub_id='timm/'), + 'vgg19.tv_in1k': _cfg(hf_hub_id='timm/'), + 'vgg11_bn.tv_in1k': _cfg(hf_hub_id='timm/'), + 'vgg13_bn.tv_in1k': _cfg(hf_hub_id='timm/'), + 'vgg16_bn.tv_in1k': _cfg(hf_hub_id='timm/'), + 'vgg19_bn.tv_in1k': _cfg(hf_hub_id='timm/'), +}) + + +@register_model +def vgg11(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 11-layer model (configuration "A") from + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg11', pretrained=pretrained, **model_args) + + +@register_model +def vgg11_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 11-layer model (configuration "A") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg11_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg13(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 13-layer model (configuration "B") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg13', pretrained=pretrained, **model_args) + + +@register_model +def vgg13_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 13-layer model (configuration "B") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg13_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg16(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 16-layer model (configuration "D") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg16', pretrained=pretrained, **model_args) + + +@register_model +def vgg16_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 16-layer model (configuration "D") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg16_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg19(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 19-layer model (configuration "E") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg19', pretrained=pretrained, **model_args) + + +@register_model +def vgg19_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 19-layer model (configuration 'E') with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg19_bn', pretrained=pretrained, **model_args) \ No newline at end of file diff --git a/pytorch-image-models/timm/models/visformer.py b/pytorch-image-models/timm/models/visformer.py new file mode 100644 index 0000000000000000000000000000000000000000..2ed3be5da86107feac6e760343b2c57e97b651b3 --- /dev/null +++ b/pytorch-image-models/timm/models/visformer.py @@ -0,0 +1,549 @@ +""" Visformer 
+ +Paper: Visformer: The Vision-friendly Transformer - https://arxiv.org/abs/2104.12533 + +From original at https://github.com/danczs/Visformer + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d, create_classifier, use_fused_attn +from ._builder import build_model_with_cfg +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs + +__all__ = ['Visformer'] + + +class SpatialMlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0., + group=8, + spatial_conv=False, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + drop_probs = to_2tuple(drop) + + self.in_features = in_features + self.out_features = out_features + self.spatial_conv = spatial_conv + if self.spatial_conv: + if group < 2: # net setting + hidden_features = in_features * 5 // 6 + else: + hidden_features = in_features * 2 + self.hidden_features = hidden_features + self.group = group + self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False) + self.act1 = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + if self.spatial_conv: + self.conv2 = nn.Conv2d( + hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False) + self.act2 = act_layer() + else: + self.conv2 = None + self.act2 = None + self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False) + self.drop3 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.conv1(x) + x = self.act1(x) + x = self.drop1(x) + if self.conv2 is not None: + x = self.conv2(x) + x = self.act2(x) + x = self.conv3(x) + x = self.drop3(x) + return x + + +class Attention(nn.Module): + fused_attn: torch.jit.Final[bool] + + def __init__(self, dim, num_heads=8, head_dim_ratio=1., attn_drop=0., proj_drop=0.): + super().__init__() + self.dim = dim + self.num_heads = num_heads + head_dim = round(dim // num_heads * head_dim_ratio) + self.head_dim = head_dim + self.scale = head_dim ** -0.5 + self.fused_attn = use_fused_attn(experimental=True) + + self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, C, H, W = x.shape + x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3) + q, k, v = x.unbind(0) + + if self.fused_attn: + x = torch.nn.functional.scaled_dot_product_attention( + q.contiguous(), k.contiguous(), v.contiguous(), + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + def __init__( + self, + dim, + num_heads, + head_dim_ratio=1., + mlp_ratio=4., + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=LayerNorm2d, + group=8, + attn_disabled=False, + spatial_conv=False, + ): + super().__init__() + self.spatial_conv = 
spatial_conv + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + if attn_disabled: + self.norm1 = None + self.attn = None + else: + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + head_dim_ratio=head_dim_ratio, + attn_drop=attn_drop, + proj_drop=proj_drop, + ) + + self.norm2 = norm_layer(dim) + self.mlp = SpatialMlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + group=group, + spatial_conv=spatial_conv, + ) + + def forward(self, x): + if self.attn is not None: + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Visformer(nn.Module): + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + init_channels=32, + embed_dim=384, + depth=12, + num_heads=6, + mlp_ratio=4., + drop_rate=0., + pos_drop_rate=0., + proj_drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=LayerNorm2d, + attn_stage='111', + use_pos_embed=True, + spatial_conv='111', + vit_stem=False, + group=8, + global_pool='avg', + conv_init=False, + embed_norm=None, + ): + super().__init__() + img_size = to_2tuple(img_size) + self.num_classes = num_classes + self.embed_dim = embed_dim + self.init_channels = init_channels + self.img_size = img_size + self.vit_stem = vit_stem + self.conv_init = conv_init + if isinstance(depth, (list, tuple)): + self.stage_num1, self.stage_num2, self.stage_num3 = depth + depth = sum(depth) + else: + self.stage_num1 = self.stage_num3 = depth // 3 + self.stage_num2 = depth - self.stage_num1 - self.stage_num3 + self.use_pos_embed = use_pos_embed + self.grad_checkpointing = False + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + # stage 1 + if self.vit_stem: + self.stem = None + self.patch_embed1 = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + norm_layer=embed_norm, + flatten=False, + ) + img_size = [x // patch_size for x in img_size] + else: + if self.init_channels is None: + self.stem = None + self.patch_embed1 = PatchEmbed( + img_size=img_size, + patch_size=patch_size // 2, + in_chans=in_chans, + embed_dim=embed_dim // 2, + norm_layer=embed_norm, + flatten=False, + ) + img_size = [x // (patch_size // 2) for x in img_size] + else: + self.stem = nn.Sequential( + nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False), + nn.BatchNorm2d(self.init_channels), + nn.ReLU(inplace=True) + ) + img_size = [x // 2 for x in img_size] + self.patch_embed1 = PatchEmbed( + img_size=img_size, + patch_size=patch_size // 4, + in_chans=self.init_channels, + embed_dim=embed_dim // 2, + norm_layer=embed_norm, + flatten=False, + ) + img_size = [x // (patch_size // 4) for x in img_size] + + if self.use_pos_embed: + if self.vit_stem: + self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) + else: + self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim//2, *img_size)) + self.pos_drop = nn.Dropout(p=pos_drop_rate) + else: + self.pos_embed1 = None + + self.stage1 = nn.Sequential(*[ + Block( + dim=embed_dim//2, + num_heads=num_heads, + head_dim_ratio=0.5, + mlp_ratio=mlp_ratio, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + group=group, + attn_disabled=(attn_stage[0] == '0'), + spatial_conv=(spatial_conv[0] == '1'), + ) + for i in range(self.stage_num1) + ]) + + # stage2 + if not self.vit_stem: + self.patch_embed2 = 
PatchEmbed( + img_size=img_size, + patch_size=patch_size // 8, + in_chans=embed_dim // 2, + embed_dim=embed_dim, + norm_layer=embed_norm, + flatten=False, + ) + img_size = [x // (patch_size // 8) for x in img_size] + if self.use_pos_embed: + self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) + else: + self.pos_embed2 = None + else: + self.patch_embed2 = None + self.stage2 = nn.Sequential(*[ + Block( + dim=embed_dim, + num_heads=num_heads, + head_dim_ratio=1.0, + mlp_ratio=mlp_ratio, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + group=group, + attn_disabled=(attn_stage[1] == '0'), + spatial_conv=(spatial_conv[1] == '1'), + ) + for i in range(self.stage_num1, self.stage_num1+self.stage_num2) + ]) + + # stage 3 + if not self.vit_stem: + self.patch_embed3 = PatchEmbed( + img_size=img_size, + patch_size=patch_size // 8, + in_chans=embed_dim, + embed_dim=embed_dim * 2, + norm_layer=embed_norm, + flatten=False, + ) + img_size = [x // (patch_size // 8) for x in img_size] + if self.use_pos_embed: + self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size)) + else: + self.pos_embed3 = None + else: + self.patch_embed3 = None + self.stage3 = nn.Sequential(*[ + Block( + dim=embed_dim * 2, + num_heads=num_heads, + head_dim_ratio=1.0, + mlp_ratio=mlp_ratio, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + group=group, + attn_disabled=(attn_stage[2] == '0'), + spatial_conv=(spatial_conv[2] == '1'), + ) + for i in range(self.stage_num1+self.stage_num2, depth) + ]) + + self.num_features = self.head_hidden_size = embed_dim if self.vit_stem else embed_dim * 2 + self.norm = norm_layer(self.num_features) + + # head + global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + self.global_pool = global_pool + self.head_drop = nn.Dropout(drop_rate) + self.head = head + + # weights init + if self.use_pos_embed: + trunc_normal_(self.pos_embed1, std=0.02) + if not self.vit_stem: + trunc_normal_(self.pos_embed2, std=0.02) + trunc_normal_(self.pos_embed3, std=0.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Conv2d): + if self.conv_init: + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + else: + trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0.) 
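A shape sketch, not part of the patch, assuming the visformer_tiny entrypoint registered further down in this file: the stem plus the stage-1 patch embed reduce resolution by 8x, and stages 2 and 3 each halve it again, so a 224x224 input leaves forward_features() as a 7x7 map with embed_dim * 2 = 384 channels.

import timm
import torch

m = timm.create_model('visformer_tiny', pretrained=False)
feat = m.forward_features(torch.randn(1, 3, 224, 224))
print(feat.shape)  # expected: torch.Size([1, 384, 7, 7])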
+ + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^patch_embed1|pos_embed1|stem', # stem and embed + blocks=[ + (r'^stage(\d+)\.(\d+)' if coarse else r'^stage(\d+)\.(\d+)', None), + (r'^(?:patch_embed|pos_embed)(\d+)', (0,)), + (r'^norm', (99999,)) + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + if self.stem is not None: + x = self.stem(x) + + # stage 1 + x = self.patch_embed1(x) + if self.pos_embed1 is not None: + x = self.pos_drop(x + self.pos_embed1) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stage1, x) + else: + x = self.stage1(x) + + # stage 2 + if self.patch_embed2 is not None: + x = self.patch_embed2(x) + if self.pos_embed2 is not None: + x = self.pos_drop(x + self.pos_embed2) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stage2, x) + else: + x = self.stage2(x) + + # stage3 + if self.patch_embed3 is not None: + x = self.patch_embed3(x) + if self.pos_embed3 is not None: + x = self.pos_drop(x + self.pos_embed3) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stage3, x) + else: + x = self.stage3(x) + + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + model = build_model_with_cfg(Visformer, variant, pretrained, **kwargs) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'visformer_tiny.in1k': _cfg(hf_hub_id='timm/'), + 'visformer_small.in1k': _cfg(hf_hub_id='timm/'), +}) + + +@register_model +def visformer_tiny(pretrained=False, **kwargs) -> Visformer: + model_cfg = dict( + init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4., group=8, + attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, + embed_norm=nn.BatchNorm2d) + model = _create_visformer('visformer_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs)) + return model + + +@register_model +def visformer_small(pretrained=False, **kwargs) -> Visformer: + model_cfg = dict( + init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4., group=8, + attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, + embed_norm=nn.BatchNorm2d) + model = _create_visformer('visformer_small', pretrained=pretrained, **dict(model_cfg, **kwargs)) + return model + + +# @register_model +# def visformer_net1(pretrained=False, **kwargs): +# 
model = Visformer( +# init_channels=None, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=True, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net2(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net3(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net4(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net5(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111', +# spatial_conv='111', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net6(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111', +# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net7(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=(6, 7, 7), num_heads=6, group=1, attn_stage='000', +# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model + + + + diff --git a/pytorch-image-models/timm/models/vision_transformer.py b/pytorch-image-models/timm/models/vision_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..b3b0ddca0776e1a344157e5c5fb05a107a7ca13b --- /dev/null +++ b/pytorch-image-models/timm/models/vision_transformer.py @@ -0,0 +1,3420 @@ +""" Vision Transformer (ViT) in PyTorch + +A PyTorch implement of Vision Transformers as described in: + +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' + - https://arxiv.org/abs/2010.11929 + +`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` + - https://arxiv.org/abs/2106.10270 + +`FlexiViT: One Model for All Patch Sizes` + - https://arxiv.org/abs/2212.08013 + +The official jax code is released and available at + * https://github.com/google-research/vision_transformer + * https://github.com/google-research/big_vision + +Acknowledgments: + * The paper authors for releasing code and weights, thanks! 
+ * I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch + * Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT + * Bert reference code checks against Huggingface Transformers and Tensorflow Bert + +Hacked together by / Copyright 2020, Ross Wightman +""" +import copy +import logging +import math +from collections import OrderedDict +from functools import partial +from typing import Any, Callable, Dict, Optional, Set, Tuple, Type, Union, List +try: + from typing import Literal +except ImportError: + from typing_extensions import Literal + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint +from torch.jit import Final + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD, \ + OPENAI_CLIP_MEAN, OPENAI_CLIP_STD +from timm.layers import PatchEmbed, Mlp, DropPath, AttentionPoolLatent, RmsNorm, PatchDropout, SwiGLUPacked, \ + trunc_normal_, lecun_normal_, resample_patch_embed, resample_abs_pos_embed, use_fused_attn, \ + get_act_layer, get_norm_layer, LayerType +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._manipulate import named_apply, checkpoint_seq, adapt_input_conv +from ._registry import generate_default_cfgs, register_model, register_model_deprecations + +__all__ = ['VisionTransformer'] # model_registry will add each entrypoint fn to this + + +_logger = logging.getLogger(__name__) + + +class Attention(nn.Module): + fused_attn: Final[bool] + + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = False, + qk_norm: bool = False, + attn_drop: float = 0., + proj_drop: float = 0., + norm_layer: nn.Module = nn.LayerNorm, + ) -> None: + super().__init__() + assert dim % num_heads == 0, 'dim should be divisible by num_heads' + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.scale = self.head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() + self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + q, k = self.q_norm(q), self.k_norm(k) + + if self.fused_attn: + x = F.scaled_dot_product_attention( + q, k, v, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LayerScale(nn.Module): + def __init__( + self, + dim: int, + init_values: float = 1e-5, + inplace: bool = False, + ) -> None: + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return x.mul_(self.gamma) if self.inplace else x * self.gamma + + +class Block(nn.Module): + def __init__( + self, + dim: int, + num_heads: int, + mlp_ratio: float = 4., + qkv_bias: bool = False, + qk_norm: bool = False, + proj_drop: float = 0., + attn_drop: float = 0., + init_values: 
Optional[float] = None, + drop_path: float = 0., + act_layer: nn.Module = nn.GELU, + norm_layer: nn.Module = nn.LayerNorm, + mlp_layer: nn.Module = Mlp, + ) -> None: + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_norm=qk_norm, + attn_drop=attn_drop, + proj_drop=proj_drop, + norm_layer=norm_layer, + ) + self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = mlp_layer( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + ) + self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x)))) + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class ResPostBlock(nn.Module): + def __init__( + self, + dim: int, + num_heads: int, + mlp_ratio: float = 4., + qkv_bias: bool = False, + qk_norm: bool = False, + proj_drop: float = 0., + attn_drop: float = 0., + init_values: Optional[float] = None, + drop_path: float = 0., + act_layer: nn.Module = nn.GELU, + norm_layer: nn.Module = nn.LayerNorm, + mlp_layer: nn.Module = Mlp, + ) -> None: + super().__init__() + self.init_values = init_values + + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_norm=qk_norm, + attn_drop=attn_drop, + proj_drop=proj_drop, + norm_layer=norm_layer, + ) + self.norm1 = norm_layer(dim) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.mlp = mlp_layer( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + ) + self.norm2 = norm_layer(dim) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.init_weights() + + def init_weights(self) -> None: + # NOTE this init overrides that base model init with specific changes for the block type + if self.init_values is not None: + nn.init.constant_(self.norm1.weight, self.init_values) + nn.init.constant_(self.norm2.weight, self.init_values) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = x + self.drop_path1(self.norm1(self.attn(x))) + x = x + self.drop_path2(self.norm2(self.mlp(x))) + return x + + +class ParallelScalingBlock(nn.Module): + """ Parallel ViT block (MLP & Attention in parallel) + Based on: + 'Scaling Vision Transformers to 22 Billion Parameters` - https://arxiv.org/abs/2302.05442 + """ + fused_attn: Final[bool] + + def __init__( + self, + dim: int, + num_heads: int, + mlp_ratio: float = 4., + qkv_bias: bool = False, + qk_norm: bool = False, + proj_drop: float = 0., + attn_drop: float = 0., + init_values: Optional[float] = None, + drop_path: float = 0., + act_layer: nn.Module = nn.GELU, + norm_layer: nn.Module = nn.LayerNorm, + mlp_layer: Optional[nn.Module] = None, + ) -> None: + super().__init__() + assert dim % num_heads == 0, 'dim should be divisible by num_heads' + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.scale = self.head_dim ** -0.5 + self.fused_attn = use_fused_attn() + mlp_hidden_dim = int(mlp_ratio * dim) + in_proj_out_dim = mlp_hidden_dim + 3 * dim + + self.in_norm = norm_layer(dim) + self.in_proj = nn.Linear(dim, in_proj_out_dim, bias=qkv_bias) + self.in_split = [mlp_hidden_dim] + [dim] * 3 + if qkv_bias: + self.register_buffer('qkv_bias', None) + self.register_parameter('mlp_bias', None) + else: + self.register_buffer('qkv_bias', torch.zeros(3 * dim), persistent=False) + self.mlp_bias = nn.Parameter(torch.zeros(mlp_hidden_dim)) + + self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() + self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() + self.attn_drop = nn.Dropout(attn_drop) + self.attn_out_proj = nn.Linear(dim, dim) + + self.mlp_drop = nn.Dropout(proj_drop) + self.mlp_act = act_layer() + self.mlp_out_proj = nn.Linear(mlp_hidden_dim, dim) + + self.ls = LayerScale(dim, init_values=init_values) if init_values is not None else nn.Identity() + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, N, C = x.shape + + # Combined MLP fc1 & qkv projections + y = self.in_norm(x) + if self.mlp_bias is not None: + # Concat constant zero-bias for qkv w/ trainable mlp_bias. 
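# NOTE: in_proj emits mlp_hidden_dim + 3 * dim channels in a single matmul; torch.split with
# self.in_split below recovers the MLP hidden features and q, k, v.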
+ # Appears faster than adding to x_mlp separately + y = F.linear(y, self.in_proj.weight, torch.cat((self.qkv_bias, self.mlp_bias))) + else: + y = self.in_proj(y) + x_mlp, q, k, v = torch.split(y, self.in_split, dim=-1) + + # Dot product attention w/ qk norm + q = self.q_norm(q.view(B, N, self.num_heads, self.head_dim)).transpose(1, 2) + k = self.k_norm(k.view(B, N, self.num_heads, self.head_dim)).transpose(1, 2) + v = v.view(B, N, self.num_heads, self.head_dim).transpose(1, 2) + if self.fused_attn: + x_attn = F.scaled_dot_product_attention( + q, k, v, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x_attn = attn @ v + x_attn = x_attn.transpose(1, 2).reshape(B, N, C) + x_attn = self.attn_out_proj(x_attn) + + # MLP activation, dropout, fc2 + x_mlp = self.mlp_act(x_mlp) + x_mlp = self.mlp_drop(x_mlp) + x_mlp = self.mlp_out_proj(x_mlp) + + # Add residual w/ drop path & layer scale applied + y = self.drop_path(self.ls(x_attn + x_mlp)) + x = x + y + return x + + +class ParallelThingsBlock(nn.Module): + """ Parallel ViT block (N parallel attention followed by N parallel MLP) + Based on: + `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 + """ + def __init__( + self, + dim: int, + num_heads: int, + num_parallel: int = 2, + mlp_ratio: float = 4., + qkv_bias: bool = False, + qk_norm: bool = False, + init_values: Optional[float] = None, + proj_drop: float = 0., + attn_drop: float = 0., + drop_path: float = 0., + act_layer: nn.Module = nn.GELU, + norm_layer: nn.Module = nn.LayerNorm, + mlp_layer: nn.Module = Mlp, + ) -> None: + super().__init__() + self.num_parallel = num_parallel + self.attns = nn.ModuleList() + self.ffns = nn.ModuleList() + for _ in range(num_parallel): + self.attns.append(nn.Sequential(OrderedDict([ + ('norm', norm_layer(dim)), + ('attn', Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_norm=qk_norm, + attn_drop=attn_drop, + proj_drop=proj_drop, + norm_layer=norm_layer, + )), + ('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()), + ('drop_path', DropPath(drop_path) if drop_path > 0. else nn.Identity()) + ]))) + self.ffns.append(nn.Sequential(OrderedDict([ + ('norm', norm_layer(dim)), + ('mlp', mlp_layer( + dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + )), + ('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()), + ('drop_path', DropPath(drop_path) if drop_path > 0. 
else nn.Identity()) + ]))) + + def _forward_jit(self, x: torch.Tensor) -> torch.Tensor: + x = x + torch.stack([attn(x) for attn in self.attns]).sum(dim=0) + x = x + torch.stack([ffn(x) for ffn in self.ffns]).sum(dim=0) + return x + + @torch.jit.ignore + def _forward(self, x: torch.Tensor) -> torch.Tensor: + x = x + sum(attn(x) for attn in self.attns) + x = x + sum(ffn(x) for ffn in self.ffns) + return x + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if torch.jit.is_scripting() or torch.jit.is_tracing(): + return self._forward_jit(x) + else: + return self._forward(x) + + +def global_pool_nlc( + x: torch.Tensor, + pool_type: str = 'token', + num_prefix_tokens: int = 1, + reduce_include_prefix: bool = False, +): + if not pool_type: + return x + + if pool_type == 'token': + x = x[:, 0] # class token + else: + x = x if reduce_include_prefix else x[:, num_prefix_tokens:] + if pool_type == 'avg': + x = x.mean(dim=1) + elif pool_type == 'avgmax': + x = 0.5 * (x.amax(dim=1) + x.mean(dim=1)) + elif pool_type == 'max': + x = x.amax(dim=1) + else: + assert not pool_type, f'Unknown pool type {pool_type}' + + return x + + +class VisionTransformer(nn.Module): + """ Vision Transformer + + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` + - https://arxiv.org/abs/2010.11929 + """ + dynamic_img_size: Final[bool] + + def __init__( + self, + img_size: Union[int, Tuple[int, int]] = 224, + patch_size: Union[int, Tuple[int, int]] = 16, + in_chans: int = 3, + num_classes: int = 1000, + global_pool: Literal['', 'avg', 'avgmax', 'max', 'token', 'map'] = 'token', + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + mlp_ratio: float = 4., + qkv_bias: bool = True, + qk_norm: bool = False, + init_values: Optional[float] = None, + class_token: bool = True, + pos_embed: str = 'learn', + no_embed_class: bool = False, + reg_tokens: int = 0, + pre_norm: bool = False, + final_norm: bool = True, + fc_norm: Optional[bool] = None, + dynamic_img_size: bool = False, + dynamic_img_pad: bool = False, + drop_rate: float = 0., + pos_drop_rate: float = 0., + patch_drop_rate: float = 0., + proj_drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + weight_init: Literal['skip', 'jax', 'jax_nlhb', 'moco', ''] = '', + fix_init: bool = False, + embed_layer: Callable = PatchEmbed, + norm_layer: Optional[LayerType] = None, + act_layer: Optional[LayerType] = None, + block_fn: Type[nn.Module] = Block, + mlp_layer: Type[nn.Module] = Mlp, + ) -> None: + """ + Args: + img_size: Input image size. + patch_size: Patch size. + in_chans: Number of image input channels. + num_classes: Number of classes for classification head. + global_pool: Type of global pooling for final sequence (default: 'token'). + embed_dim: Transformer embedding dimension. + depth: Depth of transformer. + num_heads: Number of attention heads. + mlp_ratio: Ratio of mlp hidden dim to embedding dim. + qkv_bias: Enable bias for qkv projections if True. + init_values: Layer-scale init values (layer-scale enabled if not None). + class_token: Use class token. + no_embed_class: Don't include position embeddings for class (or reg) tokens. + reg_tokens: Number of register tokens. + pre_norm: Enable norm after embeddings, before transformer blocks (standard in CLIP ViT). + final_norm: Enable norm after transformer blocks, before head (standard in most ViT). + fc_norm: Move final norm after pool (instead of before), if None, enabled when global_pool == 'avg'. 
+ drop_rate: Head dropout rate. + pos_drop_rate: Position embedding dropout rate. + attn_drop_rate: Attention dropout rate. + drop_path_rate: Stochastic depth rate. + weight_init: Weight initialization scheme. + fix_init: Apply weight initialization fix (scaling w/ layer index). + embed_layer: Patch embedding layer. + norm_layer: Normalization layer. + act_layer: MLP activation layer. + block_fn: Transformer block layer. + """ + super().__init__() + assert global_pool in ('', 'avg', 'avgmax', 'max', 'token', 'map') + assert class_token or global_pool != 'token' + assert pos_embed in ('', 'none', 'learn') + use_fc_norm = global_pool in ('avg', 'avgmax', 'max') if fc_norm is None else fc_norm + norm_layer = get_norm_layer(norm_layer) or partial(nn.LayerNorm, eps=1e-6) + act_layer = get_act_layer(act_layer) or nn.GELU + + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models + self.num_prefix_tokens = 1 if class_token else 0 + self.num_prefix_tokens += reg_tokens + self.num_reg_tokens = reg_tokens + self.has_class_token = class_token + self.no_embed_class = no_embed_class # don't embed prefix positions (includes reg) + self.dynamic_img_size = dynamic_img_size + self.grad_checkpointing = False + + embed_args = {} + if dynamic_img_size: + # flatten deferred until after pos embed + embed_args.update(dict(strict_img_size=False, output_fmt='NHWC')) + self.patch_embed = embed_layer( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + bias=not pre_norm, # disable bias if pre-norm is used (e.g. CLIP) + dynamic_img_pad=dynamic_img_pad, + **embed_args, + ) + num_patches = self.patch_embed.num_patches + reduction = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None + self.reg_token = nn.Parameter(torch.zeros(1, reg_tokens, embed_dim)) if reg_tokens else None + embed_len = num_patches if no_embed_class else num_patches + self.num_prefix_tokens + if not pos_embed or pos_embed == 'none': + self.pos_embed = None + else: + self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02) + self.pos_drop = nn.Dropout(p=pos_drop_rate) + if patch_drop_rate > 0: + self.patch_drop = PatchDropout( + patch_drop_rate, + num_prefix_tokens=self.num_prefix_tokens, + ) + else: + self.patch_drop = nn.Identity() + self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity() + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.Sequential(*[ + block_fn( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_norm=qk_norm, + init_values=init_values, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + act_layer=act_layer, + mlp_layer=mlp_layer, + ) + for i in range(depth)]) + self.feature_info = [ + dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=reduction) for i in range(depth)] + self.norm = norm_layer(embed_dim) if final_norm and not use_fc_norm else nn.Identity() + + # Classifier Head + if global_pool == 'map': + self.attn_pool = AttentionPoolLatent( + self.embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + norm_layer=norm_layer, + ) + else: + self.attn_pool = None + self.fc_norm = norm_layer(embed_dim) if final_norm and use_fc_norm else nn.Identity() + 
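# NOTE: with final_norm=True exactly one of self.norm / self.fc_norm is a real norm layer;
# by default it sits pre-pool for 'token'/'map' pooling and post-pool (fc_norm) for avg/max pooling.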
self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + if weight_init != 'skip': + self.init_weights(weight_init) + if fix_init: + self.fix_init_weight() + + def fix_init_weight(self): + def rescale(param, _layer_id): + param.div_(math.sqrt(2.0 * _layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def init_weights(self, mode: str = '') -> None: + assert mode in ('jax', 'jax_nlhb', 'moco', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. + if self.pos_embed is not None: + trunc_normal_(self.pos_embed, std=.02) + if self.cls_token is not None: + nn.init.normal_(self.cls_token, std=1e-6) + if self.reg_token is not None: + nn.init.normal_(self.reg_token, std=1e-6) + named_apply(get_init_weights_vit(mode, head_bias), self) + + def _init_weights(self, m: nn.Module) -> None: + # this fn left here for compat with downstream users + init_weights_vit_timm(m) + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path: str, prefix: str = '') -> None: + _load_weights(self, checkpoint_path, prefix) + + @torch.jit.ignore + def no_weight_decay(self) -> Set: + return {'pos_embed', 'cls_token', 'dist_token'} + + @torch.jit.ignore + def group_matcher(self, coarse: bool = False) -> Dict: + return dict( + stem=r'^cls_token|pos_embed|patch_embed', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable: bool = True) -> None: + self.grad_checkpointing = enable + if hasattr(self.patch_embed, 'set_grad_checkpointing'): + self.patch_embed.set_grad_checkpointing(enable) + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'avg', 'avgmax', 'max', 'token', 'map') + if global_pool == 'map' and self.attn_pool is None: + assert False, "Cannot currently add attention pooling in reset_classifier()." 
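# NOTE: attention pooling ('map') is only built in __init__; reset_classifier() can drop it
# when switching away from 'map' (below) but cannot add it after construction.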
+ elif global_pool != 'map' and self.attn_pool is not None: + self.attn_pool = None # remove attention pooling + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def set_input_size( + self, + img_size: Optional[Tuple[int, int]] = None, + patch_size: Optional[Tuple[int, int]] = None, + ): + """Method updates the input image resolution, patch size + + Args: + img_size: New input resolution, if None current resolution is used + patch_size: New patch size, if None existing patch size is used + """ + prev_grid_size = self.patch_embed.grid_size + self.patch_embed.set_input_size(img_size=img_size, patch_size=patch_size) + if self.pos_embed is not None: + num_prefix_tokens = 0 if self.no_embed_class else self.num_prefix_tokens + num_new_tokens = self.patch_embed.num_patches + num_prefix_tokens + if num_new_tokens != self.pos_embed.shape[1]: + self.pos_embed = nn.Parameter(resample_abs_pos_embed( + self.pos_embed, + new_size=self.patch_embed.grid_size, + old_size=prev_grid_size, + num_prefix_tokens=num_prefix_tokens, + verbose=True, + )) + + def _pos_embed(self, x: torch.Tensor) -> torch.Tensor: + if self.pos_embed is None: + return x.view(x.shape[0], -1, x.shape[-1]) + + if self.dynamic_img_size: + B, H, W, C = x.shape + prev_grid_size = self.patch_embed.grid_size + pos_embed = resample_abs_pos_embed( + self.pos_embed, + new_size=(H, W), + old_size=prev_grid_size, + num_prefix_tokens=0 if self.no_embed_class else self.num_prefix_tokens, + ) + x = x.view(B, -1, C) + else: + pos_embed = self.pos_embed + + to_cat = [] + if self.cls_token is not None: + to_cat.append(self.cls_token.expand(x.shape[0], -1, -1)) + if self.reg_token is not None: + to_cat.append(self.reg_token.expand(x.shape[0], -1, -1)) + + if self.no_embed_class: + # deit-3, updated JAX (big vision) + # position embedding does not overlap with class token, add then concat + x = x + pos_embed + if to_cat: + x = torch.cat(to_cat + [x], dim=1) + else: + # original timm, JAX, and deit vit impl + # pos_embed has entry for class token, concat then add + if to_cat: + x = torch.cat(to_cat + [x], dim=1) + x = x + pos_embed + + return self.pos_drop(x) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + return_prefix_tokens: bool = False, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + return_prefix_tokens: Return both prefix and spatial intermediate tokens + norm: Apply norm layer to all intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' 
+ reshape = output_fmt == 'NCHW' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + + # forward pass + B, _, height, width = x.shape + x = self.patch_embed(x) + x = self._pos_embed(x) + x = self.patch_drop(x) + x = self.norm_pre(x) + + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + blocks = self.blocks + else: + blocks = self.blocks[:max_index + 1] + for i, blk in enumerate(blocks): + x = blk(x) + if i in take_indices: + # normalize intermediates with final norm layer if enabled + intermediates.append(self.norm(x) if norm else x) + + # process intermediates + if self.num_prefix_tokens: + # split prefix (e.g. class, distill) and spatial feature tokens + prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates] + intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates] + if reshape: + # reshape to BCHW output format + H, W = self.patch_embed.dynamic_feat_size((height, width)) + intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] + if not torch.jit.is_scripting() and return_prefix_tokens: + # return_prefix not support in torchscript due to poor type handling + intermediates = list(zip(intermediates, prefix_tokens)) + + if intermediates_only: + return intermediates + + x = self.norm(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. + """ + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + self.blocks = self.blocks[:max_index + 1] # truncate blocks + if prune_norm: + self.norm = nn.Identity() + if prune_head: + self.fc_norm = nn.Identity() + self.reset_classifier(0, '') + return take_indices + + def get_intermediate_layers( + self, + x: torch.Tensor, + n: Union[int, List[int], Tuple[int]] = 1, + reshape: bool = False, + return_prefix_tokens: bool = False, + norm: bool = False, + ) -> List[torch.Tensor]: + """ Intermediate layer accessor inspired by DINO / DINOv2 interface. + NOTE: This API is for backwards compat, favour using forward_intermediates() directly. 
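For example (a sketch assuming a registered entrypoint such as vit_small_patch16_224):

    import timm
    import torch

    vit = timm.create_model('vit_small_patch16_224', pretrained=False)
    feats = vit.forward_intermediates(
        torch.randn(1, 3, 224, 224),
        indices=4,                 # take the last four blocks
        output_fmt='NCHW',
        intermediates_only=True,
    )
    print([f.shape for f in feats])  # 4 x torch.Size([1, 384, 14, 14]) at 224 with patch16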
+ """ + return self.forward_intermediates( + x, n, + return_prefix_tokens=return_prefix_tokens, + norm=norm, + output_fmt='NCHW' if reshape else 'NLC', + intermediates_only=True, + ) + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + x = self.patch_embed(x) + x = self._pos_embed(x) + x = self.patch_drop(x) + x = self.norm_pre(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + x = self.norm(x) + return x + + def pool(self, x: torch.Tensor, pool_type: Optional[str] = None) -> torch.Tensor: + if self.attn_pool is not None: + x = self.attn_pool(x) + return x + pool_type = self.global_pool if pool_type is None else pool_type + x = global_pool_nlc(x, pool_type=pool_type, num_prefix_tokens=self.num_prefix_tokens) + return x + + def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor: + x = self.pool(x) + x = self.fc_norm(x) + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def init_weights_vit_timm(module: nn.Module, name: str = '') -> None: + """ ViT weight initialization, original timm impl (for reproducibility) """ + if isinstance(module, nn.Linear): + trunc_normal_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def init_weights_vit_jax(module: nn.Module, name: str = '', head_bias: float = 0.0) -> None: + """ ViT weight initialization, matching JAX (Flax) impl """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + else: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.normal_(module.bias, std=1e-6) if 'mlp' in name else nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def init_weights_vit_moco(module: nn.Module, name: str = '') -> None: + """ ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed """ + if isinstance(module, nn.Linear): + if 'qkv' in name: + # treat the weights of Q, K, V separately + val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1])) + nn.init.uniform_(module.weight, -val, val) + else: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def get_init_weights_vit(mode: str = 'jax', head_bias: float = 0.0) -> Callable: + if 'jax' in mode: + return partial(init_weights_vit_jax, head_bias=head_bias) + elif 'moco' in mode: + return init_weights_vit_moco + else: + return init_weights_vit_timm + + +def resize_pos_embed( + posemb: torch.Tensor, + posemb_new: torch.Tensor, + num_prefix_tokens: int = 1, + gs_new: Tuple[int, int] = (), + interpolation: str = 'bicubic', + antialias: bool = False, +) -> torch.Tensor: + """ Rescale the grid of position embeddings when loading from state_dict. 
+ *DEPRECATED* This function is being deprecated in favour of using resample_abs_pos_embed + """ + ntok_new = posemb_new.shape[1] - num_prefix_tokens + ntok_old = posemb.shape[1] - num_prefix_tokens + gs_old = [int(math.sqrt(ntok_old))] * 2 + if not len(gs_new): # backwards compatibility + gs_new = [int(math.sqrt(ntok_new))] * 2 + return resample_abs_pos_embed( + posemb, gs_new, gs_old, + num_prefix_tokens=num_prefix_tokens, + interpolation=interpolation, + antialias=antialias, + verbose=True, + ) + + +@torch.no_grad() +def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = '') -> None: + """ Load weights from .npz checkpoints for official Google Brain Flax implementation + """ + import numpy as np + + def _n2p(w, t=True, idx=None): + if idx is not None: + w = w[idx] + if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: + w = w.flatten() + if t: + if w.ndim == 4: + w = w.transpose([3, 2, 0, 1]) + elif w.ndim == 3: + w = w.transpose([2, 0, 1]) + elif w.ndim == 2: + w = w.transpose([1, 0]) + return torch.from_numpy(w) + + w = np.load(checkpoint_path) + interpolation = 'bilinear' + antialias = False + big_vision = False + if not prefix: + if 'opt/target/embedding/kernel' in w: + prefix = 'opt/target/' + elif 'params/embedding/kernel' in w: + prefix = 'params/' + big_vision = True + elif 'params/img/embedding/kernel' in w: + prefix = 'params/img/' + big_vision = True + + if hasattr(model.patch_embed, 'backbone'): + # hybrid + backbone = model.patch_embed.backbone + stem_only = not hasattr(backbone, 'stem') + stem = backbone if stem_only else backbone.stem + stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) + stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) + stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) + if not stem_only: + for i, stage in enumerate(backbone.stages): + for j, block in enumerate(stage.blocks): + bp = f'{prefix}block{i + 1}/unit{j + 1}/' + for r in range(3): + getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) + getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) + getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) + if block.downsample is not None: + block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) + block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) + block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) + embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) + else: + embed_conv_w = adapt_input_conv( + model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) + if embed_conv_w.shape[-2:] != model.patch_embed.proj.weight.shape[-2:]: + embed_conv_w = resample_patch_embed( + embed_conv_w, + model.patch_embed.proj.weight.shape[-2:], + interpolation=interpolation, + antialias=antialias, + verbose=True, + ) + + model.patch_embed.proj.weight.copy_(embed_conv_w) + model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) + if model.cls_token is not None: + model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) + if big_vision: + pos_embed_w = _n2p(w[f'{prefix}pos_embedding'], t=False) + else: + pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) + if pos_embed_w.shape != model.pos_embed.shape: + old_shape = pos_embed_w.shape + num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) + pos_embed_w = resample_abs_pos_embed( # resize pos 
embedding when different size from pretrained weights + pos_embed_w, + new_size=model.patch_embed.grid_size, + num_prefix_tokens=num_prefix_tokens, + interpolation=interpolation, + antialias=antialias, + verbose=True, + ) + model.pos_embed.copy_(pos_embed_w) + model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) + model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) + if (isinstance(model.head, nn.Linear) and + f'{prefix}head/bias' in w and + model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]): + model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) + model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) + # NOTE representation layer has been removed, not used in latest 21k/1k pretrained weights + # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: + # model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) + # model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) + if model.attn_pool is not None: + block_prefix = f'{prefix}MAPHead_0/' + mha_prefix = block_prefix + f'MultiHeadDotProductAttention_0/' + model.attn_pool.latent.copy_(_n2p(w[f'{block_prefix}probe'], t=False)) + model.attn_pool.kv.weight.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('key', 'value')])) + model.attn_pool.kv.bias.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('key', 'value')])) + model.attn_pool.q.weight.copy_(_n2p(w[f'{mha_prefix}query/kernel'], t=False).flatten(1).T) + model.attn_pool.q.bias.copy_(_n2p(w[f'{mha_prefix}query/bias'], t=False).reshape(-1)) + model.attn_pool.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) + model.attn_pool.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) + model.attn_pool.norm.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) + model.attn_pool.norm.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) + for r in range(2): + getattr(model.attn_pool.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_0/Dense_{r}/kernel'])) + getattr(model.attn_pool.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_0/Dense_{r}/bias'])) + + mha_sub, b_sub, ln1_sub = (0, 0, 1) if big_vision else (1, 3, 2) + for i, block in enumerate(model.blocks.children()): + if f'{prefix}Transformer/encoderblock/LayerNorm_0/scale' in w: + block_prefix = f'{prefix}Transformer/encoderblock/' + idx = i + else: + block_prefix = f'{prefix}Transformer/encoderblock_{i}/' + idx = None + mha_prefix = block_prefix + f'MultiHeadDotProductAttention_{mha_sub}/' + block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'], idx=idx)) + block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'], idx=idx)) + block.attn.qkv.weight.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/kernel'], t=False, idx=idx).flatten(1).T for n in ('query', 'key', 'value')])) + block.attn.qkv.bias.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/bias'], t=False, idx=idx).reshape(-1) for n in ('query', 'key', 'value')])) + block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel'], idx=idx).flatten(1)) + block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'], idx=idx)) + block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_{ln1_sub}/scale'], idx=idx)) + block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_{ln1_sub}/bias'], idx=idx)) + for r in range(2): + getattr(block.mlp, f'fc{r + 1}').weight.copy_( + _n2p(w[f'{block_prefix}MlpBlock_{b_sub}/Dense_{r}/kernel'], idx=idx)) + 
getattr(block.mlp, f'fc{r + 1}').bias.copy_( + _n2p(w[f'{block_prefix}MlpBlock_{b_sub}/Dense_{r}/bias'], idx=idx)) + + +def _convert_openai_clip( + state_dict: Dict[str, torch.Tensor], + model: VisionTransformer, + prefix: str = 'visual.', +) -> Dict[str, torch.Tensor]: + out_dict = {} + swaps = [ + ('conv1', 'patch_embed.proj'), + ('positional_embedding', 'pos_embed'), + ('transformer.resblocks.', 'blocks.'), + ('ln_pre', 'norm_pre'), + ('ln_post', 'norm'), + ('ln_', 'norm'), + ('in_proj_', 'qkv.'), + ('out_proj', 'proj'), + ('mlp.c_fc', 'mlp.fc1'), + ('mlp.c_proj', 'mlp.fc2'), + ] + for k, v in state_dict.items(): + if not k.startswith(prefix): + continue + k = k.replace(prefix, '') + for sp in swaps: + k = k.replace(sp[0], sp[1]) + + if k == 'proj': + k = 'head.weight' + v = v.transpose(0, 1) + out_dict['head.bias'] = torch.zeros(v.shape[0]) + elif k == 'class_embedding': + k = 'cls_token' + v = v.unsqueeze(0).unsqueeze(1) + elif k == 'pos_embed': + v = v.unsqueeze(0) + out_dict[k] = v + return out_dict + + +def _convert_dinov2( + state_dict: Dict[str, torch.Tensor], + model: VisionTransformer, +) -> Dict[str, torch.Tensor]: + import re + out_dict = {} + state_dict.pop("mask_token", None) + if 'register_tokens' in state_dict: + # convert dinov2 w/ registers to no_embed_class timm model (neither cls or reg tokens overlap pos embed) + out_dict['reg_token'] = state_dict.pop('register_tokens') + out_dict['cls_token'] = state_dict.pop('cls_token') + state_dict['pos_embed'][:, 0] + out_dict['pos_embed'] = state_dict.pop('pos_embed')[:, 1:] + for k, v in state_dict.items(): + if re.match(r"blocks\.(\d+)\.mlp\.w12\.(?:weight|bias)", k): + out_dict[k.replace("w12", "fc1")] = v + continue + elif re.match(r"blocks\.(\d+)\.mlp\.w3\.(?:weight|bias)", k): + out_dict[k.replace("w3", "fc2")] = v + continue + out_dict[k] = v + return out_dict + + +def checkpoint_filter_fn( + state_dict: Dict[str, torch.Tensor], + model: VisionTransformer, + adapt_layer_scale: bool = False, + interpolation: str = 'bicubic', + antialias: bool = True, +) -> Dict[str, torch.Tensor]: + """ convert patch embedding weight from manual patchify + linear proj to conv""" + import re + out_dict = {} + state_dict = state_dict.get('model', state_dict) + state_dict = state_dict.get('state_dict', state_dict) + prefix = '' + + if 'visual.class_embedding' in state_dict: + state_dict = _convert_openai_clip(state_dict, model) + elif 'module.visual.class_embedding' in state_dict: + state_dict = _convert_openai_clip(state_dict, model, prefix='module.visual.') + elif "mask_token" in state_dict: + state_dict = _convert_dinov2(state_dict, model) + elif "encoder" in state_dict: + # IJEPA, vit in an 'encoder' submodule + state_dict = state_dict['encoder'] + prefix = 'module.' + elif 'visual.trunk.pos_embed' in state_dict or 'visual.trunk.blocks.0.norm1.weight' in state_dict: + # OpenCLIP model with timm vision encoder + prefix = 'visual.trunk.' 
+ if 'visual.head.proj.weight' in state_dict and isinstance(model.head, nn.Linear): + # remap final nn.Linear if it exists outside of the timm .trunk (ie in visual.head.proj) + out_dict['head.weight'] = state_dict['visual.head.proj.weight'] + out_dict['head.bias'] = torch.zeros(state_dict['visual.head.proj.weight'].shape[0]) + + if prefix: + # filter on & remove prefix string from keys + state_dict = {k[len(prefix):]: v for k, v in state_dict.items() if k.startswith(prefix)} + + for k, v in state_dict.items(): + if 'patch_embed.proj.weight' in k: + O, I, H, W = model.patch_embed.proj.weight.shape + if len(v.shape) < 4: + # For old models that I trained prior to conv based patchification + O, I, H, W = model.patch_embed.proj.weight.shape + v = v.reshape(O, -1, H, W) + if v.shape[-1] != W or v.shape[-2] != H: + v = resample_patch_embed( + v, + (H, W), + interpolation=interpolation, + antialias=antialias, + verbose=True, + ) + elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: + # To resize pos embedding when using model at different size from pretrained weights + num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) + v = resample_abs_pos_embed( + v, + new_size=model.patch_embed.grid_size, + num_prefix_tokens=num_prefix_tokens, + interpolation=interpolation, + antialias=antialias, + verbose=True, + ) + elif adapt_layer_scale and 'gamma_' in k: + # remap layer-scale gamma into sub-module (deit3 models) + k = re.sub(r'gamma_([0-9])', r'ls\1.gamma', k) + elif 'pre_logits' in k: + # NOTE representation layer removed as not used in latest 21k/1k pretrained weights + continue + out_dict[k] = v + return out_dict + + +def _cfg(url: str = '', **kwargs) -> Dict[str, Any]: + return { + 'url': url, + 'num_classes': 1000, + 'input_size': (3, 224, 224), + 'pool_size': None, + 'crop_pct': 0.9, + 'interpolation': 'bicubic', + 'fixed_input_size': True, + 'mean': IMAGENET_INCEPTION_MEAN, + 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'patch_embed.proj', + 'classifier': 'head', + **kwargs, + } + +default_cfgs = { + + # re-finetuned augreg 21k FT on in1k weights + 'vit_base_patch16_224.augreg2_in21k_ft_in1k': _cfg( + hf_hub_id='timm/'), + 'vit_base_patch16_384.augreg2_in21k_ft_in1k': _cfg(), + 'vit_base_patch8_224.augreg2_in21k_ft_in1k': _cfg( + hf_hub_id='timm/'), + + # How to train your ViT (augreg) weights, pretrained on 21k FT on in1k + 'vit_tiny_patch16_224.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', + hf_hub_id='timm/', + custom_load=True), + 'vit_tiny_patch16_384.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + hf_hub_id='timm/', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_patch32_224.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', + hf_hub_id='timm/', + custom_load=True), + 'vit_small_patch32_384.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + hf_hub_id='timm/', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + 
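+    # NOTE: the augreg .npz filenames above/below appear to encode the full recipe:
+    # <model>_<patch>-<pretrain data>-<epochs>-lr_<lr>-aug_<level>-wd_<wd>-do_<dropout>-sd_<drop path>
+    # --<finetune data>-steps_<steps>-lr_<lr>-res_<resolution>.npz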
'vit_small_patch16_224.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', + hf_hub_id='timm/', + custom_load=True), + 'vit_small_patch16_384.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + hf_hub_id='timm/', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch32_224.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', + hf_hub_id='timm/', + custom_load=True), + 'vit_base_patch32_384.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + hf_hub_id='timm/', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch16_224.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', + hf_hub_id='timm/', + custom_load=True), + 'vit_base_patch16_384.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', + hf_hub_id='timm/', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch8_224.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', + hf_hub_id='timm/', + custom_load=True), + 'vit_large_patch16_224.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', + hf_hub_id='timm/', + custom_load=True), + 'vit_large_patch16_384.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', + hf_hub_id='timm/', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + + # patch models (weights from official Google JAX impl) pretrained on in21k FT on in1k + 'vit_base_patch16_224.orig_in21k_ft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth', + hf_hub_id='timm/'), + 'vit_base_patch16_384.orig_in21k_ft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth', + hf_hub_id='timm/', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_patch32_384.orig_in21k_ft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth', + hf_hub_id='timm/', + input_size=(3, 384, 384), crop_pct=1.0), + + # How to train your ViT (augreg) weights trained on in1k only + 'vit_small_patch16_224.augreg_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_16-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', + hf_hub_id='timm/', + custom_load=True), + 
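+    # NOTE: cfgs with custom_load=True point at the original JAX .npz checkpoints; these go through
+    # the ViT npz loader (load_pretrained/_load_weights) rather than a standard torch state_dict load.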
'vit_small_patch16_384.augreg_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_16-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', + hf_hub_id='timm/', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch32_224.augreg_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_32-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', + hf_hub_id='timm/', + custom_load=True), + 'vit_base_patch32_384.augreg_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_32-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', + hf_hub_id='timm/', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch16_224.augreg_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_16-i1k-300ep-lr_0.001-aug_strong2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', + hf_hub_id='timm/', + custom_load=True), + 'vit_base_patch16_384.augreg_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_16-i1k-300ep-lr_0.001-aug_strong2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', + hf_hub_id='timm/', + custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), + + 'vit_large_patch14_224.untrained': _cfg(url=''), + 'vit_huge_patch14_224.untrained': _cfg(url=''), + 'vit_giant_patch14_224.untrained': _cfg(url=''), + 'vit_gigantic_patch14_224.untrained': _cfg(url=''), + + # patch models, imagenet21k (weights from official Google JAX impl), classifier not valid + 'vit_base_patch32_224.orig_in21k': _cfg( + #url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch32_224_in21k-8db57226.pth', + hf_hub_id='timm/', + num_classes=0), + 'vit_base_patch16_224.orig_in21k': _cfg( + #url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch16_224_in21k-e5005f0a.pth', + hf_hub_id='timm/', + num_classes=0), + 'vit_large_patch32_224.orig_in21k': _cfg( + #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth', + hf_hub_id='timm/', + num_classes=0), + 'vit_large_patch16_224.orig_in21k': _cfg( + #url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch16_224_in21k-606da67d.pth', + hf_hub_id='timm/', + num_classes=0), + 'vit_huge_patch14_224.orig_in21k': _cfg( + hf_hub_id='timm/', + num_classes=0), + + # How to train your ViT (augreg) weights, pretrained on in21k + 'vit_tiny_patch16_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', + hf_hub_id='timm/', + custom_load=True, num_classes=21843), + 'vit_small_patch32_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', + hf_hub_id='timm/', + custom_load=True, num_classes=21843), + 'vit_small_patch16_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', + hf_hub_id='timm/', + custom_load=True, num_classes=21843), + 'vit_base_patch32_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz', + hf_hub_id='timm/', + custom_load=True, 
num_classes=21843), + 'vit_base_patch16_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', + hf_hub_id='timm/', + custom_load=True, num_classes=21843), + 'vit_base_patch8_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', + hf_hub_id='timm/', + custom_load=True, num_classes=21843), + 'vit_large_patch16_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz', + hf_hub_id='timm/', + custom_load=True, num_classes=21843), + + # SAM trained models (https://arxiv.org/abs/2106.01548) + 'vit_base_patch32_224.sam_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz', custom_load=True, + hf_hub_id='timm/'), + 'vit_base_patch16_224.sam_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz', custom_load=True, + hf_hub_id='timm/'), + + # DINO pretrained - https://arxiv.org/abs/2104.14294 (no classifier head, for fine-tune only) + 'vit_small_patch16_224.dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth', + hf_hub_id='timm/', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_small_patch8_224.dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth', + hf_hub_id='timm/', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_base_patch16_224.dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth', + hf_hub_id='timm/', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_base_patch8_224.dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth', + hf_hub_id='timm/', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + + # DINOv2 pretrained - https://arxiv.org/abs/2304.07193 (no classifier head, for fine-tune/features only) + 'vit_small_patch14_dinov2.lvd142m': _cfg( + url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_pretrain.pth', + hf_hub_id='timm/', + license='apache-2.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, + input_size=(3, 518, 518), crop_pct=1.0), + 'vit_base_patch14_dinov2.lvd142m': _cfg( + url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_pretrain.pth', + hf_hub_id='timm/', + license='apache-2.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, + input_size=(3, 518, 518), crop_pct=1.0), + 'vit_large_patch14_dinov2.lvd142m': _cfg( + url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_pretrain.pth', + hf_hub_id='timm/', + license='apache-2.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, + input_size=(3, 518, 518), crop_pct=1.0), + 'vit_giant_patch14_dinov2.lvd142m': _cfg( + url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_pretrain.pth', + hf_hub_id='timm/', + license='apache-2.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, + input_size=(3, 518, 518), crop_pct=1.0), + + # DINOv2 pretrained w/ registers - https://arxiv.org/abs/2309.16588 (no classifier head, for fine-tune/features only) + 'vit_small_patch14_reg4_dinov2.lvd142m': _cfg( + 
url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_reg4_pretrain.pth', + hf_hub_id='timm/', + license='apache-2.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, + input_size=(3, 518, 518), crop_pct=1.0), + 'vit_base_patch14_reg4_dinov2.lvd142m': _cfg( + url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_reg4_pretrain.pth', + hf_hub_id='timm/', + license='apache-2.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, + input_size=(3, 518, 518), crop_pct=1.0), + 'vit_large_patch14_reg4_dinov2.lvd142m': _cfg( + url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_reg4_pretrain.pth', + hf_hub_id='timm/', + license='apache-2.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, + input_size=(3, 518, 518), crop_pct=1.0), + 'vit_giant_patch14_reg4_dinov2.lvd142m': _cfg( + url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_reg4_pretrain.pth', + hf_hub_id='timm/', + license='apache-2.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, + input_size=(3, 518, 518), crop_pct=1.0), + + # ViT ImageNet-21K-P pretraining by MILL + 'vit_base_patch16_224_miil.in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_in21k_miil-887286df.pth', + hf_hub_id='timm/', + mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', num_classes=11221), + 'vit_base_patch16_224_miil.in21k_ft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_1k_miil_84_4-2deb18e3.pth', + hf_hub_id='timm/', + mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear'), + + # Custom timm variants + 'vit_base_patch16_rpn_224.sw_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_base_patch16_rpn_224-sw-3b07e89d.pth', + hf_hub_id='timm/'), + 'vit_medium_patch16_gap_240.sw_in12k': _cfg( + hf_hub_id='timm/', + input_size=(3, 240, 240), crop_pct=0.95, num_classes=11821), + 'vit_medium_patch16_gap_256.sw_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_medium_patch16_gap_384.sw_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), crop_pct=0.95, crop_mode='squash'), + 'vit_base_patch16_gap_224': _cfg(), + + # CLIP pretrained image tower and related fine-tuned weights + 'vit_base_patch32_clip_224.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), + 'vit_base_patch32_clip_384.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384)), + 'vit_base_patch32_clip_448.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 448, 448)), + 'vit_base_patch16_clip_224.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95), + 'vit_base_patch16_clip_384.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), + 'vit_large_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0), + 'vit_large_patch14_clip_336.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + 
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), + 'vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), + 'vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), + + 'vit_base_patch32_clip_224.openai_ft_in12k_in1k': _cfg( + # hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in12k_in1k', # FIXME weight exists, need to push + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), + 'vit_base_patch32_clip_384.openai_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + crop_pct=0.95, input_size=(3, 384, 384), crop_mode='squash'), + 'vit_base_patch16_clip_224.openai_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95), + 'vit_base_patch16_clip_384.openai_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + crop_pct=0.95, input_size=(3, 384, 384), crop_mode='squash'), + 'vit_large_patch14_clip_224.openai_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), + 'vit_large_patch14_clip_336.openai_ft_in12k_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), + + 'vit_base_patch32_clip_224.laion2b_ft_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), + 'vit_base_patch16_clip_224.laion2b_ft_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), + 'vit_base_patch16_clip_384.laion2b_ft_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), + 'vit_large_patch14_clip_224.laion2b_ft_in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0), + 'vit_large_patch14_clip_336.laion2b_ft_in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), + 'vit_huge_patch14_clip_224.laion2b_ft_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), + 'vit_huge_patch14_clip_336.laion2b_ft_in1k': _cfg( + hf_hub_id='', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), + + 'vit_base_patch32_clip_224.openai_ft_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), + 'vit_base_patch16_clip_224.openai_ft_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), + 'vit_base_patch16_clip_384.openai_ft_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), + 'vit_large_patch14_clip_224.openai_ft_in1k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), + + 'vit_base_patch32_clip_224.laion2b_ft_in12k': _cfg( + #hf_hub_id='timm/vit_base_patch32_clip_224.laion2b_ft_in12k', # FIXME weight exists, need to push + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), + 'vit_base_patch16_clip_224.laion2b_ft_in12k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), + 
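+    # NOTE: num_classes=11821 marks heads fine-tuned on the ImageNet-12k (in12k) label set;
+    # 21843 above corresponds to full ImageNet-21k and 11221 to ImageNet-21k-P (MIIL).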
'vit_large_patch14_clip_224.laion2b_ft_in12k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, num_classes=11821), + 'vit_huge_patch14_clip_224.laion2b_ft_in12k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=11821), + + 'vit_base_patch32_clip_224.openai_ft_in12k': _cfg( + # hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in12k', # FIXME weight exists, need to push + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), + 'vit_base_patch16_clip_224.openai_ft_in12k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), + 'vit_large_patch14_clip_224.openai_ft_in12k': _cfg( + hf_hub_id='timm/', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=11821), + + 'vit_base_patch32_clip_224.laion2b': _cfg( + hf_hub_id='laion/CLIP-ViT-B-32-laion2B-s34B-b79K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), + 'vit_base_patch16_clip_224.laion2b': _cfg( + hf_hub_id='laion/CLIP-ViT-B-16-laion2B-s34B-b88K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), + 'vit_large_patch14_clip_224.laion2b': _cfg( + hf_hub_id='laion/CLIP-ViT-L-14-laion2B-s32B-b82K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, num_classes=768), + 'vit_huge_patch14_clip_224.laion2b': _cfg( + hf_hub_id='laion/CLIP-ViT-H-14-laion2B-s32B-b79K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), + 'vit_giant_patch14_clip_224.laion2b': _cfg( + hf_hub_id='laion/CLIP-ViT-g-14-laion2B-s12B-b42K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), + 'vit_gigantic_patch14_clip_224.laion2b': _cfg( + hf_hub_id='laion/CLIP-ViT-bigG-14-laion2B-39B-b160k', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1280), + + 'vit_base_patch32_clip_224.laion400m_e32': _cfg( + hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), + 'vit_base_patch16_clip_224.laion400m_e32': _cfg( + hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), + 'vit_base_patch16_plus_clip_240.laion400m_e32': _cfg( + hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 240, 240), crop_pct=1.0, num_classes=512), + 'vit_large_patch14_clip_224.laion400m_e32': _cfg( + hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), + + 'vit_base_patch32_clip_224.datacompxl': _cfg( + hf_hub_id='laion/CLIP-ViT-B-32-DataComp.XL-s13B-b90K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), + 'vit_base_patch32_clip_256.datacompxl': _cfg( + hf_hub_id='laion/CLIP-ViT-B-32-256x256-DataComp-s34B-b86K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + crop_pct=1.0, 
input_size=(3, 256, 256), num_classes=512), + 'vit_base_patch16_clip_224.datacompxl': _cfg( + hf_hub_id='laion/CLIP-ViT-B-16-DataComp.XL-s13B-b90K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), + 'vit_large_patch14_clip_224.datacompxl': _cfg( + hf_hub_id='laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), + + 'vit_base_patch16_clip_224.dfn2b': _cfg( + hf_hub_id='apple/DFN2B-CLIP-ViT-B-16', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), + 'vit_large_patch14_clip_224.dfn2b': _cfg( + hf_hub_id='apple/DFN2B-CLIP-ViT-L-14', + hf_hub_filename='open_clip_pytorch_model.bin', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), + 'vit_huge_patch14_clip_224.dfn5b': _cfg( + hf_hub_id='apple/DFN5B-CLIP-ViT-H-14', + hf_hub_filename='open_clip_pytorch_model.bin', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), + 'vit_huge_patch14_clip_378.dfn5b': _cfg( + hf_hub_id='apple/DFN5B-CLIP-ViT-H-14-378', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + notes=('natively QuickGELU, use quickgelu model variant for original results',), + crop_pct=1.0, input_size=(3, 378, 378), num_classes=1024), + + 'vit_base_patch32_clip_224.metaclip_2pt5b': _cfg( + hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', + license='cc-by-nc-4.0', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), + 'vit_base_patch16_clip_224.metaclip_2pt5b': _cfg( + hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', + license='cc-by-nc-4.0', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), + 'vit_large_patch14_clip_224.metaclip_2pt5b': _cfg( + hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', + license='cc-by-nc-4.0', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), + 'vit_huge_patch14_clip_224.metaclip_2pt5b': _cfg( + hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', + license='cc-by-nc-4.0', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), + 'vit_gigantic_patch14_clip_224.metaclip_2pt5b': _cfg( + hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', + license='cc-by-nc-4.0', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1280), + 'vit_base_patch32_clip_224.metaclip_400m': _cfg( + hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', + license='cc-by-nc-4.0', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), + 'vit_base_patch16_clip_224.metaclip_400m': _cfg( + 
hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', + license='cc-by-nc-4.0', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), + 'vit_large_patch14_clip_224.metaclip_400m': _cfg( + hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', + license='cc-by-nc-4.0', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), + + 'vit_base_patch32_clip_224.openai': _cfg( + hf_hub_id='timm/', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), + 'vit_base_patch16_clip_224.openai': _cfg( + hf_hub_id='timm/', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), + 'vit_large_patch14_clip_224.openai': _cfg( + hf_hub_id='timm/', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), + 'vit_large_patch14_clip_336.openai': _cfg( + hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', + notes=('natively QuickGELU, use quickgelu model variant for original results',), + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + crop_pct=1.0, input_size=(3, 336, 336), num_classes=768), + + # experimental (may be removed) + 'vit_base_patch32_plus_256.untrained': _cfg(url='', input_size=(3, 256, 256), crop_pct=0.95), + 'vit_base_patch16_plus_240.untrained': _cfg(url='', input_size=(3, 240, 240), crop_pct=0.95), + 'vit_small_patch16_36x1_224.untrained': _cfg(url=''), + 'vit_small_patch16_18x2_224.untrained': _cfg(url=''), + 'vit_base_patch16_18x2_224.untrained': _cfg(url=''), + + # EVA fine-tuned weights from MAE style MIM - EVA-CLIP target pretrain + # https://github.com/baaivision/EVA/blob/7ecf2c0a370d97967e86d047d7af9188f78d2df3/eva/README.md#eva-l-learning-better-mim-representations-from-eva-clip + 'eva_large_patch14_196.in22k_ft_in22k_in1k': _cfg( + # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_196px_21k_to_1k_ft_88p6.pt', + hf_hub_id='timm/', license='mit', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 196, 196), crop_pct=1.0), + 'eva_large_patch14_336.in22k_ft_in22k_in1k': _cfg( + # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_336px_21k_to_1k_ft_89p2.pt', + hf_hub_id='timm/', license='mit', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), + 'eva_large_patch14_196.in22k_ft_in1k': _cfg( + # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_196px_1k_ft_88p0.pt', + hf_hub_id='timm/', license='mit', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 196, 196), crop_pct=1.0), + 'eva_large_patch14_336.in22k_ft_in1k': _cfg( + # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_336px_1k_ft_88p65.pt', + hf_hub_id='timm/', license='mit', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, + input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), + + 'flexivit_small.1200ep_in1k': _cfg( + url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k.npz', custom_load=True, + hf_hub_id='timm/', + input_size=(3, 240, 240), crop_pct=0.95), + 'flexivit_small.600ep_in1k': _cfg( + url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k_600ep.npz', custom_load=True, + 
hf_hub_id='timm/', + input_size=(3, 240, 240), crop_pct=0.95), + 'flexivit_small.300ep_in1k': _cfg( + url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k_300ep.npz', custom_load=True, + hf_hub_id='timm/', + input_size=(3, 240, 240), crop_pct=0.95), + + 'flexivit_base.1200ep_in1k': _cfg( + url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k.npz', custom_load=True, + hf_hub_id='timm/', + input_size=(3, 240, 240), crop_pct=0.95), + 'flexivit_base.600ep_in1k': _cfg( + url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k_600ep.npz', custom_load=True, + hf_hub_id='timm/', + input_size=(3, 240, 240), crop_pct=0.95), + 'flexivit_base.300ep_in1k': _cfg( + url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k_300ep.npz', custom_load=True, + hf_hub_id='timm/', + input_size=(3, 240, 240), crop_pct=0.95), + 'flexivit_base.1000ep_in21k': _cfg( + url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i21k_1000ep.npz', custom_load=True, + hf_hub_id='timm/', + input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), + 'flexivit_base.300ep_in21k': _cfg( + url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i21k_300ep.npz', custom_load=True, + hf_hub_id='timm/', + input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), + + 'flexivit_large.1200ep_in1k': _cfg( + url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k.npz', custom_load=True, + hf_hub_id='timm/', + input_size=(3, 240, 240), crop_pct=0.95), + 'flexivit_large.600ep_in1k': _cfg( + url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k_600ep.npz', custom_load=True, + hf_hub_id='timm/', + input_size=(3, 240, 240), crop_pct=0.95), + 'flexivit_large.300ep_in1k': _cfg( + url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k_300ep.npz', custom_load=True, + hf_hub_id='timm/', + input_size=(3, 240, 240), crop_pct=0.95), + + 'flexivit_base.patch16_in21k': _cfg( + url='https://storage.googleapis.com/big_vision/flexivit/vit_b16_i21k_300ep.npz', custom_load=True, + hf_hub_id='timm/', + input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), + 'flexivit_base.patch30_in21k': _cfg( + url='https://storage.googleapis.com/big_vision/flexivit/vit_b30_i21k_300ep.npz', custom_load=True, + hf_hub_id='timm/', + input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), + + 'vit_base_patch16_xp_224.untrained': _cfg(url=''), + 'vit_large_patch14_xp_224.untrained': _cfg(url=''), + 'vit_huge_patch14_xp_224.untrained': _cfg(url=''), + + 'vit_base_patch16_224.mae': _cfg( + url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth', + hf_hub_id='timm/', + license='cc-by-nc-4.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_large_patch16_224.mae': _cfg( + url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_large.pth', + hf_hub_id='timm/', + license='cc-by-nc-4.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_huge_patch14_224.mae': _cfg( + url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_huge.pth', + hf_hub_id='timm/', + license='cc-by-nc-4.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + + 'vit_huge_patch14_gap_224.in1k_ijepa': _cfg( + url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.14-300e.pth.tar', + # hf_hub_id='timm/', + license='cc-by-nc-4.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_huge_patch14_gap_224.in22k_ijepa': _cfg( + 
url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.h.14-900e.pth.tar', + # hf_hub_id='timm/', + license='cc-by-nc-4.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_huge_patch16_gap_448.in1k_ijepa': _cfg( + url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.16-448px-300e.pth.tar', + # hf_hub_id='timm/', + license='cc-by-nc-4.0', + input_size=(3, 448, 448), crop_pct=1.0, + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_giant_patch16_gap_224.in22k_ijepa': _cfg( + url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.g.16-600e.pth.tar', + # hf_hub_id='timm/', + license='cc-by-nc-4.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + + 'vit_base_patch16_siglip_224.webli': _cfg( + hf_hub_id='timm/ViT-B-16-SigLIP', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=0), + 'vit_base_patch16_siglip_256.webli': _cfg( + hf_hub_id='timm/ViT-B-16-SigLIP-256', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 256, 256), + num_classes=0), + 'vit_base_patch16_siglip_256.webli_i18n': _cfg( + hf_hub_id='timm/ViT-B-16-SigLIP-i18n-256', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 256, 256), + num_classes=0), + 'vit_base_patch16_siglip_384.webli': _cfg( + hf_hub_id='timm/ViT-B-16-SigLIP-384', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 384, 384), + num_classes=0), + 'vit_base_patch16_siglip_512.webli': _cfg( + hf_hub_id='timm/ViT-B-16-SigLIP-512', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 512, 512), + num_classes=0), + 'vit_large_patch16_siglip_256.webli': _cfg( + hf_hub_id='timm/ViT-L-16-SigLIP-256', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 256, 256), + num_classes=0), + 'vit_large_patch16_siglip_384.webli': _cfg( + hf_hub_id='timm/ViT-L-16-SigLIP-384', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 384, 384), + num_classes=0), + 'vit_so400m_patch14_siglip_224.webli': _cfg( + hf_hub_id='timm/ViT-SO400M-14-SigLIP', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=0), + 'vit_so400m_patch16_siglip_256.webli_i18n': _cfg( + hf_hub_id='timm/ViT-SO400M-16-SigLIP-i18n-256', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 256, 256), + num_classes=0), + 'vit_so400m_patch14_siglip_378.webli': _cfg( + hf_hub_id='timm/ViT-SO400M-14-SigLIP-384', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 378, 378), + num_classes=0), + 'vit_so400m_patch14_siglip_384.webli': _cfg( + hf_hub_id='timm/ViT-SO400M-14-SigLIP-384', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 384, 384), + num_classes=0), + + 'vit_base_patch16_siglip_gap_224.webli': _cfg( + hf_hub_id='timm/ViT-B-16-SigLIP', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=0), + 'vit_base_patch16_siglip_gap_256.webli': _cfg( + hf_hub_id='timm/ViT-B-16-SigLIP-256', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 256, 256), + num_classes=0), + 'vit_base_patch16_siglip_gap_256.webli_i18n': _cfg( + hf_hub_id='timm/ViT-B-16-SigLIP-i18n-256', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 256, 256), + num_classes=0), + 'vit_base_patch16_siglip_gap_384.webli': _cfg( + hf_hub_id='timm/ViT-B-16-SigLIP-384', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 384, 384), + num_classes=0), + 'vit_base_patch16_siglip_gap_512.webli': _cfg( + hf_hub_id='timm/ViT-B-16-SigLIP-512', + hf_hub_filename='open_clip_pytorch_model.bin', + 
input_size=(3, 512, 512), + num_classes=0), + 'vit_large_patch16_siglip_gap_256.webli': _cfg( + hf_hub_id='timm/ViT-L-16-SigLIP-256', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 256, 256), + num_classes=0), + 'vit_large_patch16_siglip_gap_384.webli': _cfg( + hf_hub_id='timm/ViT-L-16-SigLIP-384', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 384, 384), + num_classes=0), + 'vit_so400m_patch14_siglip_gap_224.webli': _cfg( + hf_hub_id='timm/ViT-SO400M-14-SigLIP', + hf_hub_filename='open_clip_pytorch_model.bin', + num_classes=0), + 'vit_so400m_patch14_siglip_gap_224.pali_mix': _cfg( + hf_hub_id='google/paligemma-3b-mix-224-jax', + hf_hub_filename='paligemma-3b-mix-224.npz', + custom_load='hf', + num_classes=0), + 'vit_so400m_patch14_siglip_gap_224.pali_pt': _cfg( + hf_hub_id='google/paligemma-3b-pt-224-jax', + hf_hub_filename='paligemma-3b-pt-224.npz', + custom_load='hf', + num_classes=0), + 'vit_so400m_patch16_siglip_gap_256.webli_i18n': _cfg( + hf_hub_id='timm/ViT-SO400M-16-SigLIP-i18n-256', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 256, 256), + num_classes=0), + 'vit_so400m_patch14_siglip_gap_378.webli': _cfg( + hf_hub_id='timm/ViT-SO400M-14-SigLIP-384', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 378, 378), crop_pct=1.0, + num_classes=0), + 'vit_so400m_patch14_siglip_gap_384.webli': _cfg( + hf_hub_id='timm/ViT-SO400M-14-SigLIP-384', + hf_hub_filename='open_clip_pytorch_model.bin', + input_size=(3, 384, 384), crop_pct=1.0, + num_classes=0), + 'vit_so400m_patch14_siglip_gap_448.pali_mix': _cfg( + hf_hub_id='google/paligemma-3b-mix-448-jax', + hf_hub_filename='paligemma-3b-mix-448.npz', + custom_load='hf', + input_size=(3, 448, 448), crop_pct=1.0, + num_classes=0), + 'vit_so400m_patch14_siglip_gap_448.pali_pt': _cfg( + hf_hub_id='google/paligemma-3b-pt-448-jax', + hf_hub_filename='paligemma-3b-pt-448.npz', + custom_load='hf', + input_size=(3, 448, 448), crop_pct=1.0, + num_classes=0), + 'vit_so400m_patch14_siglip_gap_896.pali_pt': _cfg( + hf_hub_id='google/paligemma-3b-pt-896-jax', + hf_hub_filename='paligemma-3b-pt-896.npz', + custom_load='hf', + input_size=(3, 896, 896), crop_pct=1.0, + num_classes=0), + + 'vit_so400m_patch14_siglip_378.webli_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 378, 378), crop_pct=1.0, crop_mode='squash', + ), + 'vit_so400m_patch14_siglip_gap_378.webli_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 378, 378), crop_pct=1.0, crop_mode='squash', + ), + + 'vit_xsmall_patch16_clip_224.tinyclip_yfcc15m': _cfg( + hf_hub_id='timm/', + hf_hub_filename='open_clip_pytorch_model.bin', + license='mit', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), + 'vit_medium_patch32_clip_224.tinyclip_laion400m': _cfg( + hf_hub_id='timm/', + hf_hub_filename='open_clip_pytorch_model.bin', + license='mit', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), + 'vit_medium_patch16_clip_224.tinyclip_yfcc15m': _cfg( + hf_hub_id='timm/', + hf_hub_filename='open_clip_pytorch_model.bin', + license='mit', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), + 'vit_betwixt_patch32_clip_224.tinyclip_laion400m': _cfg( + hf_hub_id='timm/', + hf_hub_filename='open_clip_pytorch_model.bin', + license='mit', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), + + 'vit_wee_patch16_reg1_gap_256.sbb_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_pwee_patch16_reg1_gap_256.sbb_in1k': _cfg( + hf_hub_id='timm/', + 
input_size=(3, 256, 256), crop_pct=0.95), + 'vit_little_patch16_reg1_gap_256.sbb_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_little_patch16_reg1_gap_256.sbb_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_little_patch16_reg4_gap_256.sbb_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_medium_patch16_reg1_gap_256.sbb_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_medium_patch16_reg4_gap_256.sbb_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_medium_patch16_reg4_gap_256.sbb_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_medium_patch16_reg4_gap_256.sbb_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_mediumd_patch16_reg4_gap_256.sbb_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_mediumd_patch16_reg4_gap_256.sbb_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_mediumd_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_betwixt_patch16_reg1_gap_256.sbb_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_betwixt_patch16_reg4_gap_256.sbb_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_betwixt_patch16_reg4_gap_256.sbb_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_betwixt_patch16_reg4_gap_256.sbb_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + input_size=(3, 256, 256), crop_pct=0.95), + 'vit_betwixt_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch16_reg4_gap_256.untrained': _cfg( + input_size=(3, 256, 256)), + + 'vit_so150m_patch16_reg4_gap_256.untrained': _cfg( + input_size=(3, 256, 256)), + 'vit_so150m_patch16_reg4_map_256.untrained': _cfg( + input_size=(3, 256, 256)), + + 'vit_intern300m_patch14_448.ogvl_dist': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, + input_size=(3, 448, 448), crop_pct=1.0, num_classes=0, + ), + + 'test_vit.r160_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 160, 160), crop_pct=0.95), + 'test_vit2.r160_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 160, 160), crop_pct=0.95), + 'test_vit3.r160_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 160, 160), crop_pct=0.95), +} + +_quick_gelu_cfgs = [n for n, c in default_cfgs.items() if c.get('notes', ()) and 'quickgelu' in c['notes'][0]] +for n in _quick_gelu_cfgs: + # generate quickgelu default cfgs based on contents of notes field + c = copy.deepcopy(default_cfgs[n]) + if c['hf_hub_id'] == 'timm/': + c['hf_hub_id'] = 'timm/' + n # 
need to use non-quickgelu model name for hub id + default_cfgs[n.replace('_clip_', '_clip_quickgelu_')] = c +default_cfgs = generate_default_cfgs(default_cfgs) + + +def _create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) -> VisionTransformer: + out_indices = kwargs.pop('out_indices', 3) + if 'flexi' in variant: + # FIXME Google FlexiViT pretrained models have a strong preference for bilinear patch / embed + # interpolation, other pretrained models resize better w/ anti-aliased bicubic interpolation. + _filter_fn = partial(checkpoint_filter_fn, interpolation='bilinear', antialias=False) + else: + _filter_fn = checkpoint_filter_fn + + # FIXME attn pool (currently only in siglip) params removed if pool disabled, is there a better soln? + strict = kwargs.pop('pretrained_strict', True) + if 'siglip' in variant and kwargs.get('global_pool', None) != 'map': + strict = False + + return build_model_with_cfg( + VisionTransformer, + variant, + pretrained, + pretrained_filter_fn=_filter_fn, + pretrained_strict=strict, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + + +@register_model +def vit_tiny_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Tiny (Vit-Ti/16) + """ + model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3) + model = _create_vision_transformer('vit_tiny_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_tiny_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Tiny (Vit-Ti/16) @ 384x384. + """ + model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3) + model = _create_vision_transformer('vit_tiny_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_small_patch32_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Small (ViT-S/32) + """ + model_args = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6) + model = _create_vision_transformer('vit_small_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_small_patch32_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Small (ViT-S/32) at 384x384. 
+ """ + model_args = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6) + model = _create_vision_transformer('vit_small_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_small_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Small (ViT-S/16) + """ + model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) + model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_small_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Small (ViT-S/16) + """ + model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) + model = _create_vision_transformer('vit_small_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_small_patch8_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Small (ViT-S/8) + """ + model_args = dict(patch_size=8, embed_dim=384, depth=12, num_heads=6) + model = _create_vision_transformer('vit_small_patch8_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch32_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k, source https://github.com/google-research/vision_transformer. + """ + model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12) + model = _create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch32_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12) + model = _create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) + model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) + model = _create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch8_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Base (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929). 
+ ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_args = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12) + model = _create_vision_transformer('vit_base_patch8_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch32_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights. + """ + model_args = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16) + model = _create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch32_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_args = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16) + model = _create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16) + model = _create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16) + model = _create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Large model (ViT-L/14) + """ + model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16) + model = _create_vision_transformer('vit_large_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_huge_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). 
+ """ + model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16) + model = _create_vision_transformer('vit_huge_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_giant_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Giant (little-g) model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 + """ + model_args = dict(patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16) + model = _create_vision_transformer('vit_giant_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_gigantic_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Gigantic (big-G) model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 + """ + model_args = dict(patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16) + model = _create_vision_transformer( + 'vit_gigantic_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_224_miil(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False) + model = _create_vision_transformer( + 'vit_base_patch16_224_miil', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_medium_patch16_gap_240(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Medium (ViT-M/16) w/o class token, w/ avg-pool @ 240x240 + """ + model_args = dict( + patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False, + global_pool='avg', qkv_bias=False, init_values=1e-6, fc_norm=False) + model = _create_vision_transformer( + 'vit_medium_patch16_gap_240', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_medium_patch16_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Medium (ViT-M/16) w/o class token, w/ avg-pool @ 256x256 + """ + model_args = dict( + patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False, + global_pool='avg', qkv_bias=False, init_values=1e-6, fc_norm=False) + model = _create_vision_transformer( + 'vit_medium_patch16_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_medium_patch16_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Medium (ViT-M/16) w/o class token, w/ avg-pool @ 384x384 + """ + model_args = dict( + patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False, + global_pool='avg', qkv_bias=False, init_values=1e-6, fc_norm=False) + model = _create_vision_transformer( + 'vit_medium_patch16_gap_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_betwixt_patch16_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Betwixt (ViT-b/16) w/o class token, w/ avg-pool @ 256x256 + """ + model_args = dict( + patch_size=16, embed_dim=640, depth=12, num_heads=10, class_token=False, + global_pool='avg', qkv_bias=False, init_values=1e-6, fc_norm=False) + model = _create_vision_transformer( + 'vit_medium_patch16_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return 
model + + +@register_model +def vit_base_patch16_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Base (ViT-B/16) w/o class token, w/ avg-pool @ 224x224 + """ + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=16, class_token=False, global_pool='avg', fc_norm=False) + model = _create_vision_transformer( + 'vit_base_patch16_gap_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_huge_patch14_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Huge model (ViT-H/14) w/ no class token, avg pool + """ + model_args = dict( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, class_token=False, global_pool='avg', fc_norm=False) + model = _create_vision_transformer( + 'vit_huge_patch14_gap_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_huge_patch16_gap_448(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Huge model (ViT-H/16) w/ no class token, avg pool @ 448x448 + """ + model_args = dict( + patch_size=16, embed_dim=1280, depth=32, num_heads=16, class_token=False, global_pool='avg', fc_norm=False) + model = _create_vision_transformer( + 'vit_huge_patch16_gap_448', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_giant_patch16_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Giant (little-gg) model (ViT-g/16) w/ no class token, avg pool + """ + model_args = dict( + patch_size=16, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=48/11, + class_token=False, global_pool='avg', fc_norm=False) + model = _create_vision_transformer( + 'vit_giant_patch16_gap_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_xsmall_patch16_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + # TinyCLIP 8M + model_args = dict(embed_dim=256, depth=10, num_heads=4, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_xsmall_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_medium_patch32_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + # TinyCLIP 40M + model_args = dict( + patch_size=32, embed_dim=512, depth=12, num_heads=8, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_medium_patch32_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_medium_patch16_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + # TinyCLIP 39M + model_args = dict(embed_dim=512, depth=12, num_heads=8, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_medium_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_betwixt_patch32_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + # TinyCLIP 61M + model_args = dict( + patch_size=32, embed_dim=640, depth=12, num_heads=10, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_betwixt_patch32_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch32_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-B/32 CLIP image tower @ 224x224 + """ + model_args = dict( + patch_size=32, embed_dim=768, depth=12, 
num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_base_patch32_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch32_clip_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-B/32 CLIP image tower @ 256x256 + """ + model_args = dict( + patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_base_patch32_clip_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch32_clip_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-B/32 CLIP image tower @ 384x384 + """ + model_args = dict( + patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_base_patch32_clip_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch32_clip_448(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-B/32 CLIP image tower @ 448x448 + """ + model_args = dict( + patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_base_patch32_clip_448', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-B/16 CLIP image tower + """ + model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_base_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_clip_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-B/16 CLIP image tower @ 384x384 + """ + model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_base_patch16_clip_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_plus_clip_240(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Base (ViT-B/16+) CLIP image tower @ 240x240 + """ + model_args = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_base_patch16_plus_clip_240', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Large model (ViT-L/14) CLIP image tower + """ + model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_large_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch14_clip_336(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Large model (ViT-L/14) CLIP image tower @ 336x336 + """ + model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_large_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def 
vit_huge_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Huge model (ViT-H/14) CLIP image tower. + """ + model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_huge_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_huge_patch14_clip_336(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Huge model (ViT-H/14) CLIP image tower @ 336x336 + """ + model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_huge_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_huge_patch14_clip_378(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Huge model (ViT-H/14) CLIP image tower @ 378x378 + """ + model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_huge_patch14_clip_378', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_giant_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Giant (little-g) model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 + Pretrained weights from CLIP image tower. + """ + model_args = dict( + patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_giant_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_gigantic_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-bigG model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 + Pretrained weights from CLIP image tower. 
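+ The non-standard mlp_ratio of 64/13 reproduces the 8192-dim MLP of the bigG image tower (1664 * 64 / 13 = 8192).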
+ """ + model_args = dict( + patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm) + model = _create_vision_transformer( + 'vit_gigantic_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch32_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-B/32 CLIP image tower @ 224x224 + """ + model_args = dict( + patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, + norm_layer=nn.LayerNorm, act_layer='quick_gelu') + model = _create_vision_transformer( + 'vit_base_patch32_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-B/16 CLIP image tower w/ QuickGELU act + """ + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, + norm_layer=nn.LayerNorm, act_layer='quick_gelu') + model = _create_vision_transformer( + 'vit_base_patch16_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch14_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Large model (ViT-L/14) CLIP image tower w/ QuickGELU act + """ + model_args = dict( + patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, + norm_layer=nn.LayerNorm, act_layer='quick_gelu') + model = _create_vision_transformer( + 'vit_large_patch14_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch14_clip_quickgelu_336(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Large model (ViT-L/14) CLIP image tower @ 336x336 w/ QuickGELU act + """ + model_args = dict( + patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, + norm_layer=nn.LayerNorm, act_layer='quick_gelu') + model = _create_vision_transformer( + 'vit_large_patch14_clip_quickgelu_336', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_huge_patch14_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Huge model (ViT-H/14) CLIP image tower w/ QuickGELU act. 
+ """ + model_args = dict( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, + norm_layer=nn.LayerNorm, act_layer='quick_gelu') + model = _create_vision_transformer( + 'vit_huge_patch14_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_huge_patch14_clip_quickgelu_378(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Huge model (ViT-H/14) CLIP image tower @ 378x378 w/ QuickGELU act + """ + model_args = dict( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, + norm_layer=nn.LayerNorm, act_layer='quick_gelu') + model = _create_vision_transformer( + 'vit_huge_patch14_clip_quickgelu_378', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_gigantic_patch14_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-bigG model (ViT-G/14) w/ QuickGELU act + """ + model_args = dict( + patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16, pre_norm=True, + norm_layer=nn.LayerNorm, act_layer='quick_gelu') + model = _create_vision_transformer( + 'vit_gigantic_patch14_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +# Experimental models below + +@register_model +def vit_base_patch32_plus_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Base (ViT-B/32+) + """ + model_args = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, init_values=1e-5) + model = _create_vision_transformer( + 'vit_base_patch32_plus_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_plus_240(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Base (ViT-B/16+) + """ + model_args = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14, init_values=1e-5) + model = _create_vision_transformer( + 'vit_base_patch16_plus_240', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_rpn_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Base (ViT-B/16) w/ residual post-norm + """ + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, init_values=1e-5, + class_token=False, block_fn=ResPostBlock, global_pool='avg') + model = _create_vision_transformer( + 'vit_base_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_small_patch16_36x1_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Base w/ LayerScale + 36 x 1 (36 block serial) config. Experimental, may remove. + Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 + Paper focuses on 24x2 + 48x1 for 'Small' width but those are extremely slow. + """ + model_args = dict(patch_size=16, embed_dim=384, depth=36, num_heads=6, init_values=1e-5) + model = _create_vision_transformer( + 'vit_small_patch16_36x1_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_small_patch16_18x2_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Small w/ LayerScale + 18 x 2 (36 block parallel) config. Experimental, may remove. + Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 + Paper focuses on 24x2 + 48x1 for 'Small' width but those are extremely slow. 
+ """ + model_args = dict( + patch_size=16, embed_dim=384, depth=18, num_heads=6, init_values=1e-5, block_fn=ParallelThingsBlock) + model = _create_vision_transformer( + 'vit_small_patch16_18x2_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_18x2_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Base w/ LayerScale + 18 x 2 (36 block parallel) config. Experimental, may remove. + Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 + """ + model_args = dict( + patch_size=16, embed_dim=768, depth=18, num_heads=12, init_values=1e-5, block_fn=ParallelThingsBlock) + model = _create_vision_transformer( + 'vit_base_patch16_18x2_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva_large_patch14_196(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ EVA-large model https://arxiv.org/abs/2211.07636 /via MAE MIM pretrain""" + model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, global_pool='avg') + model = _create_vision_transformer( + 'eva_large_patch14_196', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def eva_large_patch14_336(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ EVA-large model https://arxiv.org/abs/2211.07636 via MAE MIM pretrain""" + model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, global_pool='avg') + model = _create_vision_transformer('eva_large_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def flexivit_small(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ FlexiViT-Small + """ + model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True) + model = _create_vision_transformer('flexivit_small', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def flexivit_base(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ FlexiViT-Base + """ + model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True) + model = _create_vision_transformer('flexivit_base', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def flexivit_large(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ FlexiViT-Large + """ + model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True) + model = _create_vision_transformer('flexivit_large', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_xp_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Large model (ViT-L/14) w/ parallel blocks and qk norm enabled. + """ + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, no_embed_class=True, + norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True, + ) + model = _create_vision_transformer( + 'vit_base_patch16_xp_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch14_xp_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Large model (ViT-L/14) w/ parallel blocks and qk norm enabled. 
+ """ + model_args = dict( + patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, no_embed_class=True, + norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True, + ) + model = _create_vision_transformer( + 'vit_large_patch14_xp_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_huge_patch14_xp_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-Huge model (ViT-H/14) w/ parallel blocks and qk norm enabled. + """ + model_args = dict( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, no_embed_class=True, + norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True, + ) + model = _create_vision_transformer( + 'vit_huge_patch14_xp_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_small_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-S/14 for DINOv2 + """ + model_args = dict(patch_size=14, embed_dim=384, depth=12, num_heads=6, init_values=1e-5) + model = _create_vision_transformer( + 'vit_small_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-B/14 for DINOv2 + """ + model_args = dict(patch_size=14, embed_dim=768, depth=12, num_heads=12, init_values=1e-5) + model = _create_vision_transformer( + 'vit_base_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-L/14 for DINOv2 + """ + model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, init_values=1e-5) + model = _create_vision_transformer( + 'vit_large_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_giant_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-G/14 for DINOv2 + """ + # The hidden_features of SwiGLU is calculated by: + # hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8 + # When embed_dim=1536, hidden_features=4096 + # With SwiGLUPacked, we need to set hidden_features = 2 * 4096 = 8192 + model_args = dict( + patch_size=14, embed_dim=1536, depth=40, num_heads=24, init_values=1e-5, + mlp_ratio=2.66667 * 2, mlp_layer=SwiGLUPacked, act_layer=nn.SiLU + ) + model = _create_vision_transformer( + 'vit_giant_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_small_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-S/14 for DINOv2 w/ 4 registers + """ + model_args = dict( + patch_size=14, embed_dim=384, depth=12, num_heads=6, init_values=1e-5, + reg_tokens=4, no_embed_class=True, + ) + model = _create_vision_transformer( + 'vit_small_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-B/14 for DINOv2 w/ 4 registers + """ + model_args = dict( + patch_size=14, embed_dim=768, depth=12, num_heads=12, init_values=1e-5, + reg_tokens=4, no_embed_class=True, + ) + model = _create_vision_transformer( + 'vit_base_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def 
vit_large_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-L/14 for DINOv2 w/ 4 registers + """ + model_args = dict( + patch_size=14, embed_dim=1024, depth=24, num_heads=16, init_values=1e-5, + reg_tokens=4, no_embed_class=True, + ) + model = _create_vision_transformer( + 'vit_large_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_giant_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT-G/14 for DINOv2 + """ + # The hidden_features of SwiGLU is calculated by: + # hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8 + # When embed_dim=1536, hidden_features=4096 + # With SwiGLUPacked, we need to set hidden_features = 2 * 4096 = 8192 + model_args = dict( + patch_size=14, embed_dim=1536, depth=40, num_heads=24, init_values=1e-5, mlp_ratio=2.66667 * 2, + mlp_layer=SwiGLUPacked, act_layer=nn.SiLU, reg_tokens=4, no_embed_class=True, + ) + model = _create_vision_transformer( + 'vit_giant_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_siglip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map', + ) + model = _create_vision_transformer( + 'vit_base_patch16_siglip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_siglip_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map', + ) + model = _create_vision_transformer( + 'vit_base_patch16_siglip_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_siglip_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map', + ) + model = _create_vision_transformer( + 'vit_base_patch16_siglip_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_siglip_512(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map', + ) + model = _create_vision_transformer( + 'vit_base_patch16_siglip_512', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch16_siglip_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False, global_pool='map', + ) + model = _create_vision_transformer( + 'vit_large_patch16_siglip_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch16_siglip_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False, global_pool='map', + ) + model = _create_vision_transformer( + 'vit_large_patch16_siglip_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_so400m_patch14_siglip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=14, embed_dim=1152, depth=27, 
num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='map', + ) + model = _create_vision_transformer( + 'vit_so400m_patch14_siglip_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_so400m_patch16_siglip_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + # this is a corrected variant of the 384 with a res properly divisible by patch size (no padding/truncation) + model_args = dict( + patch_size=16, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='map', + ) + model = _create_vision_transformer( + 'vit_so400m_patch16_siglip_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_so400m_patch14_siglip_378(pretrained: bool = False, **kwargs) -> VisionTransformer: + # this is a corrected variant of the 384 with a res properly divisible by patch size (no padding/truncation) + model_args = dict( + patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='map', + ) + model = _create_vision_transformer( + 'vit_so400m_patch14_siglip_378', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_so400m_patch14_siglip_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='map', + ) + model = _create_vision_transformer( + 'vit_so400m_patch14_siglip_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_siglip_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP).""" + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='avg', fc_norm=False, + ) + model = _create_vision_transformer( + 'vit_base_patch16_siglip_gap_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_siglip_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP).""" + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='avg', fc_norm=False, + ) + model = _create_vision_transformer( + 'vit_base_patch16_siglip_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_siglip_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP).""" + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='avg', fc_norm=False, + ) + model = _create_vision_transformer( + 'vit_base_patch16_siglip_gap_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_siglip_gap_512(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP).""" + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='avg', fc_norm=False, + ) + model = _create_vision_transformer( + 'vit_base_patch16_siglip_gap_512', pretrained=pretrained, 
**dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch16_siglip_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP).""" + model_args = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False, global_pool='avg', fc_norm=False, + ) + model = _create_vision_transformer( + 'vit_large_patch16_siglip_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_patch16_siglip_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP).""" + model_args = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False, global_pool='avg', fc_norm=False, + ) + model = _create_vision_transformer( + 'vit_large_patch16_siglip_gap_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_so400m_patch14_siglip_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP).""" + model_args = dict( + patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, + class_token=False, global_pool='avg', fc_norm=False, + ) + model = _create_vision_transformer( + 'vit_so400m_patch14_siglip_gap_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_so400m_patch16_siglip_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP).""" + model_args = dict( + patch_size=16, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, + class_token=False, global_pool='avg', fc_norm=False, + ) + model = _create_vision_transformer( + 'vit_so400m_patch16_siglip_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_so400m_patch14_siglip_gap_378(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP).""" + model_args = dict( + patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, + class_token=False, global_pool='avg', fc_norm=False, + ) + model = _create_vision_transformer( + 'vit_so400m_patch14_siglip_gap_378', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_so400m_patch14_siglip_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP).""" + model_args = dict( + patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, + class_token=False, global_pool='avg', fc_norm=False, + ) + model = _create_vision_transformer( + 'vit_so400m_patch14_siglip_gap_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_so400m_patch14_siglip_gap_448(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP).""" + model_args = dict( + patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, + class_token=False, global_pool='avg', fc_norm=False, + ) + model = _create_vision_transformer( + 
'vit_so400m_patch14_siglip_gap_448', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_so400m_patch14_siglip_gap_896(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP).""" + model_args = dict( + patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, + class_token=False, global_pool='avg', fc_norm=False, + ) + model = _create_vision_transformer( + 'vit_so400m_patch14_siglip_gap_896', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_wee_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=256, depth=14, num_heads=4, init_values=1e-5, mlp_ratio=5, + class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg', + ) + model = _create_vision_transformer( + 'vit_wee_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_pwee_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=256, depth=16, num_heads=4, init_values=1e-5, mlp_ratio=5, + class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg', block_fn=ParallelScalingBlock, + ) + model = _create_vision_transformer( + 'vit_pwee_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_little_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=320, depth=14, num_heads=5, init_values=1e-5, mlp_ratio=5.6, + class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg', + ) + model = _create_vision_transformer( + 'vit_little_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_little_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=320, depth=14, num_heads=5, init_values=1e-5, mlp_ratio=5.6, + class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg', + ) + model = _create_vision_transformer( + 'vit_little_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_medium_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=512, depth=12, num_heads=8, init_values=1e-5, + class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg', + ) + model = _create_vision_transformer( + 'vit_medium_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_medium_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=512, depth=12, num_heads=8, init_values=1e-5, + class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg', + ) + model = _create_vision_transformer( + 'vit_medium_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_mediumd_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=512, depth=20, num_heads=8, init_values=1e-5, + class_token=False, no_embed_class=True, 
reg_tokens=4, global_pool='avg', + ) + model = _create_vision_transformer( + 'vit_mediumd_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_mediumd_patch16_reg4_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=512, depth=20, num_heads=8, init_values=1e-5, + class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg', + ) + model = _create_vision_transformer( + 'vit_mediumd_patch16_reg4_gap_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_betwixt_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=640, depth=12, num_heads=10, init_values=1e-5, + class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg', + ) + model = _create_vision_transformer( + 'vit_betwixt_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_betwixt_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=640, depth=12, num_heads=10, init_values=1e-5, + class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg', + ) + model = _create_vision_transformer( + 'vit_betwixt_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_betwixt_patch16_reg4_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=640, depth=12, num_heads=10, init_values=1e-5, + class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg', + ) + model = _create_vision_transformer( + 'vit_betwixt_patch16_reg4_gap_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, + no_embed_class=True, global_pool='avg', reg_tokens=4, + ) + model = _create_vision_transformer( + 'vit_base_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_so150m_patch16_reg4_map_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=896, depth=18, num_heads=14, mlp_ratio=2.572, + class_token=False, reg_tokens=4, global_pool='map', + ) + model = _create_vision_transformer( + 'vit_so150m_patch16_reg4_map_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_so150m_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=16, embed_dim=896, depth=18, num_heads=14, mlp_ratio=2.572, + class_token=False, reg_tokens=4, global_pool='avg', fc_norm=False, + ) + model = _create_vision_transformer( + 'vit_so150m_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_intern300m_patch14_448(pretrained: bool = False, **kwargs) -> VisionTransformer: + model_args = dict( + patch_size=14, embed_dim=1024, depth=24, num_heads=16, + init_values=0.1, final_norm=False, dynamic_img_size=True, + ) + model = _create_vision_transformer( + 'vit_intern300m_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) + return 
model + + +@register_model +def test_vit(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT Test + """ + model_args = dict(patch_size=16, embed_dim=64, depth=6, num_heads=2, mlp_ratio=3, dynamic_img_size=True) + model = _create_vision_transformer('test_vit', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def test_vit2(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT Test + """ + model_args = dict( + patch_size=16, embed_dim=64, depth=8, num_heads=2, mlp_ratio=3, + class_token=False, reg_tokens=1, global_pool='avg', init_values=1e-5, dynamic_img_size=True) + model = _create_vision_transformer('test_vit2', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def test_vit3(pretrained: bool = False, **kwargs) -> VisionTransformer: + """ ViT Test + """ + model_args = dict( + patch_size=16, embed_dim=96, depth=9, num_heads=3, mlp_ratio=2, + class_token=False, reg_tokens=1, global_pool='map', init_values=1e-5) + model = _create_vision_transformer('test_vit3', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +register_model_deprecations(__name__, { + 'vit_tiny_patch16_224_in21k': 'vit_tiny_patch16_224.augreg_in21k', + 'vit_small_patch32_224_in21k': 'vit_small_patch32_224.augreg_in21k', + 'vit_small_patch16_224_in21k': 'vit_small_patch16_224.augreg_in21k', + 'vit_base_patch32_224_in21k': 'vit_base_patch32_224.augreg_in21k', + 'vit_base_patch16_224_in21k': 'vit_base_patch16_224.augreg_in21k', + 'vit_base_patch8_224_in21k': 'vit_base_patch8_224.augreg_in21k', + 'vit_large_patch32_224_in21k': 'vit_large_patch32_224.orig_in21k', + 'vit_large_patch16_224_in21k': 'vit_large_patch16_224.augreg_in21k', + 'vit_huge_patch14_224_in21k': 'vit_huge_patch14_224.orig_in21k', + 'vit_base_patch32_224_sam': 'vit_base_patch32_224.sam', + 'vit_base_patch16_224_sam': 'vit_base_patch16_224.sam', + 'vit_small_patch16_224_dino': 'vit_small_patch16_224.dino', + 'vit_small_patch8_224_dino': 'vit_small_patch8_224.dino', + 'vit_base_patch16_224_dino': 'vit_base_patch16_224.dino', + 'vit_base_patch8_224_dino': 'vit_base_patch8_224.dino', + 'vit_base_patch16_224_miil_in21k': 'vit_base_patch16_224_miil.in21k', + 'vit_base_patch32_224_clip_laion2b': 'vit_base_patch32_clip_224.laion2b', + 'vit_large_patch14_224_clip_laion2b': 'vit_large_patch14_clip_224.laion2b', + 'vit_huge_patch14_224_clip_laion2b': 'vit_huge_patch14_clip_224.laion2b', + 'vit_giant_patch14_224_clip_laion2b': 'vit_giant_patch14_clip_224.laion2b', +}) diff --git a/pytorch-image-models/timm/models/vision_transformer_hybrid.py b/pytorch-image-models/timm/models/vision_transformer_hybrid.py new file mode 100644 index 0000000000000000000000000000000000000000..4cf3a7664b29f88c369b2667d4c8c49a90f5b6ef --- /dev/null +++ b/pytorch-image-models/timm/models/vision_transformer_hybrid.py @@ -0,0 +1,408 @@ +""" Hybrid Vision Transformer (ViT) in PyTorch + +A PyTorch implement of the Hybrid Vision Transformers as described in: + +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' + - https://arxiv.org/abs/2010.11929 + +`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` + - https://arxiv.org/abs/2106.10270 + +NOTE These hybrid model definitions depend on code in vision_transformer.py. +They were moved here to keep file sizes sane. 
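+
+Example (a minimal usage sketch; assumes the timm package and the model registrations in this file):
+
+    import torch, timm
+    model = timm.create_model('vit_small_r26_s32_224', pretrained=False).eval()
+    # conv backbone features are projected to ViT tokens via HybridEmbed
+    logits = model(torch.randn(1, 3, 224, 224))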
+ +Hacked together by / Copyright 2020, Ross Wightman +""" +import math +from functools import partial +from typing import Dict, List, Optional, Tuple, Type, Union + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import StdConv2dSame, StdConv2d, ConvNormAct, to_2tuple, to_ntuple, HybridEmbed + +from ._builder import build_model_with_cfg +from ._registry import generate_default_cfgs, register_model, register_model_deprecations +from .resnet import resnet26d, resnet50d +from .resnetv2 import ResNetV2, create_resnetv2_stem +from .vision_transformer import VisionTransformer + + +class ConvStem(nn.Sequential): + def __init__( + self, + in_chans: int = 3, + depth: int = 3, + channels: Union[int, Tuple[int, ...]] = 64, + kernel_size: Union[int, Tuple[int, ...]] = 3, + stride: Union[int, Tuple[int, ...]] = (2, 2, 2), + padding: Union[str, int, Tuple[int, ...]] = "", + norm_layer: Type[nn.Module] = nn.BatchNorm2d, + act_layer: Type[nn.Module] = nn.ReLU, + ): + super().__init__() + if isinstance(channels, int): + # a default tiered channel strategy + channels = tuple([channels // 2**i for i in range(depth)][::-1]) + + kernel_size = to_ntuple(depth)(kernel_size) + padding = to_ntuple(depth)(padding) + assert depth == len(stride) == len(kernel_size) == len(channels) + + in_chs = in_chans + for i in range(len(channels)): + last_conv = i == len(channels) - 1 + self.add_module(f'{i}', ConvNormAct( + in_chs, + channels[i], + kernel_size=kernel_size[i], + stride=stride[i], + padding=padding[i], + bias=last_conv, + apply_norm=not last_conv, + apply_act=not last_conv, + norm_layer=norm_layer, + act_layer=act_layer, + )) + in_chs = channels[i] + + +def _resnetv2(layers=(3, 4, 9), **kwargs): + """ ResNet-V2 backbone helper""" + padding_same = kwargs.get('padding_same', True) + stem_type = 'same' if padding_same else '' + conv_layer = partial(StdConv2dSame, eps=1e-8) if padding_same else partial(StdConv2d, eps=1e-8) + if len(layers): + backbone = ResNetV2( + layers=layers, num_classes=0, global_pool='', in_chans=kwargs.get('in_chans', 3), + preact=False, stem_type=stem_type, conv_layer=conv_layer) + else: + backbone = create_resnetv2_stem( + kwargs.get('in_chans', 3), stem_type=stem_type, preact=False, conv_layer=conv_layer) + return backbone + + +def _convert_mobileclip(state_dict, model, prefix='image_encoder.model.'): + out = {} + for k, v in state_dict.items(): + if not k.startswith(prefix): + continue + k = k.replace(prefix, '') + k = k.replace('patch_emb.', 'patch_embed.backbone.') + k = k.replace('block.conv', 'conv') + k = k.replace('block.norm', 'bn') + k = k.replace('post_transformer_norm.', 'norm.') + k = k.replace('pre_norm_mha.0', 'norm1') + k = k.replace('pre_norm_mha.1', 'attn') + k = k.replace('pre_norm_ffn.0', 'norm2') + k = k.replace('pre_norm_ffn.1', 'mlp.fc1') + k = k.replace('pre_norm_ffn.4', 'mlp.fc2') + k = k.replace('qkv_proj.', 'qkv.') + k = k.replace('out_proj.', 'proj.') + k = k.replace('transformer.', 'blocks.') + if k == 'pos_embed.pos_embed.pos_embed': + k = 'pos_embed' + v = v.squeeze(0) + if 'classifier.proj' in k: + bias_k = k.replace('classifier.proj', 'head.bias') + k = k.replace('classifier.proj', 'head.weight') + v = v.T + out[bias_k] = torch.zeros(v.shape[0]) + out[k] = v + return out + + +def checkpoint_filter_fn( + state_dict: Dict[str, torch.Tensor], + model: VisionTransformer, + interpolation: str = 'bicubic', + antialias: bool = True, +) -> Dict[str, torch.Tensor]: + from .vision_transformer 
import checkpoint_filter_fn as _filter_fn + + if 'image_encoder.model.patch_emb.0.block.conv.weight' in state_dict: + state_dict = _convert_mobileclip(state_dict, model) + + return _filter_fn(state_dict, model, interpolation=interpolation, antialias=antialias) + + +def _create_vision_transformer_hybrid(variant, backbone, embed_args=None, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', 3) + embed_args = embed_args or {} + embed_layer = partial(HybridEmbed, backbone=backbone, **embed_args) + kwargs.setdefault('embed_layer', embed_layer) + kwargs.setdefault('patch_size', 1) # default patch size for hybrid models if not set + return build_model_with_cfg( + VisionTransformer, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'patch_embed.backbone.stem.conv', 'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + # hybrid in-1k models (weights from official JAX impl where they exist) + 'vit_tiny_r_s16_p8_224.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', + hf_hub_id='timm/', + custom_load=True, + first_conv='patch_embed.backbone.conv'), + 'vit_tiny_r_s16_p8_384.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + hf_hub_id='timm/', + first_conv='patch_embed.backbone.conv', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True), + 'vit_small_r26_s32_224.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_light0-wd_0.03-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.03-res_224.npz', + hf_hub_id='timm/', + custom_load=True, + ), + 'vit_small_r26_s32_384.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + hf_hub_id='timm/', + input_size=(3, 384, 384), crop_pct=1.0, custom_load=True), + 'vit_base_r26_s32_224.untrained': _cfg(), + 'vit_base_r50_s16_384.orig_in21k_ft_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth', + hf_hub_id='timm/', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_r50_s32_224.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', + hf_hub_id='timm/', + custom_load=True, + ), + 'vit_large_r50_s32_384.augreg_in21k_ft_in1k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', + hf_hub_id='timm/', + input_size=(3, 384, 384), crop_pct=1.0, custom_load=True, + ), + + # hybrid in-21k models (weights from official Google JAX impl where they exist) + 'vit_tiny_r_s16_p8_224.augreg_in21k': _cfg( + 
url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', + hf_hub_id='timm/', + num_classes=21843, crop_pct=0.9, first_conv='patch_embed.backbone.conv', custom_load=True), + 'vit_small_r26_s32_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0.npz', + hf_hub_id='timm/', + num_classes=21843, crop_pct=0.9, custom_load=True), + 'vit_base_r50_s16_224.orig_in21k': _cfg( + #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth', + hf_hub_id='timm/', + num_classes=0, crop_pct=0.9), + 'vit_large_r50_s32_224.augreg_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0.npz', + hf_hub_id='timm/', + num_classes=21843, crop_pct=0.9, custom_load=True), + + # hybrid models (using timm resnet backbones) + 'vit_small_resnet26d_224.untrained': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_small_resnet50d_s16_224.untrained': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_base_resnet26d_224.untrained': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_base_resnet50d_224.untrained': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + + 'vit_base_mci_224.apple_mclip_lt': _cfg( + hf_hub_id='apple/mobileclip_b_lt_timm', + url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_blt.pt', + num_classes=512, + mean=(0., 0., 0.), std=(1., 1., 1.), first_conv='patch_embed.backbone.0.conv', + ), + 'vit_base_mci_224.apple_mclip': _cfg( + hf_hub_id='apple/mobileclip_b_timm', + url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_b.pt', + num_classes=512, + mean=(0., 0., 0.), std=(1., 1., 1.), first_conv='patch_embed.backbone.0.conv', + ), +}) + + +@register_model +def vit_tiny_r_s16_p8_224(pretrained=False, **kwargs) -> VisionTransformer: + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 224 x 224. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_tiny_r_s16_p8_384(pretrained=False, **kwargs) -> VisionTransformer: + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 384 x 384. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_small_r26_s32_224(pretrained=False, **kwargs) -> VisionTransformer: + """ R26+ViT-S/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_args = dict(embed_dim=384, depth=12, num_heads=6) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_small_r26_s32_384(pretrained=False, **kwargs) -> VisionTransformer: + """ R26+ViT-S/S32 hybrid. 
+ """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_args = dict(embed_dim=384, depth=12, num_heads=6) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_r26_s32_224(pretrained=False, **kwargs) -> VisionTransformer: + """ R26+ViT-B/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_args = dict(embed_dim=768, depth=12, num_heads=12) + model = _create_vision_transformer_hybrid( + 'vit_base_r26_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_r50_s16_224(pretrained=False, **kwargs) -> VisionTransformer: + """ R50+ViT-B/S16 hybrid from original paper (https://arxiv.org/abs/2010.11929). + """ + backbone = _resnetv2((3, 4, 9), **kwargs) + model_args = dict(embed_dim=768, depth=12, num_heads=12) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_r50_s16_384(pretrained=False, **kwargs) -> VisionTransformer: + """ R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + backbone = _resnetv2((3, 4, 9), **kwargs) + model_args = dict(embed_dim=768, depth=12, num_heads=12) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_r50_s32_224(pretrained=False, **kwargs) -> VisionTransformer: + """ R50+ViT-L/S32 hybrid. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_args = dict(embed_dim=1024, depth=24, num_heads=16) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_large_r50_s32_384(pretrained=False, **kwargs) -> VisionTransformer: + """ R50+ViT-L/S32 hybrid. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_args = dict(embed_dim=1024, depth=24, num_heads=16) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_small_resnet26d_224(pretrained=False, **kwargs) -> VisionTransformer: + """ Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights. + """ + backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_args = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3) + model = _create_vision_transformer_hybrid( + 'vit_small_resnet26d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_small_resnet50d_s16_224(pretrained=False, **kwargs) -> VisionTransformer: + """ Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights. 
+ """ + backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[3]) + model_args = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3) + model = _create_vision_transformer_hybrid( + 'vit_small_resnet50d_s16_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_resnet26d_224(pretrained=False, **kwargs) -> VisionTransformer: + """ Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights. + """ + backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_args = dict(embed_dim=768, depth=12, num_heads=12) + model = _create_vision_transformer_hybrid( + 'vit_base_resnet26d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_resnet50d_224(pretrained=False, **kwargs) -> VisionTransformer: + """ Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. + """ + backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_args = dict(embed_dim=768, depth=12, num_heads=12) + model = _create_vision_transformer_hybrid( + 'vit_base_resnet50d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_base_mci_224(pretrained=False, **kwargs) -> VisionTransformer: + """ Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. + """ + backbone = ConvStem( + channels=(768//4, 768//4, 768), + stride=(4, 2, 2), + kernel_size=(4, 2, 2), + padding=0, + in_chans=kwargs.get('in_chans', 3), + act_layer=nn.GELU, + ) + model_args = dict(embed_dim=768, depth=12, num_heads=12, no_embed_class=True) + model = _create_vision_transformer_hybrid( + 'vit_base_mci_224', backbone=backbone, embed_args=dict(proj=False), + pretrained=pretrained, **dict(model_args, **kwargs) + ) + return model + + +register_model_deprecations(__name__, { + 'vit_tiny_r_s16_p8_224_in21k': 'vit_tiny_r_s16_p8_224.augreg_in21k', + 'vit_small_r26_s32_224_in21k': 'vit_small_r26_s32_224.augreg_in21k', + 'vit_base_r50_s16_224_in21k': 'vit_base_r50_s16_224.orig_in21k', + 'vit_base_resnet50_224_in21k': 'vit_base_r50_s16_224.orig_in21k', + 'vit_large_r50_s32_224_in21k': 'vit_large_r50_s32_224.augreg_in21k', + 'vit_base_resnet50_384': 'vit_base_r50_s16_384.orig_in21k_ft_in1k' +}) diff --git a/pytorch-image-models/timm/models/vision_transformer_relpos.py b/pytorch-image-models/timm/models/vision_transformer_relpos.py new file mode 100644 index 0000000000000000000000000000000000000000..234195973f784654579c063ed3511ec129bdd78f --- /dev/null +++ b/pytorch-image-models/timm/models/vision_transformer_relpos.py @@ -0,0 +1,703 @@ +""" Relative Position Vision Transformer (ViT) in PyTorch + +NOTE: these models are experimental / WIP, expect changes + +Hacked together by / Copyright 2022, Ross Wightman +""" +import logging +import math +from functools import partial +from typing import List, Optional, Tuple, Type, Union + +try: + from typing import Literal +except ImportError: + from typing_extensions import Literal + +import torch +import torch.nn as nn +from torch.jit import Final +from torch.utils.checkpoint import checkpoint + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from timm.layers import PatchEmbed, Mlp, DropPath, RelPosMlp, RelPosBias, use_fused_attn, LayerType +from ._builder import 
build_model_with_cfg +from ._features import feature_take_indices +from ._manipulate import named_apply +from ._registry import generate_default_cfgs, register_model +from .vision_transformer import get_init_weights_vit + +__all__ = ['VisionTransformerRelPos'] # model_registry will add each entrypoint fn to this + +_logger = logging.getLogger(__name__) + + +class RelPosAttention(nn.Module): + fused_attn: Final[bool] + + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + qk_norm=False, + rel_pos_cls=None, + attn_drop=0., + proj_drop=0., + norm_layer=nn.LayerNorm, + ): + super().__init__() + assert dim % num_heads == 0, 'dim should be divisible by num_heads' + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.scale = self.head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() + self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() + self.rel_pos = rel_pos_cls(num_heads=num_heads) if rel_pos_cls else None + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + q = self.q_norm(q) + k = self.k_norm(k) + + if self.fused_attn: + if self.rel_pos is not None: + attn_bias = self.rel_pos.get_bias() + elif shared_rel_pos is not None: + attn_bias = shared_rel_pos + else: + attn_bias = None + + x = torch.nn.functional.scaled_dot_product_attention( + q, k, v, + attn_mask=attn_bias, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + if self.rel_pos is not None: + attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos) + elif shared_rel_pos is not None: + attn = attn + shared_rel_pos + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LayerScale(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + return x.mul_(self.gamma) if self.inplace else x * self.gamma + + +class RelPosBlock(nn.Module): + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_norm=False, + rel_pos_cls=None, + init_values=None, + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = RelPosAttention( + dim, + num_heads, + qkv_bias=qkv_bias, + qk_norm=qk_norm, + rel_pos_cls=rel_pos_cls, + attn_drop=attn_drop, + proj_drop=proj_drop, + ) + self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + ) + self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): + x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos))) + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class ResPostRelPosBlock(nn.Module): + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_norm=False, + rel_pos_cls=None, + init_values=None, + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.init_values = init_values + + self.attn = RelPosAttention( + dim, + num_heads, + qkv_bias=qkv_bias, + qk_norm=qk_norm, + rel_pos_cls=rel_pos_cls, + attn_drop=attn_drop, + proj_drop=proj_drop, + ) + self.norm1 = norm_layer(dim) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + ) + self.norm2 = norm_layer(dim) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.init_weights() + + def init_weights(self): + # NOTE this init overrides that base model init with specific changes for the block type + if self.init_values is not None: + nn.init.constant_(self.norm1.weight, self.init_values) + nn.init.constant_(self.norm2.weight, self.init_values) + + def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): + x = x + self.drop_path1(self.norm1(self.attn(x, shared_rel_pos=shared_rel_pos))) + x = x + self.drop_path2(self.norm2(self.mlp(x))) + return x + + +class VisionTransformerRelPos(nn.Module): + """ Vision Transformer w/ Relative Position Bias + + Differing from classic vit, this impl + * uses relative position index (swin v1 / beit) or relative log coord + mlp (swin v2) pos embed + * defaults to no class token (can be enabled) + * defaults to global avg pool for head (can be changed) + * layer-scale (residual branch gain) enabled + """ + + def __init__( + self, + img_size: Union[int, Tuple[int, int]] = 224, + patch_size: Union[int, Tuple[int, int]] = 16, + in_chans: int = 3, + num_classes: int = 1000, + global_pool: Literal['', 'avg', 'token', 'map'] = 'avg', + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + mlp_ratio: float = 4., + qkv_bias: bool = True, + qk_norm: bool = False, + init_values: Optional[float] = 1e-6, + class_token: bool = False, + fc_norm: bool = False, + rel_pos_type: str = 'mlp', + rel_pos_dim: Optional[int] = None, + shared_rel_pos: bool = False, + drop_rate: float = 0., + proj_drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + weight_init: Literal['skip', 'jax', 'moco', ''] = 'skip', + fix_init: bool = False, + embed_layer: Type[nn.Module] = PatchEmbed, + norm_layer: Optional[LayerType] = None, + act_layer: Optional[LayerType] = None, + block_fn: Type[nn.Module] = RelPosBlock + ): + """ + Args: + img_size: input image size + patch_size: patch size + in_chans: number of input channels + num_classes: number of classes for classification head + global_pool: type of global pooling for final sequence (default: 'avg') + embed_dim: embedding dimension + depth: depth of transformer + num_heads: number of attention heads + mlp_ratio: ratio of mlp hidden dim to embedding dim + qkv_bias: enable bias for qkv if True + qk_norm: Enable normalization of query and key in attention + init_values: layer-scale init values + class_token: use class token (default: False) + fc_norm: use pre classifier 
norm instead of pre-pool + rel_pos_type: type of relative position + shared_rel_pos: share relative pos across all blocks + drop_rate: dropout rate + proj_drop_rate: projection dropout rate + attn_drop_rate: attention dropout rate + drop_path_rate: stochastic depth rate + weight_init: weight init scheme + fix_init: apply weight initialization fix (scaling w/ layer index) + embed_layer: patch embedding layer + norm_layer: normalization layer + act_layer: MLP activation layer + """ + super().__init__() + assert global_pool in ('', 'avg', 'token') + assert class_token or global_pool != 'token' + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models + self.num_prefix_tokens = 1 if class_token else 0 + self.grad_checkpointing = False + + self.patch_embed = embed_layer( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + ) + feat_size = self.patch_embed.grid_size + r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size + + rel_pos_args = dict(window_size=feat_size, prefix_tokens=self.num_prefix_tokens) + if rel_pos_type.startswith('mlp'): + if rel_pos_dim: + rel_pos_args['hidden_dim'] = rel_pos_dim + if 'swin' in rel_pos_type: + rel_pos_args['mode'] = 'swin' + rel_pos_cls = partial(RelPosMlp, **rel_pos_args) + else: + rel_pos_cls = partial(RelPosBias, **rel_pos_args) + self.shared_rel_pos = None + if shared_rel_pos: + self.shared_rel_pos = rel_pos_cls(num_heads=num_heads) + # NOTE shared rel pos currently mutually exclusive w/ per-block, but could support both... + rel_pos_cls = None + + self.cls_token = nn.Parameter(torch.zeros(1, self.num_prefix_tokens, embed_dim)) if class_token else None + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + block_fn( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_norm=qk_norm, + rel_pos_cls=rel_pos_cls, + init_values=init_values, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + act_layer=act_layer, + ) + for i in range(depth)]) + self.feature_info = [ + dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)] + self.norm = norm_layer(embed_dim) if not fc_norm else nn.Identity() + + # Classifier Head + self.fc_norm = norm_layer(embed_dim) if fc_norm else nn.Identity() + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + if weight_init != 'skip': + self.init_weights(weight_init) + if fix_init: + self.fix_init_weight() + + def init_weights(self, mode=''): + assert mode in ('jax', 'moco', '') + if self.cls_token is not None: + nn.init.normal_(self.cls_token, std=1e-6) + named_apply(get_init_weights_vit(mode), self) + + def fix_init_weight(self): + def rescale(param, _layer_id): + param.div_(math.sqrt(2.0 * _layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + @torch.jit.ignore + def no_weight_decay(self): + return {'cls_token'} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^cls_token|patch_embed', # stem and embed + 
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'avg', 'token') + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + return_prefix_tokens: bool = False, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + return_prefix_tokens: Return both prefix and spatial intermediate tokens + norm: Apply norm layer to all intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' + reshape = output_fmt == 'NCHW' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + + # forward pass + B, _, height, width = x.shape + x = self.patch_embed(x) + if self.cls_token is not None: + x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + + shared_rel_pos = self.shared_rel_pos.get_bias() if self.shared_rel_pos is not None else None + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + blocks = self.blocks + else: + blocks = self.blocks[:max_index + 1] + for i, blk in enumerate(blocks): + x = blk(x, shared_rel_pos=shared_rel_pos) + if i in take_indices: + # normalize intermediates with final norm layer if enabled + intermediates.append(self.norm(x) if norm else x) + + # process intermediates + if self.num_prefix_tokens: + # split prefix (e.g. class, distill) and spatial feature tokens + prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates] + intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates] + if reshape: + # reshape to BCHW output format + H, W = self.patch_embed.dynamic_feat_size((height, width)) + intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] + if not torch.jit.is_scripting() and return_prefix_tokens: + # return_prefix not support in torchscript due to poor type handling + intermediates = list(zip(intermediates, prefix_tokens)) + + if intermediates_only: + return intermediates + + x = self.norm(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
+ """ + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + self.blocks = self.blocks[:max_index + 1] # truncate blocks + if prune_norm: + self.norm = nn.Identity() + if prune_head: + self.fc_norm = nn.Identity() + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.patch_embed(x) + if self.cls_token is not None: + x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + + shared_rel_pos = self.shared_rel_pos.get_bias() if self.shared_rel_pos is not None else None + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(blk, x, shared_rel_pos=shared_rel_pos) + else: + x = blk(x, shared_rel_pos=shared_rel_pos) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] + x = self.fc_norm(x) + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_vision_transformer_relpos(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', 3) + model = build_model_with_cfg( + VisionTransformerRelPos, variant, pretrained, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'vit_relpos_base_patch32_plus_rpn_256.sw_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_replos_base_patch32_plus_rpn_256-sw-dd486f51.pth', + hf_hub_id='timm/', + input_size=(3, 256, 256)), + 'vit_relpos_base_patch16_plus_240.untrained': _cfg(url='', input_size=(3, 240, 240)), + + 'vit_relpos_small_patch16_224.sw_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_small_patch16_224-sw-ec2778b4.pth', + hf_hub_id='timm/'), + 'vit_relpos_medium_patch16_224.sw_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_224-sw-11c174af.pth', + hf_hub_id='timm/'), + 'vit_relpos_base_patch16_224.sw_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_224-sw-49049aed.pth', + hf_hub_id='timm/'), + + 'vit_srelpos_small_patch16_224.sw_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_small_patch16_224-sw-6cdb8849.pth', + hf_hub_id='timm/'), + 'vit_srelpos_medium_patch16_224.sw_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_medium_patch16_224-sw-ad702b8c.pth', + hf_hub_id='timm/'), + + 'vit_relpos_medium_patch16_cls_224.sw_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_cls_224-sw-cfe8e259.pth', + hf_hub_id='timm/'), + 'vit_relpos_base_patch16_cls_224.untrained': _cfg(), + 'vit_relpos_base_patch16_clsgap_224.sw_in1k': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_gapcls_224-sw-1a341d6c.pth', + hf_hub_id='timm/'), + + 'vit_relpos_small_patch16_rpn_224.untrained': _cfg(), + 'vit_relpos_medium_patch16_rpn_224.sw_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_rpn_224-sw-5d2befd8.pth', + hf_hub_id='timm/'), + 'vit_relpos_base_patch16_rpn_224.untrained': _cfg(), +}) + + +@register_model +def vit_relpos_base_patch32_plus_rpn_256(pretrained=False, **kwargs) -> VisionTransformerRelPos: + """ ViT-Base (ViT-B/32+) w/ relative log-coord position and residual post-norm, no class token + """ + model_args = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, block_fn=ResPostRelPosBlock) + model = _create_vision_transformer_relpos( + 'vit_relpos_base_patch32_plus_rpn_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_relpos_base_patch16_plus_240(pretrained=False, **kwargs) -> VisionTransformerRelPos: + """ ViT-Base (ViT-B/16+) w/ relative log-coord position, no class token + """ + model_args = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14) + model = _create_vision_transformer_relpos( + 'vit_relpos_base_patch16_plus_240', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_relpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: + """ ViT-Base (ViT-B/16) w/ relative log-coord position, no class token + """ + model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=True) + model = _create_vision_transformer_relpos( + 'vit_relpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_relpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: + """ ViT-Base (ViT-B/16) w/ relative log-coord position, no class token + """ + model_args = dict( + patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=True) + model = _create_vision_transformer_relpos( + 'vit_relpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_relpos_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: + """ ViT-Base (ViT-B/16) w/ relative log-coord position, no class token + """ + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True) + model = _create_vision_transformer_relpos( + 'vit_relpos_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_srelpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: + """ ViT-Base (ViT-B/16) w/ shared relative log-coord position, no class token + """ + model_args = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=False, + rel_pos_dim=384, shared_rel_pos=True) + model = _create_vision_transformer_relpos( + 'vit_srelpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_srelpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: + """ ViT-Base (ViT-B/16) w/ shared relative log-coord position, no class token + """ + model_args = dict( + patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False, + rel_pos_dim=512, 
shared_rel_pos=True) + model = _create_vision_transformer_relpos( + 'vit_srelpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_relpos_medium_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: + """ ViT-Base (ViT-M/16) w/ relative log-coord position, class token present + """ + model_args = dict( + patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False, + rel_pos_dim=256, class_token=True, global_pool='token') + model = _create_vision_transformer_relpos( + 'vit_relpos_medium_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_relpos_base_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: + """ ViT-Base (ViT-B/16) w/ relative log-coord position, class token present + """ + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, class_token=True, global_pool='token') + model = _create_vision_transformer_relpos( + 'vit_relpos_base_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_relpos_base_patch16_clsgap_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: + """ ViT-Base (ViT-B/16) w/ relative log-coord position, class token present + NOTE this config is a bit of a mistake, class token was enabled but global avg-pool w/ fc-norm was not disabled + Leaving here for comparisons w/ a future re-train as it performs quite well. + """ + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True, class_token=True) + model = _create_vision_transformer_relpos( + 'vit_relpos_base_patch16_clsgap_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_relpos_small_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: + """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token + """ + model_args = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, block_fn=ResPostRelPosBlock) + model = _create_vision_transformer_relpos( + 'vit_relpos_small_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_relpos_medium_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: + """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token + """ + model_args = dict( + patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, block_fn=ResPostRelPosBlock) + model = _create_vision_transformer_relpos( + 'vit_relpos_medium_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vit_relpos_base_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: + """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token + """ + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, block_fn=ResPostRelPosBlock) + model = _create_vision_transformer_relpos( + 'vit_relpos_base_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model diff --git a/pytorch-image-models/timm/models/vision_transformer_sam.py b/pytorch-image-models/timm/models/vision_transformer_sam.py new file mode 100644 index 0000000000000000000000000000000000000000..a57c166d750820135819ea01a7d2e9fdd9113b93 --- 
/dev/null +++ b/pytorch-image-models/timm/models/vision_transformer_sam.py @@ -0,0 +1,759 @@ +""" Vision Transformer (ViT) in PyTorch + +A PyTorch implement of Vision Transformers as described in: + +'Exploring Plain Vision Transformer Backbones for Object Detection' + - https://arxiv.org/abs/2203.16527 + +'Segment Anything Model (SAM)' + - https://github.com/facebookresearch/segment-anything/ + +""" +import logging +from functools import partial +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from timm.layers import PatchEmbed, Mlp, DropPath, PatchDropout, LayerNorm2d, ClassifierHead, NormMlpClassifierHead, \ + Format, resample_abs_pos_embed_nhwc, RotaryEmbeddingCat, apply_rot_embed_cat, to_2tuple, use_fused_attn +from torch.jit import Final + +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._features_fx import register_notrace_function +from ._manipulate import checkpoint_seq +from ._registry import generate_default_cfgs, register_model + +# model_registry will add each entrypoint fn to this +__all__ = ['VisionTransformerSAM'] + + +_logger = logging.getLogger(__name__) + + +def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: + """ + Get relative positional embeddings according to the relative positions of + query and key sizes. + Args: + q_size (int): size of query q. + k_size (int): size of key k. + rel_pos (Tensor): relative position embeddings (L, C). + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. + rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode="linear", + ) + rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) + else: + rel_pos_resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. + q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + +register_notrace_function(get_rel_pos) + + +def get_decomposed_rel_pos_bias( + q: torch.Tensor, + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, + q_size: Tuple[int, int], + k_size: Tuple[int, int], +) -> torch.Tensor: + """ + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py + Args: + q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). + rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. + rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. + q_size (Tuple): spatial sequence size of query q with (q_h, q_w). + k_size (Tuple): spatial sequence size of key k with (k_h, k_w). 
+ + Returns: + bias (Tensor): attention bias to add to attention map + """ + q_h, q_w = q_size + k_h, k_w = k_size + Rh = get_rel_pos(q_h, k_h, rel_pos_h) + Rw = get_rel_pos(q_w, k_w, rel_pos_w) + + B, _, dim = q.shape + r_q = q.reshape(B, q_h, q_w, dim) + rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) + rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) + + attn_bias = rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] + return attn_bias.reshape(-1, q_h * q_w, k_h * k_w) + + +class Attention(nn.Module): + fused_attn: Final[bool] + + def __init__( + self, + dim, + num_heads=8, + qkv_bias=True, + qk_norm=False, + attn_drop=0., + proj_drop=0., + norm_layer=nn.LayerNorm, + use_rel_pos: bool = False, + input_size: Optional[Tuple[int, int]] = None, + rope: Optional[nn.Module] = None, + ): + super().__init__() + assert dim % num_heads == 0, 'dim should be divisible by num_heads' + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.scale = self.head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() + self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.use_rel_pos = use_rel_pos + if self.use_rel_pos: + assert rope is None + assert ( + input_size is not None + ), "Input size must be provided if using relative positional encoding." + # initialize relative positional embeddings + self.rel_pos_h = nn.Parameter(torch.zeros( + 2 * input_size[0] - 1, self.head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros( + 2 * input_size[1] - 1, self.head_dim)) + self.rope = rope + + def forward(self, x): + B, H, W, _ = x.shape + N = H * W + x = x.reshape(B, N, -1) + qkv = self.qkv(x).view(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + # qkv with shape (3, B, nHead, H * W, C) + q, k, v = qkv.reshape(3, B * self.num_heads, N, -1).unbind(0) + # q, k, v with shape (B * nHead, H * W, C) + q, k = self.q_norm(q), self.k_norm(k) + + if self.use_rel_pos: + attn_bias = get_decomposed_rel_pos_bias(q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)) + else: + attn_bias = None + if self.rope is not None: + rope = self.rope.get_embed() + q = apply_rot_embed_cat(q, rope).type_as(v) + k = apply_rot_embed_cat(k, rope).type_as(v) + + if self.fused_attn: + x = torch.nn.functional.scaled_dot_product_attention( + q, k, v, + attn_mask=attn_bias, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + if attn_bias is not None: + attn = attn + attn_bias + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.view(B, self.num_heads, N, -1).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + x = x.view(B, H, W, -1) + return x + + +class LayerScale(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + return x.mul_(self.gamma) if self.inplace else x * self.gamma + + +class Block(nn.Module): + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=True, + qk_norm=False, + proj_drop=0., + attn_drop=0., + init_values=None, + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + mlp_layer=Mlp, + use_rel_pos=False, + window_size=0, + input_size=None, + rope=None, + ): + 
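# NOTE: window_size controls this block's attention scope. VisionTransformerSAM constructs blocks whose
# index appears in global_attn_indexes with window_size=0 (global attention over the full feature grid),
# while all remaining blocks attend within window_size x window_size windows. Accordingly, the rel-pos
# table size / ROPE module handed to Attention is sized to the window when windowed, and to the full
# input grid otherwise.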
super().__init__() + self.window_size = window_size + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_norm=qk_norm, + attn_drop=attn_drop, + proj_drop=proj_drop, + norm_layer=norm_layer, + use_rel_pos=use_rel_pos, + input_size=input_size if window_size == 0 else (window_size, window_size), + rope=rope, + ) + self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = mlp_layer( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + ) + self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + B, H, W, _ = x.shape + + shortcut = x + x = self.norm1(x) + # Window partition + pad_hw: Optional[Tuple[int, int]] = None + if self.window_size > 0: + x, pad_hw = window_partition(x, self.window_size) + + x = self.drop_path1(self.ls1(self.attn(x))) + + # Reverse window partition + if self.window_size > 0: + x = window_unpartition(x, self.window_size, (H, W), pad_hw) + + x = shortcut + x + + x = x.reshape(B, H * W, -1) # MLP is faster for N, L, C tensor + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + x = x.reshape(B, H, W, -1) + + return x + + +def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]: + """ + Partition into non-overlapping windows with padding if needed. + Args: + x (tensor): input tokens with [B, H, W, C]. + window_size (int): window size. + + Returns: + windows: windows after partition with [B * num_windows, window_size, window_size, C]. + (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + Hp, Wp = H + pad_h, W + pad_w + + x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows, (Hp, Wp) + + +def window_unpartition( + windows: torch.Tensor, window_size: int, hw: Tuple[int, int], pad_hw: Optional[Tuple[int, int]] = None, +) -> torch.Tensor: + """ + Window unpartition into original sequences and removing padding. + Args: + windows (tensor): input tokens with [B * num_windows, window_size, window_size, C]. + window_size (int): window size. + pad_hw (Tuple): padded height and width (Hp, Wp). + hw (Tuple): original height and width (H, W) before padding. + + Returns: + x: unpartitioned sequences with [B, H, W, C]. 
+ """ + Hp, Wp = pad_hw if pad_hw is not None else hw + H, W = hw + B = windows.shape[0] // (Hp * Wp // window_size // window_size) + x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) + x = x[:, :H, :W, :].contiguous() + return x + + +class VisionTransformerSAM(nn.Module): + """ Vision Transformer for Segment-Anything Model(SAM) + + A PyTorch impl of : `Exploring Plain Vision Transformer Backbones for Object Detection` or `Segment Anything Model (SAM)` + - https://arxiv.org/abs/2010.11929 + """ + + def __init__( + self, + img_size: int = 1024, + patch_size: int = 16, + in_chans: int = 3, + num_classes: int = 768, + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + mlp_ratio: float = 4., + qkv_bias: bool = True, + qk_norm: bool = False, + init_values: Optional[float] = None, + pre_norm: bool = False, + drop_rate: float = 0., + pos_drop_rate: float = 0., + patch_drop_rate: float = 0., + proj_drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + weight_init: str = '', + embed_layer: Callable = partial(PatchEmbed, output_fmt=Format.NHWC, strict_img_size=False), + norm_layer: Optional[Callable] = nn.LayerNorm, + act_layer: Optional[Callable] = nn.GELU, + block_fn: Callable = Block, + mlp_layer: Callable = Mlp, + use_abs_pos: bool = True, + use_rel_pos: bool = False, + use_rope: bool = False, + window_size: int = 14, + global_attn_indexes: Tuple[int, ...] = (), + neck_chans: int = 256, + global_pool: str = 'avg', + head_hidden_size: Optional[int] = None, + ref_feat_shape: Optional[Tuple[Tuple[int, int], Tuple[int, int]]] = None + ): + """ + Args: + img_size: Input image size. + patch_size: Patch size. + in_chans: Number of image input channels. + num_classes: Mumber of classes for classification head. + global_pool: Type of global pooling for final sequence (default: 'token'). + embed_dim: Transformer embedding dimension. + depth: Depth of transformer. + num_heads: Number of attention heads. + mlp_ratio: Ratio of mlp hidden dim to embedding dim. + qkv_bias: Enable bias for qkv projections if True. + init_values: Layer-scale init values (layer-scale enabled if not None). + drop_rate: Head dropout rate. + pos_drop_rate: Position embedding dropout rate. + attn_drop_rate: Attention dropout rate. + drop_path_rate: Stochastic depth rate. + weight_init: Weight initialization scheme. + embed_layer: Patch embedding layer. + norm_layer: Normalization layer. + act_layer: MLP activation layer. + block_fn: Transformer block layer. + use_abs_pos: If True, use absolute positional embeddings. + use_rel_pos: If True, add relative positional embeddings to the attention map. + use_rope: If True, add rotary position embeddings to q/k in attention block. + window_size: Window size for window attention blocks. If 0, not use window attention. + global_attn_indexes: Indexes for blocks using global attention. Used when window_size > 0. + global_pool: Global pooling type. 
+ head_hidden_size: If set, use NormMlpHead + ref_feat_shape: Tuple of reference feature shapes for ROPE, (global, local) + """ + super().__init__() + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models + self.grad_checkpointing = False + + self.patch_embed = embed_layer( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + bias=not pre_norm, # disable bias if pre-norm is used + ) + grid_size = self.patch_embed.grid_size + r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size + + if use_abs_pos: + # Initialize absolute positional embedding with pretrain image size. + self.pos_embed = nn.Parameter(torch.zeros(1, grid_size[0], grid_size[1], embed_dim)) + else: + self.pos_embed = None + self.pos_drop = nn.Dropout(p=pos_drop_rate) + if patch_drop_rate > 0: + self.patch_drop = PatchDropout( + patch_drop_rate, + num_prefix_tokens=0, + ) + else: + self.patch_drop = nn.Identity() + self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity() + + if use_rope: + assert not use_rel_pos, "ROPE and relative pos embeddings should not be enabled at same time" + if ref_feat_shape is not None: + assert len(ref_feat_shape) == 2 + ref_feat_shape_global = to_2tuple(ref_feat_shape[0]) + ref_feat_shape_window = to_2tuple(ref_feat_shape[1]) + else: + ref_feat_shape_global = ref_feat_shape_window = None + self.rope_global = RotaryEmbeddingCat( + embed_dim // num_heads, + in_pixels=False, + feat_shape=grid_size, + ref_feat_shape=ref_feat_shape_global, + ) + self.rope_window = RotaryEmbeddingCat( + embed_dim // num_heads, + in_pixels=False, + feat_shape=to_2tuple(window_size), + ref_feat_shape=ref_feat_shape_window, + ) + else: + self.rope_global = None + self.rope_window = None + + # stochastic depth decay rule + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + self.blocks = nn.Sequential(*[ + block_fn( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_norm=qk_norm, + init_values=init_values, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + act_layer=act_layer, + mlp_layer=mlp_layer, + use_rel_pos=use_rel_pos, + window_size=window_size if i not in global_attn_indexes else 0, + input_size=grid_size, + rope=self.rope_window if i not in global_attn_indexes else self.rope_global, + ) + for i in range(depth)]) + self.feature_info = [ + dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)] + + if neck_chans: + self.neck = nn.Sequential( + nn.Conv2d( + embed_dim, + neck_chans, + kernel_size=1, + bias=False, + ), + LayerNorm2d(neck_chans), + nn.Conv2d( + neck_chans, + neck_chans, + kernel_size=3, + padding=1, + bias=False, + ), + LayerNorm2d(neck_chans), + ) + self.num_features = neck_chans + else: + if head_hidden_size: + self.neck = nn.Identity() + else: + # should have a final norm with standard ClassifierHead + self.neck = LayerNorm2d(embed_dim) + neck_chans = embed_dim + + # Classifier Head + if head_hidden_size: + self.head = NormMlpClassifierHead( + neck_chans, + num_classes, + hidden_size=head_hidden_size, + pool_type=global_pool, + drop_rate=drop_rate, + ) + else: + self.head = ClassifierHead( + neck_chans, + num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + ) + 
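For orientation, a minimal usage sketch (assuming a timm build in which the samvit_* entrypoints defined further below are registered): create the SAM ViT-B/16 encoder as a headless feature backbone and run a dummy forward pass through the conv neck.

import torch
import timm

# SAM ViT-B/16 image encoder without a classifier head; pretrained=True would instead load
# the SA-1B weights referenced in default_cfgs further below.
model = timm.create_model('samvit_base_patch16', pretrained=False, num_classes=0).eval()

x = torch.randn(1, 3, 1024, 1024)      # SAM uses a fixed 1024 x 1024 input by default
with torch.no_grad():
    feats = model.forward_features(x)  # conv-neck output in NCHW layout
print(feats.shape)                     # expected: torch.Size([1, 256, 64, 64]) with the default neck_chans=256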
+ @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'dist_token'} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^pos_embed|patch_embed', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.head.reset(num_classes, global_pool) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to all intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt == 'NCHW', 'Output shape for ViT-SAM must be NCHW.' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + + # forward pass, collect intermediates + x = self.patch_embed(x) + if self.pos_embed is not None: + # dynamically resize abs pos embedding if needed + x = x + resample_abs_pos_embed_nhwc(self.pos_embed, x.shape[1:3]) + x = self.pos_drop(x) + x = self.patch_drop(x) + x = self.norm_pre(x) + + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + blocks = self.blocks + else: + blocks = self.blocks[:max_index + 1] + for i, blk in enumerate(blocks): + x = blk(x) + if i in take_indices: + # make output BCHW + if norm: + # norm is intertwined with neck convs so apply both, changes the dim + # FIXME only apply to final? Need experiments + intermediates.append(self.neck(x.permute(0, 3, 1, 2))) + else: + intermediates.append(x.permute(0, 3, 1, 2)) + + if intermediates_only: + return intermediates + + x = self.neck(x.permute(0, 3, 1, 2)) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Optional[Union[int, List[int]]] = None, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
+ """ + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + self.blocks = self.blocks[:max_index + 1] # truncate blocks + if prune_norm: + # neck is being treated as equivalent to final norm here + self.neck = nn.Identity() + if prune_head: + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.patch_embed(x) + if self.pos_embed is not None: + # dynamically resize abs pos embedding if needed + x = x + resample_abs_pos_embed_nhwc(self.pos_embed, x.shape[1:3]) + x = self.pos_drop(x) + x = self.patch_drop(x) + x = self.norm_pre(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + x = self.neck(x.permute(0, 3, 1, 2)) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=True) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn( + state_dict, + model, +): + """ Remap SAM checkpoints -> timm """ + sam_checkpoint = 'image_encoder.patch_embed.proj.weight' in state_dict + out_dict = {} + for k, v in state_dict.items(): + if k.startswith('image_encoder.'): + k = k[14:] + k = k.replace('mlp.lin', 'mlp.fc') + else: + if sam_checkpoint: + continue + out_dict[k] = v + return out_dict + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 1024, 1024), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + + # Segment-Anyhing Model (SAM) pretrained - https://github.com/facebookresearch/segment-anything (no classifier head, for fine-tune/features only) + 'samvit_base_patch16.sa1b': _cfg( + url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth', + hf_hub_id='timm/', + license='apache-2.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, + input_size=(3, 1024, 1024), crop_pct=1.0), + 'samvit_large_patch16.sa1b': _cfg( + url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth', + hf_hub_id='timm/', + license='apache-2.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, + input_size=(3, 1024, 1024), crop_pct=1.0), + 'samvit_huge_patch16.sa1b': _cfg( + url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth', + hf_hub_id='timm/', + license='apache-2.0', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, + input_size=(3, 1024, 1024), crop_pct=1.0), + + 'samvit_base_patch16_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=1000, + input_size=(3, 224, 224), crop_pct=0.9), +}) + + +def _create_vision_transformer(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', 3) + return build_model_with_cfg( + VisionTransformerSAM, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + + +@register_model +def samvit_base_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: + """ ViT-B/16 for Segment-Anything + """ + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11], + window_size=14, use_rel_pos=True, img_size=1024, + ) + model 
= _create_vision_transformer( + 'samvit_base_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def samvit_large_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: + """ ViT-L/16 for Segment-Anything + """ + model_args = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, global_attn_indexes=[5, 11, 17, 23], + window_size=14, use_rel_pos=True, img_size=1024, + ) + model = _create_vision_transformer( + 'samvit_large_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def samvit_huge_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: + """ ViT-H/16 for Segment-Anything + """ + model_args = dict( + patch_size=16, embed_dim=1280, depth=32, num_heads=16, global_attn_indexes=[7, 15, 23, 31], + window_size=14, use_rel_pos=True, img_size=1024, + ) + model = _create_vision_transformer( + 'samvit_huge_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def samvit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerSAM: + """ ViT-B/16 based on samvit arch + """ + model_args = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11], + window_size=14, use_rel_pos=True, use_abs_pos=False, img_size=224, neck_chans=None, + ) + model = _create_vision_transformer( + 'samvit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + diff --git a/pytorch-image-models/timm/models/vitamin.py b/pytorch-image-models/timm/models/vitamin.py new file mode 100644 index 0000000000000000000000000000000000000000..18635f60389b8e3a9d727ec41bf6ab3e4722db24 --- /dev/null +++ b/pytorch-image-models/timm/models/vitamin.py @@ -0,0 +1,602 @@ +""" ViTamin + +Paper: Designing Scalable Vison Models in the Vision-Language Era +A family of model weights on Huggingface: https://huggingface.co/collections/jienengchen/vitamin-family-661048126b72debdaca060bf + +@inproceedings{chen2024vitamin, + title={ViTamin: Designing Scalable Vision Models in the Vision-language Era}, + author={Chen, Jieneng and Yu, Qihang and Shen, Xiaohui and Yuille, Alan and Chen, Liang-Chieh}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + year={2024} +} + +Based on Apache 2.0 licensed code at https://github.com/ViTamin/ViTamin + +Modifications and timm support by Jieneng Chen 2024 + +Reference: +https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py +https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer_hybrid.py +""" + +import math +from dataclasses import dataclass, field +from functools import partial +from typing import Optional, Union, Tuple + +import torch +import torch.nn as nn + +from timm.data import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD +from timm.layers import create_act_layer, get_norm_layer, get_norm_act_layer, create_conv2d, \ + make_divisible, DropPath, HybridEmbed +from ._builder import build_model_with_cfg +from ._manipulate import named_apply, checkpoint_seq +from ._registry import register_model, generate_default_cfgs +from .vision_transformer import VisionTransformer, checkpoint_filter_fn + + +@dataclass +class VitConvCfg: + expand_ratio: float = 4.0 + expand_output: bool = True # calculate expansion channels from output (vs input chs) + kernel_size: int = 3 + group_size: int = 1 # 1 == depthwise + pre_norm_act: bool = False # activation after pre-norm + 
stride_mode: str = 'dw' # stride done via one of 'pool', '1x1', 'dw' + pool_type: str = 'avg2' + downsample_pool_type: str = 'avg2' + act_layer: str = 'gelu' # stem & stage 1234 + norm_layer: str = '' + norm_eps: float = 1e-5 + down_shortcut: Optional[bool] = True + mlp: str = 'mlp' + + +@dataclass +class VitCfg: + embed_dim: Tuple[Union[int, Tuple[int, ...]], ...] = (96, 192, 384, 768) + depths: Tuple[Union[int, Tuple[int, ...]], ...] = (2, 3, 5, 2) + stem_width: int = 64 + conv_cfg: VitConvCfg = field(default_factory=VitConvCfg) + head_type: str = "" + + +def _init_conv(module, name, scheme=''): + if isinstance(module, nn.Conv2d): + fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels + fan_out //= module.groups + nn.init.normal_(module.weight, 0, math.sqrt(2.0 / fan_out)) + if module.bias is not None: + nn.init.zeros_(module.bias) + + +class Stem(nn.Module): + def __init__( + self, + in_chs: int, + out_chs: int, + act_layer: str = 'gelu', + norm_layer: str = 'layernorm2d', + norm_eps: float = 1e-6, + bias: bool = True, + ): + super().__init__() + norm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps) + self.out_chs = out_chs + + self.conv1 = create_conv2d(in_chs, out_chs, 3, stride=2, bias=bias) + self.norm1 = norm_act_layer(out_chs) + self.conv2 = create_conv2d(out_chs, out_chs, 3, stride=1, bias=bias) + + named_apply(_init_conv, self) + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.conv2(x) + return x + + +class Downsample2d(nn.Module): + def __init__( + self, + dim: int, + dim_out: int, + pool_type: str = 'avg2', + bias: bool = True, + ): + super().__init__() + self.pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1, count_include_pad=False) + + if dim != dim_out: + self.expand = nn.Conv2d(dim, dim_out, 1, bias=bias) # 1x1 conv + else: + self.expand = nn.Identity() + + def forward(self, x): + x = self.pool(x) # spatial downsample + x = self.expand(x) # expand chs + return x + + +class StridedConv(nn.Module): + """ downsample 2d as well + """ + def __init__( + self, + kernel_size=3, + stride=2, + padding=1, + in_chans=3, + embed_dim=768 + ): + super().__init__() + norm_layer = partial(get_norm_layer('layernorm2d'), eps=1e-6) + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding) + self.norm = norm_layer(in_chans) # affine over C + + def forward(self, x): + x = self.norm(x) + x = self.proj(x) + return x + + +class MbConvLNBlock(nn.Module): + """ Pre-Norm Conv Block - 1x1 - kxk - 1x1, w/ inverted bottleneck (expand) + """ + def __init__( + self, + in_chs: int, + out_chs: int, + stride: int = 1, + drop_path: float = 0., + kernel_size: int = 3, + norm_layer: str = 'layernorm2d', + norm_eps: float = 1e-6, + act_layer: str = 'gelu', + expand_ratio: float = 4.0, + ): + super(MbConvLNBlock, self).__init__() + self.stride, self.in_chs, self.out_chs = stride, in_chs, out_chs + mid_chs = make_divisible(out_chs * expand_ratio) + prenorm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps) + + if stride == 2: + self.shortcut = Downsample2d(in_chs, out_chs, pool_type='avg', bias=True) + elif in_chs != out_chs: + self.shortcut = nn.Conv2d(in_chs, out_chs, 1, bias=True) + else: + self.shortcut = nn.Identity() + + self.pre_norm = prenorm_act_layer(in_chs, apply_act=False) + self.down = nn.Identity() + self.conv1_1x1 = create_conv2d(in_chs, mid_chs, 1, stride=1, bias=True) + self.act1 = create_act_layer(act_layer, inplace=True) + self.conv2_kxk = 
create_conv2d( + mid_chs, mid_chs, kernel_size, stride=stride, dilation=1, groups=mid_chs, bias=True) + self.act2 = create_act_layer(act_layer, inplace=True) + self.conv3_1x1 = create_conv2d(mid_chs, out_chs, 1, bias=True) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + + def init_weights(self, scheme=''): + named_apply(partial(_init_conv, scheme=scheme), self) + + def forward(self, x): + shortcut = self.shortcut(x) + + x = self.pre_norm(x) + x = self.down(x) # nn.Identity() + + # 1x1 expansion conv & act + x = self.conv1_1x1(x) + x = self.act1(x) + + # (strided) depthwise 3x3 conv & act + x = self.conv2_kxk(x) + x = self.act2(x) + + # 1x1 linear projection to output width + x = self.conv3_1x1(x) + x = self.drop_path(x) + shortcut + + return x + + +class MbConvStages(nn.Module): + """ MobileConv for stage 1 and stage 2 of ViTamin + """ + def __init__( + self, + cfg: VitCfg, + img_size: Union[int, Tuple[int, int]] = 224, # place holder + in_chans: int = 3, + ): + super().__init__() + self.grad_checkpointing = False + + self.stem = Stem( + in_chs=in_chans, + out_chs=cfg.stem_width, + ) + + stages = [] + self.num_stages = len(cfg.embed_dim) + for s, dim in enumerate(cfg.embed_dim[:2]): # stage + stage_in_chs = cfg.embed_dim[s-1] if s>0 else cfg.stem_width + blocks = [ + MbConvLNBlock( + in_chs = stage_in_chs if d==0 else dim, + out_chs = dim, + stride = 2 if d == 0 else 1, + ) + for d in range(cfg.depths[s]) + ] + stages += [nn.Sequential(*blocks)] + self.stages = nn.Sequential(*stages) + + self.pool = StridedConv( + stride=2, + in_chans=cfg.embed_dim[1], + embed_dim=cfg.embed_dim[2] + ) + + def forward(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x) + else: + x = self.stages(x) + x = self.pool(x) + return x + + +class GeGluMlp(nn.Module): + def __init__( + self, + in_features, + hidden_features, + act_layer = 'gelu', + drop = 0.0, + ): + super().__init__() + norm_layer = partial(get_norm_layer('layernorm'), eps=1e-6) + + self.norm = norm_layer(in_features) + self.w0 = nn.Linear(in_features, hidden_features) + self.act = create_act_layer(act_layer) + self.w1 = nn.Linear(in_features, hidden_features) + self.w2 = nn.Linear(hidden_features, in_features) + + def forward(self, x): + x = self.norm(x) + x = self.act(self.w0(x)) * self.w1(x) + x = self.w2(x) + return x + + +def _create_vitamin(variant, pretrained=False, embed_cfg=None, **kwargs): + out_indices = kwargs.pop('out_indices', 3) + assert embed_cfg is not None + backbone = MbConvStages(cfg=embed_cfg, in_chans=kwargs.get('in_chans', 3)) + kwargs['embed_layer'] = partial(HybridEmbed, backbone=backbone, proj=False) + kwargs.setdefault('patch_size', 1) # default patch size for hybrid models if not set + + return build_model_with_cfg( + VisionTransformer, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': OPENAI_CLIP_MEAN, 'std': OPENAI_CLIP_STD, + 'first_conv': 'patch_embed.backbone.stem.conv1', + 'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'vitamin_small_224.datacomp1b_clip_ltt': _cfg( + hf_hub_id='jienengchen/ViTamin-S-LTT', num_classes=384), + 'vitamin_small_224.datacomp1b_clip': 
_cfg( + hf_hub_id='jienengchen/ViTamin-S', num_classes=384), + 'vitamin_base_224.datacomp1b_clip_ltt': _cfg( + hf_hub_id='jienengchen/ViTamin-B-LTT', num_classes=768), + 'vitamin_base_224.datacomp1b_clip': _cfg( + hf_hub_id='jienengchen/ViTamin-B', num_classes=768), + 'vitamin_large_224.datacomp1b_clip': _cfg( + hf_hub_id='jienengchen/ViTamin-L-224px', num_classes=768), + 'vitamin_large_256.datacomp1b_clip': _cfg( + hf_hub_id='jienengchen/ViTamin-L-256px', num_classes=768, + input_size=(3, 256, 256), crop_pct=1.0), + 'vitamin_large_336.datacomp1b_clip': _cfg( + hf_hub_id='jienengchen/ViTamin-L-336px', num_classes=768, + input_size=(3, 336, 336), crop_pct=1.0), + 'vitamin_large_384.datacomp1b_clip': _cfg( + hf_hub_id='jienengchen/ViTamin-L-384px', num_classes=768, + input_size=(3, 384, 384), crop_pct=1.0), + 'vitamin_large2_224.datacomp1b_clip': _cfg( + hf_hub_id='jienengchen/ViTamin-L2-224px', num_classes=1024), + 'vitamin_large2_256.datacomp1b_clip': _cfg( + hf_hub_id='jienengchen/ViTamin-L2-256px', num_classes=1024, + input_size=(3, 256, 256), crop_pct=1.0), + 'vitamin_large2_336.datacomp1b_clip': _cfg( + hf_hub_id='jienengchen/ViTamin-L2-336px', num_classes=1024, + input_size=(3, 336, 336), crop_pct=1.0), + 'vitamin_large2_384.datacomp1b_clip': _cfg( + hf_hub_id='jienengchen/ViTamin-L2-384px', num_classes=1024, + input_size=(3, 384, 384), crop_pct=1.0), + 'vitamin_xlarge_256.datacomp1b_clip': _cfg( + hf_hub_id='jienengchen/ViTamin-XL-256px', num_classes=1152, + input_size=(3, 256, 256), crop_pct=1.0), + 'vitamin_xlarge_336.datacomp1b_clip': _cfg( + hf_hub_id='jienengchen/ViTamin-XL-336px', num_classes=1152, + input_size=(3, 336, 336), crop_pct=1.0), + 'vitamin_xlarge_384.datacomp1b_clip': _cfg( + hf_hub_id='jienengchen/ViTamin-XL-384px', num_classes=1152, + input_size=(3, 384, 384), crop_pct=1.0), +}) + + +@register_model +def vitamin_small_224(pretrained=False, **kwargs) -> VisionTransformer: + embed_cfg = VitCfg( + embed_dim=(64, 128, 384), + depths=(2, 4, 1), + stem_width=64, + conv_cfg=VitConvCfg( + norm_layer='layernorm2d', + norm_eps=1e-6, + ), + head_type='1d', + ) + model_args = dict( + embed_dim=384, depth=14, num_heads=6, mlp_layer=GeGluMlp, mlp_ratio=2., + class_token=False, global_pool='avg', embed_cfg=embed_cfg + ) + model = _create_vitamin('vitamin_small_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vitamin_base_224(pretrained=False, **kwargs) -> VisionTransformer: + embed_cfg = VitCfg( + embed_dim=(128, 256, 768), + depths=(2, 4, 1), + stem_width=128, + conv_cfg=VitConvCfg( + norm_layer='layernorm2d', + norm_eps=1e-6, + ), + head_type='1d', + ) + model_args = dict( + embed_dim=768, depth=14, num_heads=12, mlp_layer=GeGluMlp, mlp_ratio=2., + class_token=False, global_pool='avg', embed_cfg=embed_cfg) + model = _create_vitamin('vitamin_base_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vitamin_large_224(pretrained=False, **kwargs) -> VisionTransformer: + embed_cfg = VitCfg( + embed_dim=(160, 320, 1024), + depths=(2, 4, 1), + stem_width=160, + conv_cfg=VitConvCfg( + norm_layer='layernorm2d', + norm_eps=1e-6, + ), + head_type='1d', + ) + model_args = dict( + embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., + class_token=False, global_pool='avg', embed_cfg=embed_cfg, + ) + model = _create_vitamin('vitamin_large_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vitamin_large_256(pretrained=False, 
**kwargs) -> VisionTransformer: + embed_cfg = VitCfg( + embed_dim=(160, 320, 1024), + depths=(2, 4, 1), + stem_width=160, + conv_cfg=VitConvCfg( + norm_layer='layernorm2d', + norm_eps=1e-6, + ), + head_type='1d', + ) + model_args = dict( + img_size=256, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., + class_token=False, global_pool='avg', embed_cfg=embed_cfg) + model = _create_vitamin('vitamin_large_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vitamin_large_336(pretrained=False, **kwargs) -> VisionTransformer: + embed_cfg = VitCfg( + embed_dim=(160, 320, 1024), + depths=(2, 4, 1), + stem_width=160, + conv_cfg=VitConvCfg( + norm_layer='layernorm2d', + norm_eps=1e-6, + ), + head_type='1d', + ) + model_args = dict( + img_size=336, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., + class_token=False, global_pool='avg', embed_cfg=embed_cfg + ) + model = _create_vitamin('vitamin_large_336', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vitamin_large_384(pretrained=False, **kwargs) -> VisionTransformer: + embed_cfg = VitCfg( + embed_dim=(160, 320, 1024), + depths=(2, 4, 1), + stem_width=160, + conv_cfg=VitConvCfg( + norm_layer='layernorm2d', + norm_eps=1e-6, + ), + head_type='1d', + ) + model_args = dict( + img_size=384, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., + class_token=False, global_pool='avg', embed_cfg=embed_cfg) + model = _create_vitamin('vitamin_large_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vitamin_large2_224(pretrained=False, **kwargs) -> VisionTransformer: + embed_cfg = VitCfg( + embed_dim=(160, 320, 1024), + depths=(2, 4, 1), + stem_width=160, + conv_cfg=VitConvCfg( + norm_layer='layernorm2d', + norm_eps=1e-6, + ), + head_type='1d', + ) + model_args = dict( + embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., + class_token=False, global_pool='avg', embed_cfg=embed_cfg, + ) + model = _create_vitamin('vitamin_large2_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vitamin_large2_256(pretrained=False, **kwargs) -> VisionTransformer: + embed_cfg = VitCfg( + embed_dim=(160, 320, 1024), + depths=(2, 4, 1), + stem_width=160, + conv_cfg=VitConvCfg( + norm_layer='layernorm2d', + norm_eps=1e-6, + ), + head_type='1d', + ) + model_args = dict( + img_size=256, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., + class_token=False, global_pool='avg', embed_cfg=embed_cfg) + model = _create_vitamin('vitamin_large2_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vitamin_large2_336(pretrained=False, **kwargs) -> VisionTransformer: + embed_cfg = VitCfg( + embed_dim=(160, 320, 1024), + depths=(2, 4, 1), + stem_width=160, + conv_cfg=VitConvCfg( + norm_layer='layernorm2d', + norm_eps=1e-6, + ), + head_type='1d', + ) + model_args = dict( + img_size=336, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., + class_token=False, global_pool='avg', embed_cfg=embed_cfg + ) + model = _create_vitamin('vitamin_large2_336', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vitamin_large2_384(pretrained=False, **kwargs) -> VisionTransformer: + embed_cfg = VitCfg( + embed_dim=(160, 320, 1024), + depths=(2, 4, 1), + stem_width=160, + conv_cfg=VitConvCfg( + 
norm_layer='layernorm2d', + norm_eps=1e-6, + ), + head_type='1d', + ) + model_args = dict( + img_size=384, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., + class_token=False, global_pool='avg', embed_cfg=embed_cfg) + model = _create_vitamin('vitamin_large2_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vitamin_xlarge_256(pretrained=False, **kwargs) -> VisionTransformer: + embed_cfg=VitCfg( + embed_dim=(192, 384, 1152), + depths=(2, 4, 1), + stem_width=192, + conv_cfg=VitConvCfg( + norm_layer='layernorm2d', + norm_eps=1e-6, + ), + head_type='1d', + ) + model_args = dict( + img_size=256, embed_dim=1152, depth=32, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., + class_token=False, global_pool='avg', pos_embed='none', embed_cfg=embed_cfg) + model = _create_vitamin( + 'vitamin_xlarge_256', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vitamin_xlarge_336(pretrained=False, **kwargs) -> VisionTransformer: + embed_cfg = VitCfg( + embed_dim=(192, 384, 1152), + depths=(2, 4, 1), + stem_width=192, + conv_cfg=VitConvCfg( + norm_layer='layernorm2d', + norm_eps=1e-6, + ), + head_type='1d', + ) + model_args = dict( + img_size=336, embed_dim=1152, depth=32, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., + class_token=False, global_pool='avg', pos_embed='none', embed_cfg=embed_cfg) + model = _create_vitamin('vitamin_xlarge_336', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def vitamin_xlarge_384(pretrained=False, **kwargs) -> VisionTransformer: + embed_cfg = VitCfg( + embed_dim=(192, 384, 1152), + depths=(2, 4, 1), + stem_width=192, + conv_cfg=VitConvCfg( + norm_layer='layernorm2d', + norm_eps=1e-6, + ), + head_type='1d', + ) + model_args = dict( + img_size=384, embed_dim=1152, depth=32, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., + class_token=False, global_pool='avg', pos_embed='none', embed_cfg=embed_cfg) + model = _create_vitamin('vitamin_xlarge_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model \ No newline at end of file diff --git a/pytorch-image-models/timm/models/volo.py b/pytorch-image-models/timm/models/volo.py new file mode 100644 index 0000000000000000000000000000000000000000..0d273180fb0fc8b044dcf10b41f480456b4fce55 --- /dev/null +++ b/pytorch-image-models/timm/models/volo.py @@ -0,0 +1,994 @@ +""" Vision OutLOoker (VOLO) implementation + +Paper: `VOLO: Vision Outlooker for Visual Recognition` - https://arxiv.org/abs/2106.13112 + +Code adapted from official impl at https://github.com/sail-sg/volo, original copyright in comment below + +Modifications and additions for timm by / Copyright 2022, Ross Wightman +""" +# Copyright 2021 Sea Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
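+
+# Example usage (an illustrative sketch; it assumes timm's standard create_model factory
+# and the `volo_d1_224` entrypoint registered at the bottom of this module):
+#
+#   import torch
+#   import timm
+#   model = timm.create_model('volo_d1_224', pretrained=False)
+#   logits = model(torch.randn(1, 3, 224, 224))  # -> (1, 1000) classification logits
+#   # last two stage outputs as NCHW feature maps
+#   feats = model.forward_intermediates(
+#       torch.randn(1, 3, 224, 224), indices=2, intermediates_only=True)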
+import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.checkpoint import checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import DropPath, Mlp, to_2tuple, to_ntuple, trunc_normal_, use_fused_attn +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._registry import register_model, generate_default_cfgs + +__all__ = ['VOLO'] # model_registry will add each entrypoint fn to this + + +class OutlookAttention(nn.Module): + + def __init__( + self, + dim, + num_heads, + kernel_size=3, + padding=1, + stride=1, + qkv_bias=False, + attn_drop=0., + proj_drop=0., + ): + super().__init__() + head_dim = dim // num_heads + self.num_heads = num_heads + self.kernel_size = kernel_size + self.padding = padding + self.stride = stride + self.scale = head_dim ** -0.5 + + self.v = nn.Linear(dim, dim, bias=qkv_bias) + self.attn = nn.Linear(dim, kernel_size ** 4 * num_heads) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.unfold = nn.Unfold(kernel_size=kernel_size, padding=padding, stride=stride) + self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True) + + def forward(self, x): + B, H, W, C = x.shape + + v = self.v(x).permute(0, 3, 1, 2) # B, C, H, W + + h, w = math.ceil(H / self.stride), math.ceil(W / self.stride) + v = self.unfold(v).reshape( + B, self.num_heads, C // self.num_heads, + self.kernel_size * self.kernel_size, h * w).permute(0, 1, 4, 3, 2) # B,H,N,kxk,C/H + + attn = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + attn = self.attn(attn).reshape( + B, h * w, self.num_heads, self.kernel_size * self.kernel_size, + self.kernel_size * self.kernel_size).permute(0, 2, 1, 3, 4) # B,H,N,kxk,kxk + attn = attn * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).permute(0, 1, 4, 3, 2).reshape(B, C * self.kernel_size * self.kernel_size, h * w) + x = F.fold(x, output_size=(H, W), kernel_size=self.kernel_size, padding=self.padding, stride=self.stride) + + x = self.proj(x.permute(0, 2, 3, 1)) + x = self.proj_drop(x) + + return x + + +class Outlooker(nn.Module): + def __init__( + self, + dim, + kernel_size, + padding, + stride=1, + num_heads=1, + mlp_ratio=3., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + qkv_bias=False, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = OutlookAttention( + dim, + num_heads, + kernel_size=kernel_size, + padding=padding, + stride=stride, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + ) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + ) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, x): + x = x + self.drop_path1(self.attn(self.norm1(x))) + x = x + self.drop_path2(self.mlp(self.norm2(x))) + return x + + +class Attention(nn.Module): + fused_attn: torch.jit.Final[bool] + + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + attn_drop=0., + proj_drop=0., + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, H, W, C = x.shape + + qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + + if self.fused_attn: + x = F.scaled_dot_product_attention( + q, k, v, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.transpose(1, 2).reshape(B, H, W, C) + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class Transformer(nn.Module): + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, x): + x = x + self.drop_path1(self.attn(self.norm1(x))) + x = x + self.drop_path2(self.mlp(self.norm2(x))) + return x + + +class ClassAttention(nn.Module): + + def __init__( + self, + dim, + num_heads=8, + head_dim=None, + qkv_bias=False, + attn_drop=0., + proj_drop=0., + ): + super().__init__() + self.num_heads = num_heads + if head_dim is not None: + self.head_dim = head_dim + else: + head_dim = dim // num_heads + self.head_dim = head_dim + self.scale = head_dim ** -0.5 + + self.kv = nn.Linear(dim, self.head_dim * self.num_heads * 2, bias=qkv_bias) + self.q = nn.Linear(dim, self.head_dim * self.num_heads, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(self.head_dim * self.num_heads, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + + kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + k, v = kv.unbind(0) + q = self.q(x[:, :1, :]).reshape(B, self.num_heads, 1, self.head_dim) * self.scale + + attn = q @ k.transpose(-2, -1) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + cls_embed = (attn @ v).transpose(1, 2).reshape(B, 1, self.head_dim * self.num_heads) + cls_embed = self.proj(cls_embed) + cls_embed = self.proj_drop(cls_embed) + return cls_embed + + +class ClassBlock(nn.Module): + + def __init__( + self, + dim, + num_heads, + head_dim=None, + mlp_ratio=4., + qkv_bias=False, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = ClassAttention( + dim, + num_heads=num_heads, + head_dim=head_dim, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop, + ) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=drop, + ) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + cls_embed = x[:, :1] + cls_embed = cls_embed + self.drop_path1(self.attn(self.norm1(x))) + cls_embed = cls_embed + self.drop_path2(self.mlp(self.norm2(cls_embed))) + return torch.cat([cls_embed, x[:, 1:]], dim=1) + + +def get_block(block_type, **kargs): + if block_type == 'ca': + return ClassBlock(**kargs) + + +def rand_bbox(size, lam, scale=1): + """ + get bounding box as token labeling (https://github.com/zihangJiang/TokenLabeling) + return: bounding box + """ + W = size[1] // scale + H = size[2] // scale + cut_rat = np.sqrt(1. - lam) + cut_w = (W * cut_rat).astype(int) + cut_h = (H * cut_rat).astype(int) + + # uniform + cx = np.random.randint(W) + cy = np.random.randint(H) + + bbx1 = np.clip(cx - cut_w // 2, 0, W) + bby1 = np.clip(cy - cut_h // 2, 0, H) + bbx2 = np.clip(cx + cut_w // 2, 0, W) + bby2 = np.clip(cy + cut_h // 2, 0, H) + + return bbx1, bby1, bbx2, bby2 + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding. 
+ Different with ViT use 1 conv layer, we use 4 conv layers to do patch embedding + """ + + def __init__( + self, + img_size=224, + stem_conv=False, + stem_stride=1, + patch_size=8, + in_chans=3, + hidden_dim=64, + embed_dim=384, + ): + super().__init__() + assert patch_size in [4, 8, 16] + if stem_conv: + self.conv = nn.Sequential( + nn.Conv2d(in_chans, hidden_dim, kernel_size=7, stride=stem_stride, padding=3, bias=False), # 112x112 + nn.BatchNorm2d(hidden_dim), + nn.ReLU(inplace=True), + nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), # 112x112 + nn.BatchNorm2d(hidden_dim), + nn.ReLU(inplace=True), + nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), # 112x112 + nn.BatchNorm2d(hidden_dim), + nn.ReLU(inplace=True), + ) + else: + self.conv = None + + self.proj = nn.Conv2d( + hidden_dim, embed_dim, kernel_size=patch_size // stem_stride, stride=patch_size // stem_stride) + self.num_patches = (img_size // patch_size) * (img_size // patch_size) + + def forward(self, x): + if self.conv is not None: + x = self.conv(x) + x = self.proj(x) # B, C, H, W + return x + + +class Downsample(nn.Module): + """ Image to Patch Embedding, downsampling between stage1 and stage2 + """ + + def __init__(self, in_embed_dim, out_embed_dim, patch_size=2): + super().__init__() + self.proj = nn.Conv2d(in_embed_dim, out_embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + x = x.permute(0, 3, 1, 2) + x = self.proj(x) # B, C, H, W + x = x.permute(0, 2, 3, 1) + return x + + +def outlooker_blocks( + block_fn, + index, + dim, + layers, + num_heads=1, + kernel_size=3, + padding=1, + stride=2, + mlp_ratio=3., + qkv_bias=False, + attn_drop=0, + drop_path_rate=0., + **kwargs, +): + """ + generate outlooker layer in stage1 + return: outlooker layers + """ + blocks = [] + for block_idx in range(layers[index]): + block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) + blocks.append(block_fn( + dim, + kernel_size=kernel_size, + padding=padding, + stride=stride, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + drop_path=block_dpr, + )) + blocks = nn.Sequential(*blocks) + return blocks + + +def transformer_blocks( + block_fn, + index, + dim, + layers, + num_heads, + mlp_ratio=3., + qkv_bias=False, + attn_drop=0, + drop_path_rate=0., + **kwargs, +): + """ + generate transformer layers in stage2 + return: transformer layers + """ + blocks = [] + for block_idx in range(layers[index]): + block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) + blocks.append(block_fn( + dim, + num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + drop_path=block_dpr, + )) + blocks = nn.Sequential(*blocks) + return blocks + + +class VOLO(nn.Module): + """ + Vision Outlooker, the main class of our model + """ + + def __init__( + self, + layers, + img_size=224, + in_chans=3, + num_classes=1000, + global_pool='token', + patch_size=8, + stem_hidden_dim=64, + embed_dims=None, + num_heads=None, + downsamples=(True, False, False, False), + outlook_attention=(True, False, False, False), + mlp_ratio=3.0, + qkv_bias=False, + drop_rate=0., + pos_drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + post_layers=('ca', 'ca'), + use_aux_head=True, + use_mix_token=False, + pooling_scale=2, + ): + super().__init__() + num_layers = len(layers) + mlp_ratio = to_ntuple(num_layers)(mlp_ratio) + img_size = to_2tuple(img_size) + 
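+        # classifier / pooling settings; use_mix_token enables the token-labeling style
+        # MixToken augmentation used by forward_train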
+ self.num_classes = num_classes + self.global_pool = global_pool + self.mix_token = use_mix_token + self.pooling_scale = pooling_scale + self.num_features = self.head_hidden_size = embed_dims[-1] + if use_mix_token: # enable token mixing, see token labeling for details. + self.beta = 1.0 + assert global_pool == 'token', "return all tokens if mix_token is enabled" + self.grad_checkpointing = False + + self.patch_embed = PatchEmbed( + stem_conv=True, + stem_stride=2, + patch_size=patch_size, + in_chans=in_chans, + hidden_dim=stem_hidden_dim, + embed_dim=embed_dims[0], + ) + r = patch_size + + # inital positional encoding, we add positional encoding after outlooker blocks + patch_grid = (img_size[0] // patch_size // pooling_scale, img_size[1] // patch_size // pooling_scale) + self.pos_embed = nn.Parameter(torch.zeros(1, patch_grid[0], patch_grid[1], embed_dims[-1])) + self.pos_drop = nn.Dropout(p=pos_drop_rate) + + # set the main block in network + self.stage_ends = [] + self.feature_info = [] + network = [] + block_idx = 0 + for i in range(len(layers)): + if outlook_attention[i]: + # stage 1 + stage = outlooker_blocks( + Outlooker, + i, + embed_dims[i], + layers, + num_heads[i], + mlp_ratio=mlp_ratio[i], + qkv_bias=qkv_bias, + attn_drop=attn_drop_rate, + norm_layer=norm_layer, + ) + else: + # stage 2 + stage = transformer_blocks( + Transformer, + i, + embed_dims[i], + layers, + num_heads[i], + mlp_ratio=mlp_ratio[i], + qkv_bias=qkv_bias, + drop_path_rate=drop_path_rate, + attn_drop=attn_drop_rate, + norm_layer=norm_layer, + ) + network.append(stage) + self.stage_ends.append(block_idx) + self.feature_info.append(dict(num_chs=embed_dims[i], reduction=r, module=f'network.{block_idx}')) + block_idx += 1 + if downsamples[i]: + # downsampling between two stages + network.append(Downsample(embed_dims[i], embed_dims[i + 1], 2)) + r *= 2 + block_idx += 1 + + self.network = nn.ModuleList(network) + + # set post block, for example, class attention layers + self.post_network = None + if post_layers is not None: + self.post_network = nn.ModuleList([ + get_block( + post_layers[i], + dim=embed_dims[-1], + num_heads=num_heads[-1], + mlp_ratio=mlp_ratio[-1], + qkv_bias=qkv_bias, + attn_drop=attn_drop_rate, + drop_path=0., + norm_layer=norm_layer) + for i in range(len(post_layers)) + ]) + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims[-1])) + trunc_normal_(self.cls_token, std=.02) + + # set output type + if use_aux_head: + self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + else: + self.aux_head = None + self.norm = norm_layer(self.num_features) + + # Classifier head + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.pos_embed, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^cls_token|pos_embed|patch_embed', # stem and embed + blocks=[ + (r'^network\.(\d+)\.(\d+)', None), + (r'^network\.(\d+)', (0,)), + ], + blocks2=[ + (r'^cls_token', (0,)), + (r'^post_network\.(\d+)', None), + (r'^norm', (99999,)) + ], + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + 
self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + if self.aux_head is not None: + self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_tokens(self, x): + for idx, block in enumerate(self.network): + if idx == 2: + # add positional encoding after outlooker blocks + x = x + self.pos_embed + x = self.pos_drop(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(block, x) + else: + x = block(x) + + B, H, W, C = x.shape + x = x.reshape(B, -1, C) + return x + + def forward_cls(self, x): + B, N, C = x.shape + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat([cls_tokens, x], dim=1) + for block in self.post_network: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(block, x) + else: + x = block(x) + return x + + def forward_train(self, x): + """ A separate forward fn for training with mix_token (if a train script supports). + Combining multiple modes in as single forward with different return types is torchscript hell. + """ + x = self.patch_embed(x) + x = x.permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C + + # mix token, see token labeling for details. + if self.mix_token and self.training: + lam = np.random.beta(self.beta, self.beta) + patch_h, patch_w = x.shape[1] // self.pooling_scale, x.shape[2] // self.pooling_scale + bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam, scale=self.pooling_scale) + temp_x = x.clone() + sbbx1, sbby1 = self.pooling_scale * bbx1, self.pooling_scale * bby1 + sbbx2, sbby2 = self.pooling_scale * bbx2, self.pooling_scale * bby2 + temp_x[:, sbbx1:sbbx2, sbby1:sbby2, :] = x.flip(0)[:, sbbx1:sbbx2, sbby1:sbby2, :] + x = temp_x + else: + bbx1, bby1, bbx2, bby2 = 0, 0, 0, 0 + + # step2: tokens learning in the two stages + x = self.forward_tokens(x) + + # step3: post network, apply class attention or not + if self.post_network is not None: + x = self.forward_cls(x) + x = self.norm(x) + + if self.global_pool == 'avg': + x_cls = x.mean(dim=1) + elif self.global_pool == 'token': + x_cls = x[:, 0] + else: + x_cls = x + + if self.aux_head is None: + return x_cls + + x_aux = self.aux_head(x[:, 1:]) # generate classes in all feature tokens, see token labeling + if not self.training: + return x_cls + 0.5 * x_aux.max(1)[0] + + if self.mix_token and self.training: # reverse "mix token", see token labeling for details. + x_aux = x_aux.reshape(x_aux.shape[0], patch_h, patch_w, x_aux.shape[-1]) + temp_x = x_aux.clone() + temp_x[:, bbx1:bbx2, bby1:bby2, :] = x_aux.flip(0)[:, bbx1:bbx2, bby1:bby2, :] + x_aux = temp_x + x_aux = x_aux.reshape(x_aux.shape[0], patch_h * patch_w, x_aux.shape[-1]) + + # return these: 1. class token, 2. classes from all feature tokens, 3. bounding box + return x_cls, x_aux, (bbx1, bby1, bbx2, bby2) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. 
+ + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to all intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW',), 'Output format must be NCHW.' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) + take_indices = [self.stage_ends[i] for i in take_indices] + max_index = self.stage_ends[max_index] + + # forward pass + B, _, height, width = x.shape + x = self.patch_embed(x).permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C + + # step2: tokens learning in the two stages + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + network = self.network + else: + network = self.network[:max_index + 1] + for idx, block in enumerate(network): + if idx == 2: + # add positional encoding after outlooker blocks + x = x + self.pos_embed + x = self.pos_drop(x) + x = block(x) + if idx in take_indices: + if norm and idx >= 2: + x_inter = self.norm(x) + else: + x_inter = x + intermediates.append(x_inter.permute(0, 3, 1, 2)) + + if intermediates_only: + return intermediates + + # NOTE not supporting return of class tokens + # step3: post network, apply class attention or not + B, H, W, C = x.shape + x = x.reshape(B, -1, C) + if self.post_network is not None: + x = self.forward_cls(x) + x = self.norm(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
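+
+        Args:
+            indices: Take last n stages if int, select matching indices if sequence
+            prune_norm: Also replace the final norm with nn.Identity
+            prune_head: Also remove the class-attention post network and classifier head
+        Returns:
+            The indices of the intermediate outputs that are kept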
+ """ + take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) + max_index = self.stage_ends[max_index] + self.network = self.network[:max_index + 1] # truncate blocks + if prune_norm: + self.norm = nn.Identity() + if prune_head: + self.post_network = nn.ModuleList() # prune token blocks with head + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.patch_embed(x).permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C + + # step2: tokens learning in the two stages + x = self.forward_tokens(x) + + # step3: post network, apply class attention or not + if self.post_network is not None: + x = self.forward_cls(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + out = x.mean(dim=1) + elif self.global_pool == 'token': + out = x[:, 0] + else: + out = x + x = self.head_drop(x) + if pre_logits: + return out + out = self.head(out) + if self.aux_head is not None: + # generate classes in all feature tokens, see token labeling + aux = self.aux_head(x[:, 1:]) + out = out + 0.5 * aux.max(1)[0] + return out + + def forward(self, x): + """ simplified forward (without mix token training) """ + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_volo(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', 3) + return build_model_with_cfg( + VOLO, + variant, + pretrained, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .96, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.conv.0', 'classifier': ('head', 'aux_head'), + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'volo_d1_224.sail_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_224_84.2.pth.tar', + crop_pct=0.96), + 'volo_d1_384.sail_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_384_85.2.pth.tar', + crop_pct=1.0, input_size=(3, 384, 384)), + 'volo_d2_224.sail_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_224_85.2.pth.tar', + crop_pct=0.96), + 'volo_d2_384.sail_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_384_86.0.pth.tar', + crop_pct=1.0, input_size=(3, 384, 384)), + 'volo_d3_224.sail_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_224_85.4.pth.tar', + crop_pct=0.96), + 'volo_d3_448.sail_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_448_86.3.pth.tar', + crop_pct=1.0, input_size=(3, 448, 448)), + 'volo_d4_224.sail_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_224_85.7.pth.tar', + crop_pct=0.96), + 'volo_d4_448.sail_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_448_86.79.pth.tar', + crop_pct=1.15, input_size=(3, 448, 448)), + 'volo_d5_224.sail_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_224_86.10.pth.tar', + crop_pct=0.96), + 'volo_d5_448.sail_in1k': _cfg( + hf_hub_id='timm/', + 
url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_448_87.0.pth.tar', + crop_pct=1.15, input_size=(3, 448, 448)), + 'volo_d5_512.sail_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_512_87.07.pth.tar', + crop_pct=1.15, input_size=(3, 512, 512)), +}) + + +@register_model +def volo_d1_224(pretrained=False, **kwargs) -> VOLO: + """ VOLO-D1 model, Params: 27M """ + model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs) + model = _create_volo('volo_d1_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d1_384(pretrained=False, **kwargs) -> VOLO: + """ VOLO-D1 model, Params: 27M """ + model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs) + model = _create_volo('volo_d1_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d2_224(pretrained=False, **kwargs) -> VOLO: + """ VOLO-D2 model, Params: 59M """ + model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) + model = _create_volo('volo_d2_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d2_384(pretrained=False, **kwargs) -> VOLO: + """ VOLO-D2 model, Params: 59M """ + model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) + model = _create_volo('volo_d2_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d3_224(pretrained=False, **kwargs) -> VOLO: + """ VOLO-D3 model, Params: 86M """ + model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) + model = _create_volo('volo_d3_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d3_448(pretrained=False, **kwargs) -> VOLO: + """ VOLO-D3 model, Params: 86M """ + model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) + model = _create_volo('volo_d3_448', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d4_224(pretrained=False, **kwargs) -> VOLO: + """ VOLO-D4 model, Params: 193M """ + model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs) + model = _create_volo('volo_d4_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d4_448(pretrained=False, **kwargs) -> VOLO: + """ VOLO-D4 model, Params: 193M """ + model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs) + model = _create_volo('volo_d4_448', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d5_224(pretrained=False, **kwargs) -> VOLO: + """ VOLO-D5 model, Params: 296M + stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 + """ + model_args = dict( + layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), + mlp_ratio=4, stem_hidden_dim=128, **kwargs) + model = _create_volo('volo_d5_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d5_448(pretrained=False, **kwargs) -> VOLO: + """ VOLO-D5 model, Params: 296M + stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 + """ + model_args = dict( + layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), + 
mlp_ratio=4, stem_hidden_dim=128, **kwargs) + model = _create_volo('volo_d5_448', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d5_512(pretrained=False, **kwargs) -> VOLO: + """ VOLO-D5 model, Params: 296M + stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 + """ + model_args = dict( + layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), + mlp_ratio=4, stem_hidden_dim=128, **kwargs) + model = _create_volo('volo_d5_512', pretrained=pretrained, **model_args) + return model diff --git a/pytorch-image-models/timm/models/vovnet.py b/pytorch-image-models/timm/models/vovnet.py new file mode 100644 index 0000000000000000000000000000000000000000..86851666a255943863a915750ebf3f6ed9ad99d7 --- /dev/null +++ b/pytorch-image-models/timm/models/vovnet.py @@ -0,0 +1,480 @@ +""" VoVNet (V1 & V2) + +Papers: +* `An Energy and GPU-Computation Efficient Backbone Network` - https://arxiv.org/abs/1904.09730 +* `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + +Looked at https://github.com/youngwanLEE/vovnet-detectron2 & +https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py +for some reference, rewrote most of the code. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +from typing import List, Optional + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import ConvNormAct, SeparableConvNormAct, BatchNormAct2d, ClassifierHead, DropPath, \ + create_attn, create_norm_act_layer +from ._builder import build_model_with_cfg +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs + +__all__ = ['VovNet'] # model_registry will add each entrypoint fn to this + + +class SequentialAppendList(nn.Sequential): + def __init__(self, *args): + super(SequentialAppendList, self).__init__(*args) + + def forward(self, x: torch.Tensor, concat_list: List[torch.Tensor]) -> torch.Tensor: + for i, module in enumerate(self): + if i == 0: + concat_list.append(module(x)) + else: + concat_list.append(module(concat_list[-1])) + x = torch.cat(concat_list, dim=1) + return x + + +class OsaBlock(nn.Module): + + def __init__( + self, + in_chs, + mid_chs, + out_chs, + layer_per_block, + residual=False, + depthwise=False, + attn='', + norm_layer=BatchNormAct2d, + act_layer=nn.ReLU, + drop_path=None, + ): + super(OsaBlock, self).__init__() + + self.residual = residual + self.depthwise = depthwise + conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) + + next_in_chs = in_chs + if self.depthwise and next_in_chs != mid_chs: + assert not residual + self.conv_reduction = ConvNormAct(next_in_chs, mid_chs, 1, **conv_kwargs) + else: + self.conv_reduction = None + + mid_convs = [] + for i in range(layer_per_block): + if self.depthwise: + conv = SeparableConvNormAct(mid_chs, mid_chs, **conv_kwargs) + else: + conv = ConvNormAct(next_in_chs, mid_chs, 3, **conv_kwargs) + next_in_chs = mid_chs + mid_convs.append(conv) + self.conv_mid = SequentialAppendList(*mid_convs) + + # feature aggregation + next_in_chs = in_chs + layer_per_block * mid_chs + self.conv_concat = ConvNormAct(next_in_chs, out_chs, **conv_kwargs) + + self.attn = create_attn(attn, out_chs) if attn else None + + self.drop_path = drop_path + + def forward(self, x): + output = [x] + if self.conv_reduction is not None: + x = self.conv_reduction(x) + x = self.conv_mid(x, output) + x = self.conv_concat(x) + if 
self.attn is not None: + x = self.attn(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.residual: + x = x + output[0] + return x + + +class OsaStage(nn.Module): + + def __init__( + self, + in_chs, + mid_chs, + out_chs, + block_per_stage, + layer_per_block, + downsample=True, + residual=True, + depthwise=False, + attn='ese', + norm_layer=BatchNormAct2d, + act_layer=nn.ReLU, + drop_path_rates=None, + ): + super(OsaStage, self).__init__() + self.grad_checkpointing = False + + if downsample: + self.pool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) + else: + self.pool = None + + blocks = [] + for i in range(block_per_stage): + last_block = i == block_per_stage - 1 + if drop_path_rates is not None and drop_path_rates[i] > 0.: + drop_path = DropPath(drop_path_rates[i]) + else: + drop_path = None + blocks += [OsaBlock( + in_chs, + mid_chs, + out_chs, + layer_per_block, + residual=residual and i > 0, + depthwise=depthwise, + attn=attn if last_block else '', + norm_layer=norm_layer, + act_layer=act_layer, + drop_path=drop_path + )] + in_chs = out_chs + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + if self.pool is not None: + x = self.pool(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class VovNet(nn.Module): + + def __init__( + self, + cfg, + in_chans=3, + num_classes=1000, + global_pool='avg', + output_stride=32, + norm_layer=BatchNormAct2d, + act_layer=nn.ReLU, + drop_rate=0., + drop_path_rate=0., + **kwargs, + ): + """ + Args: + cfg (dict): Model architecture configuration + in_chans (int): Number of input channels (default: 3) + num_classes (int): Number of classifier classes (default: 1000) + global_pool (str): Global pooling type (default: 'avg') + output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32) + norm_layer (Union[str, nn.Module]): normalization layer + act_layer (Union[str, nn.Module]): activation layer + drop_rate (float): Dropout rate (default: 0.) + drop_path_rate (float): Stochastic depth drop-path rate (default: 0.) 
+ kwargs (dict): Extra kwargs overlayed onto cfg + """ + super(VovNet, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride == 32 # FIXME support dilation + + cfg = dict(cfg, **kwargs) + stem_stride = cfg.get("stem_stride", 4) + stem_chs = cfg["stem_chs"] + stage_conv_chs = cfg["stage_conv_chs"] + stage_out_chs = cfg["stage_out_chs"] + block_per_stage = cfg["block_per_stage"] + layer_per_block = cfg["layer_per_block"] + conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) + + # Stem module + last_stem_stride = stem_stride // 2 + conv_type = SeparableConvNormAct if cfg["depthwise"] else ConvNormAct + self.stem = nn.Sequential(*[ + ConvNormAct(in_chans, stem_chs[0], 3, stride=2, **conv_kwargs), + conv_type(stem_chs[0], stem_chs[1], 3, stride=1, **conv_kwargs), + conv_type(stem_chs[1], stem_chs[2], 3, stride=last_stem_stride, **conv_kwargs), + ]) + self.feature_info = [dict( + num_chs=stem_chs[1], reduction=2, module=f'stem.{1 if stem_stride == 4 else 2}')] + current_stride = stem_stride + + # OSA stages + stage_dpr = torch.split(torch.linspace(0, drop_path_rate, sum(block_per_stage)), block_per_stage) + in_ch_list = stem_chs[-1:] + stage_out_chs[:-1] + stage_args = dict(residual=cfg["residual"], depthwise=cfg["depthwise"], attn=cfg["attn"], **conv_kwargs) + stages = [] + for i in range(4): # num_stages + downsample = stem_stride == 2 or i > 0 # first stage has no stride/downsample if stem_stride is 4 + stages += [OsaStage( + in_ch_list[i], + stage_conv_chs[i], + stage_out_chs[i], + block_per_stage[i], + layer_per_block, + downsample=downsample, + drop_path_rates=stage_dpr[i], + **stage_args, + )] + self.num_features = stage_out_chs[i] + current_stride *= 2 if downsample else 1 + self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')] + + self.stages = nn.Sequential(*stages) + + self.head_hidden_size = self.num_features + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.Linear): + nn.init.zeros_(m.bias) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else r'^stages\.(\d+).blocks\.(\d+)', + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + def forward_features(self, x): + x = self.stem(x) + return self.stages(x) + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +# model cfgs adapted from https://github.com/youngwanLEE/vovnet-detectron2 & +# https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py +model_cfgs = dict( + vovnet39a=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=False, + depthwise=False, + attn='', + ), + vovnet57a=dict( + 
stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 4, 3], + residual=False, + depthwise=False, + attn='', + + ), + ese_vovnet19b_slim_dw=dict( + stem_chs=[64, 64, 64], + stage_conv_chs=[64, 80, 96, 112], + stage_out_chs=[112, 256, 384, 512], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=True, + attn='ese', + + ), + ese_vovnet19b_dw=dict( + stem_chs=[64, 64, 64], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=True, + attn='ese', + ), + ese_vovnet19b_slim=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[64, 80, 96, 112], + stage_out_chs=[112, 256, 384, 512], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=False, + attn='ese', + ), + ese_vovnet19b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=False, + attn='ese', + + ), + ese_vovnet39b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=True, + depthwise=False, + attn='ese', + ), + ese_vovnet57b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 4, 3], + residual=True, + depthwise=False, + attn='ese', + + ), + ese_vovnet99b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 3, 9, 3], + residual=True, + depthwise=False, + attn='ese', + ), + eca_vovnet39b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=True, + depthwise=False, + attn='eca', + ), +) +model_cfgs['ese_vovnet39b_evos'] = model_cfgs['ese_vovnet39b'] + + +def _create_vovnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + VovNet, + variant, + pretrained, + model_cfg=model_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', **kwargs, + } + + +default_cfgs = generate_default_cfgs({ + 'vovnet39a.untrained': _cfg(url=''), + 'vovnet57a.untrained': _cfg(url=''), + 'ese_vovnet19b_slim_dw.untrained': _cfg(url=''), + 'ese_vovnet19b_dw.ra_in1k': _cfg( + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'ese_vovnet19b_slim.untrained': _cfg(url=''), + 'ese_vovnet39b.ra_in1k': _cfg( + hf_hub_id='timm/', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'ese_vovnet57b.untrained': _cfg(url=''), + 'ese_vovnet99b.untrained': _cfg(url=''), + 'eca_vovnet39b.untrained': _cfg(url=''), + 'ese_vovnet39b_evos.untrained': _cfg(url=''), +}) + + +@register_model +def vovnet39a(pretrained=False, **kwargs) -> VovNet: + return _create_vovnet('vovnet39a', pretrained=pretrained, **kwargs) + + +@register_model +def vovnet57a(pretrained=False, **kwargs) -> VovNet: + return 
_create_vovnet('vovnet57a', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet19b_slim_dw(pretrained=False, **kwargs) -> VovNet: + return _create_vovnet('ese_vovnet19b_slim_dw', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet19b_dw(pretrained=False, **kwargs) -> VovNet: + return _create_vovnet('ese_vovnet19b_dw', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet19b_slim(pretrained=False, **kwargs) -> VovNet: + return _create_vovnet('ese_vovnet19b_slim', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet39b(pretrained=False, **kwargs) -> VovNet: + return _create_vovnet('ese_vovnet39b', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet57b(pretrained=False, **kwargs) -> VovNet: + return _create_vovnet('ese_vovnet57b', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet99b(pretrained=False, **kwargs) -> VovNet: + return _create_vovnet('ese_vovnet99b', pretrained=pretrained, **kwargs) + + +@register_model +def eca_vovnet39b(pretrained=False, **kwargs) -> VovNet: + return _create_vovnet('eca_vovnet39b', pretrained=pretrained, **kwargs) + + +# Experimental Models + +@register_model +def ese_vovnet39b_evos(pretrained=False, **kwargs) -> VovNet: + def norm_act_fn(num_features, **nkwargs): + return create_norm_act_layer('evonorms0', num_features, jit=False, **nkwargs) + return _create_vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=norm_act_fn, **kwargs) diff --git a/pytorch-image-models/timm/models/xception.py b/pytorch-image-models/timm/models/xception.py new file mode 100644 index 0000000000000000000000000000000000000000..e1f92abfa01328b52b7feaba03afa4e7a0474495 --- /dev/null +++ b/pytorch-image-models/timm/models/xception.py @@ -0,0 +1,255 @@ +""" +Ported to pytorch thanks to [tstandley](https://github.com/tstandley/Xception-PyTorch) + +@author: tstandley +Adapted by cadene + +Creates an Xception Model as defined in: + +Francois Chollet +Xception: Deep Learning with Depthwise Separable Convolutions +https://arxiv.org/pdf/1610.02357.pdf + +This weights ported from the Keras implementation. 
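# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the patch: the VovNet entrypoints
# registered above resolve through timm's model registry, so a variant can be
# built by name and used as a multi-scale feature backbone via the
# feature_info entries populated in VovNet.__init__. Assumes the timm package
# added in this diff is importable; pretrained tags such as
# 'ese_vovnet39b.ra_in1k' correspond to the default_cfgs defined above.
import torch
import timm

model = timm.create_model('ese_vovnet39b', pretrained=False, features_only=True)
feats = model(torch.randn(1, 3, 224, 224))
print(model.feature_info.channels())     # [64, 256, 512, 768, 1024] for ese_vovnet39b
print([tuple(f.shape) for f in feats])   # feature maps at strides 2, 4, 8, 16, 32
# ---------------------------------------------------------------------------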
Achieves the following performance on the validation set: + +Loss:0.9173 Prec@1:78.892 Prec@5:94.292 + +REMEMBER to set your image size to 3x299x299 for both test and validation + +normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], + std=[0.5, 0.5, 0.5]) + +The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 +""" +import torch.jit +import torch.nn as nn +import torch.nn.functional as F + +from timm.layers import create_classifier +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs, register_model_deprecations + +__all__ = ['Xception'] + + +class SeparableConv2d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1): + super(SeparableConv2d, self).__init__() + + self.conv1 = nn.Conv2d( + in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=False) + self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=False) + + def forward(self, x): + x = self.conv1(x) + x = self.pointwise(x) + return x + + +class Block(nn.Module): + def __init__(self, in_channels, out_channels, reps, strides=1, start_with_relu=True, grow_first=True): + super(Block, self).__init__() + + if out_channels != in_channels or strides != 1: + self.skip = nn.Conv2d(in_channels, out_channels, 1, stride=strides, bias=False) + self.skipbn = nn.BatchNorm2d(out_channels) + else: + self.skip = None + + rep = [] + for i in range(reps): + if grow_first: + inc = in_channels if i == 0 else out_channels + outc = out_channels + else: + inc = in_channels + outc = in_channels if i < (reps - 1) else out_channels + rep.append(nn.ReLU(inplace=True)) + rep.append(SeparableConv2d(inc, outc, 3, stride=1, padding=1)) + rep.append(nn.BatchNorm2d(outc)) + + if not start_with_relu: + rep = rep[1:] + else: + rep[0] = nn.ReLU(inplace=False) + + if strides != 1: + rep.append(nn.MaxPool2d(3, strides, 1)) + self.rep = nn.Sequential(*rep) + + def forward(self, inp): + x = self.rep(inp) + + if self.skip is not None: + skip = self.skip(inp) + skip = self.skipbn(skip) + else: + skip = inp + + x += skip + return x + + +class Xception(nn.Module): + """ + Xception optimized for the ImageNet dataset, as specified in + https://arxiv.org/pdf/1610.02357.pdf + """ + + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg'): + """ Constructor + Args: + num_classes: number of classes + """ + super(Xception, self).__init__() + self.drop_rate = drop_rate + self.global_pool = global_pool + self.num_classes = num_classes + self.num_features = self.head_hidden_size = 2048 + + self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False) + self.bn1 = nn.BatchNorm2d(32) + self.act1 = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32, 64, 3, bias=False) + self.bn2 = nn.BatchNorm2d(64) + self.act2 = nn.ReLU(inplace=True) + + self.block1 = Block(64, 128, 2, 2, start_with_relu=False) + self.block2 = Block(128, 256, 2, 2) + self.block3 = Block(256, 728, 2, 2) + + self.block4 = Block(728, 728, 3, 1) + self.block5 = Block(728, 728, 3, 1) + self.block6 = Block(728, 728, 3, 1) + self.block7 = Block(728, 728, 3, 1) + + self.block8 = Block(728, 728, 3, 1) + self.block9 = Block(728, 728, 3, 1) + self.block10 = Block(728, 728, 3, 1) + self.block11 = Block(728, 728, 3, 1) + + self.block12 = Block(728, 1024, 2, 2, grow_first=False) + + self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1) + self.bn3 = nn.BatchNorm2d(1536) + self.act3 = nn.ReLU(inplace=True) 
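# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the patch: the SeparableConv2d above
# factorises a dense KxK convolution into a depthwise KxK conv
# (groups == in_channels) followed by a 1x1 pointwise conv, greatly reducing
# parameters and FLOPs while keeping the same output shape. A standalone
# comparison in plain torch:
import torch
import torch.nn as nn

cin, cout, k = 128, 256, 3
dense = nn.Conv2d(cin, cout, k, padding=1, bias=False)
separable = nn.Sequential(
    nn.Conv2d(cin, cin, k, padding=1, groups=cin, bias=False),  # depthwise 3x3
    nn.Conv2d(cin, cout, 1, bias=False),                        # pointwise 1x1
)
n_dense = sum(p.numel() for p in dense.parameters())            # 128*256*9 = 294912
n_separable = sum(p.numel() for p in separable.parameters())    # 128*9 + 128*256 = 33920
x = torch.randn(1, cin, 32, 32)
assert dense(x).shape == separable(x).shape == (1, cout, 32, 32)
# ---------------------------------------------------------------------------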
+ + self.conv4 = SeparableConv2d(1536, self.num_features, 3, 1, 1) + self.bn4 = nn.BatchNorm2d(self.num_features) + self.act4 = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='act2'), + dict(num_chs=128, reduction=4, module='block2.rep.0'), + dict(num_chs=256, reduction=8, module='block3.rep.0'), + dict(num_chs=728, reduction=16, module='block12.rep.0'), + dict(num_chs=2048, reduction=32, module='act4'), + ] + + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + # #------- init weights -------- + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^conv[12]|bn[12]', + blocks=[ + (r'^block(\d+)', None), + (r'^conv[34]|bn[34]', (99,)), + ], + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, "gradient checkpointing not supported" + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.fc + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + + x = self.block1(x) + x = self.block2(x) + x = self.block3(x) + x = self.block4(x) + x = self.block5(x) + x = self.block6(x) + x = self.block7(x) + x = self.block8(x) + x = self.block9(x) + x = self.block10(x) + x = self.block11(x) + x = self.block12(x) + + x = self.conv3(x) + x = self.bn3(x) + x = self.act3(x) + + x = self.conv4(x) + x = self.bn4(x) + x = self.act4(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate: + F.dropout(x, self.drop_rate, training=self.training) + return x if pre_logits else self.fc(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _xception(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + Xception, variant, pretrained, + feature_cfg=dict(feature_cls='hook'), + **kwargs) + + +default_cfgs = generate_default_cfgs({ + 'legacy_xception.tf_in1k': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth', + 'input_size': (3, 299, 299), + 'pool_size': (10, 10), + 'crop_pct': 0.8975, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv1', + 'classifier': 'fc' + # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 + } +}) + + +@register_model +def legacy_xception(pretrained=False, **kwargs) -> Xception: + return _xception('legacy_xception', pretrained=pretrained, **kwargs) + + +register_model_deprecations(__name__, { + 'xception': 'legacy_xception', +}) diff --git a/pytorch-image-models/timm/models/xception_aligned.py b/pytorch-image-models/timm/models/xception_aligned.py new file mode 100644 index 0000000000000000000000000000000000000000..f9071ed3f3941487b00f2ec7caef9475ac8ebc81 --- /dev/null +++ b/pytorch-image-models/timm/models/xception_aligned.py @@ -0,0 +1,435 @@ +"""Pytorch impl of Aligned 
Xception 41, 65, 71 + +This is a correct, from scratch impl of Aligned Xception (Deeplab) models compatible with TF weights at +https://github.com/tensorflow/models/blob/master/research/deeplab/g3doc/model_zoo.md + +Hacked together by / Copyright 2020 Ross Wightman +""" +from functools import partial +from typing import List, Dict, Type, Optional + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from timm.layers import ClassifierHead, ConvNormAct, DropPath, PadType, create_conv2d, get_norm_act_layer +from timm.layers.helpers import to_3tuple +from ._builder import build_model_with_cfg +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs + +__all__ = ['XceptionAligned'] + + +class SeparableConv2d(nn.Module): + def __init__( + self, + in_chs: int, + out_chs: int, + kernel_size: int = 3, + stride: int = 1, + dilation: int = 1, + padding: PadType = '', + act_layer: Type[nn.Module] = nn.ReLU, + norm_layer: Type[nn.Module] = nn.BatchNorm2d, + ): + super(SeparableConv2d, self).__init__() + self.kernel_size = kernel_size + self.dilation = dilation + + # depthwise convolution + self.conv_dw = create_conv2d( + in_chs, in_chs, kernel_size, stride=stride, + padding=padding, dilation=dilation, depthwise=True) + self.bn_dw = norm_layer(in_chs) + self.act_dw = act_layer(inplace=True) if act_layer is not None else nn.Identity() + + # pointwise convolution + self.conv_pw = create_conv2d(in_chs, out_chs, kernel_size=1) + self.bn_pw = norm_layer(out_chs) + self.act_pw = act_layer(inplace=True) if act_layer is not None else nn.Identity() + + def forward(self, x): + x = self.conv_dw(x) + x = self.bn_dw(x) + x = self.act_dw(x) + x = self.conv_pw(x) + x = self.bn_pw(x) + x = self.act_pw(x) + return x + + +class PreSeparableConv2d(nn.Module): + def __init__( + self, + in_chs: int, + out_chs: int, + kernel_size: int = 3, + stride: int = 1, + dilation: int = 1, + padding: PadType = '', + act_layer: Type[nn.Module] = nn.ReLU, + norm_layer: Type[nn.Module] = nn.BatchNorm2d, + first_act: bool = True, + ): + super(PreSeparableConv2d, self).__init__() + norm_act_layer = get_norm_act_layer(norm_layer, act_layer=act_layer) + self.kernel_size = kernel_size + self.dilation = dilation + + self.norm = norm_act_layer(in_chs, inplace=True) if first_act else nn.Identity() + # depthwise convolution + self.conv_dw = create_conv2d( + in_chs, in_chs, kernel_size, stride=stride, + padding=padding, dilation=dilation, depthwise=True) + + # pointwise convolution + self.conv_pw = create_conv2d(in_chs, out_chs, kernel_size=1) + + def forward(self, x): + x = self.norm(x) + x = self.conv_dw(x) + x = self.conv_pw(x) + return x + + +class XceptionModule(nn.Module): + def __init__( + self, + in_chs: int, + out_chs: int, + stride: int = 1, + dilation: int = 1, + pad_type: PadType = '', + start_with_relu: bool = True, + no_skip: bool = False, + act_layer: Type[nn.Module] = nn.ReLU, + norm_layer: Optional[Type[nn.Module]] = None, + drop_path: Optional[nn.Module] = None + ): + super(XceptionModule, self).__init__() + out_chs = to_3tuple(out_chs) + self.in_channels = in_chs + self.out_channels = out_chs[-1] + self.no_skip = no_skip + if not no_skip and (self.out_channels != self.in_channels or stride != 1): + self.shortcut = ConvNormAct( + in_chs, self.out_channels, 1, stride=stride, norm_layer=norm_layer, apply_act=False) + else: + self.shortcut = None + + separable_act_layer = None if start_with_relu else act_layer + self.stack = 
nn.Sequential() + for i in range(3): + if start_with_relu: + self.stack.add_module(f'act{i + 1}', act_layer(inplace=i > 0)) + self.stack.add_module(f'conv{i + 1}', SeparableConv2d( + in_chs, out_chs[i], 3, stride=stride if i == 2 else 1, dilation=dilation, padding=pad_type, + act_layer=separable_act_layer, norm_layer=norm_layer)) + in_chs = out_chs[i] + + self.drop_path = drop_path + + def forward(self, x): + skip = x + x = self.stack(x) + if self.shortcut is not None: + skip = self.shortcut(skip) + if not self.no_skip: + if self.drop_path is not None: + x = self.drop_path(x) + x = x + skip + return x + + +class PreXceptionModule(nn.Module): + def __init__( + self, + in_chs: int, + out_chs: int, + stride: int = 1, + dilation: int = 1, + pad_type: PadType = '', + no_skip: bool = False, + act_layer: Type[nn.Module] = nn.ReLU, + norm_layer: Optional[Type[nn.Module]] = None, + drop_path: Optional[nn.Module] = None + ): + super(PreXceptionModule, self).__init__() + out_chs = to_3tuple(out_chs) + self.in_channels = in_chs + self.out_channels = out_chs[-1] + self.no_skip = no_skip + if not no_skip and (self.out_channels != self.in_channels or stride != 1): + self.shortcut = create_conv2d(in_chs, self.out_channels, 1, stride=stride) + else: + self.shortcut = nn.Identity() + + self.norm = get_norm_act_layer(norm_layer, act_layer=act_layer)(in_chs, inplace=True) + self.stack = nn.Sequential() + for i in range(3): + self.stack.add_module(f'conv{i + 1}', PreSeparableConv2d( + in_chs, + out_chs[i], + 3, + stride=stride if i == 2 else 1, + dilation=dilation, + padding=pad_type, + act_layer=act_layer, + norm_layer=norm_layer, + first_act=i > 0, + )) + in_chs = out_chs[i] + + self.drop_path = drop_path + + def forward(self, x): + x = self.norm(x) + skip = x + x = self.stack(x) + if not self.no_skip: + if self.drop_path is not None: + x = self.drop_path(x) + x = x + self.shortcut(skip) + return x + + +class XceptionAligned(nn.Module): + """Modified Aligned Xception + """ + + def __init__( + self, + block_cfg: List[Dict], + num_classes: int = 1000, + in_chans: int = 3, + output_stride: int = 32, + preact: bool = False, + act_layer: Type[nn.Module] = nn.ReLU, + norm_layer: Type[nn.Module] = nn.BatchNorm2d, + drop_rate: float = 0., + drop_path_rate: float = 0., + global_pool: str = 'avg', + ): + super(XceptionAligned, self).__init__() + assert output_stride in (8, 16, 32) + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + + layer_args = dict(act_layer=act_layer, norm_layer=norm_layer) + self.stem = nn.Sequential(*[ + ConvNormAct(in_chans, 32, kernel_size=3, stride=2, **layer_args), + create_conv2d(32, 64, kernel_size=3, stride=1) if preact else + ConvNormAct(32, 64, kernel_size=3, stride=1, **layer_args) + ]) + + curr_dilation = 1 + curr_stride = 2 + self.feature_info = [] + self.blocks = nn.Sequential() + module_fn = PreXceptionModule if preact else XceptionModule + net_num_blocks = len(block_cfg) + net_block_idx = 0 + for i, b in enumerate(block_cfg): + block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule + b['drop_path'] = DropPath(block_dpr) if block_dpr > 0. 
else None + b['dilation'] = curr_dilation + if b['stride'] > 1: + name = f'blocks.{i}.stack.conv2' if preact else f'blocks.{i}.stack.act3' + self.feature_info += [dict(num_chs=to_3tuple(b['out_chs'])[-2], reduction=curr_stride, module=name)] + next_stride = curr_stride * b['stride'] + if next_stride > output_stride: + curr_dilation *= b['stride'] + b['stride'] = 1 + else: + curr_stride = next_stride + self.blocks.add_module(str(i), module_fn(**b, **layer_args)) + self.num_features = self.blocks[-1].out_channels + net_block_idx += 1 + + self.feature_info += [dict( + num_chs=self.num_features, reduction=curr_stride, module='blocks.' + str(len(self.blocks) - 1))] + self.act = act_layer(inplace=True) if preact else nn.Identity() + self.head_hidden_size = self.num_features + self.head = ClassifierHead( + in_features=self.num_features, + num_classes=num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + ) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^blocks\.(\d+)', + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.head.reset(num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + x = self.act(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _xception(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + XceptionAligned, + variant, + pretrained, + feature_cfg=dict(flatten_sequential=True, feature_cls='hook'), + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (10, 10), + 'crop_pct': 0.903, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'xception65.ra3_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.94, + ), + + 'xception41.tf_in1k': _cfg(hf_hub_id='timm/'), + 'xception65.tf_in1k': _cfg(hf_hub_id='timm/'), + 'xception71.tf_in1k': _cfg(hf_hub_id='timm/'), + + 'xception41p.ra3_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.94, + ), + 'xception65p.ra3_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.94, + ), +}) + + +@register_model +def xception41(pretrained=False, **kwargs) -> XceptionAligned: + """ Modified Aligned Xception-41 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 8), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1)) + return _xception('xception41', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def xception65(pretrained=False, **kwargs) -> 
XceptionAligned: + """ Modified Aligned Xception-65 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 16), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1)) + return _xception('xception65', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def xception71(pretrained=False, **kwargs) -> XceptionAligned: + """ Modified Aligned Xception-71 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=1), + dict(in_chs=256, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=1), + dict(in_chs=728, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 16), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1)) + return _xception('xception71', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def xception41p(pretrained=False, **kwargs) -> XceptionAligned: + """ Modified Aligned Xception-41 w/ Pre-Act + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 8), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), no_skip=True, stride=1), + ] + model_args = dict(block_cfg=block_cfg, preact=True, norm_layer=nn.BatchNorm2d) + return _xception('xception41p', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def xception65p(pretrained=False, **kwargs) -> XceptionAligned: + """ Modified Aligned Xception-65 w/ Pre-Act + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 16), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True), + ] + model_args = dict( + block_cfg=block_cfg, preact=True, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1)) + return _xception('xception65p', pretrained=pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/xcit.py b/pytorch-image-models/timm/models/xcit.py new file mode 100644 index 0000000000000000000000000000000000000000..1e902ac23f552ed914f36b9aa16f8cd8ff595a6a --- /dev/null +++ b/pytorch-image-models/timm/models/xcit.py @@ -0,0 +1,1003 @@ +""" Cross-Covariance Image Transformer (XCiT) in PyTorch + +Paper: + - https://arxiv.org/abs/2106.09681 + +Same as the official implementation, with some minor adaptations, original copyright below + - https://github.com/facebookresearch/xcit/blob/master/xcit.py + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. 
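# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the patch: XceptionAligned (defined earlier
# in this diff) converts block strides into dilation once the cumulative stride
# would exceed `output_stride`, which is how the DeepLab-style backbones keep
# denser feature maps without changing the weights. Assumes the timm package
# added in this diff is importable.
import torch
import timm

m32 = timm.create_model('xception65', pretrained=False, features_only=True)
m16 = timm.create_model('xception65', pretrained=False, features_only=True, output_stride=16)
x = torch.randn(1, 3, 299, 299)
print([f.shape[-1] for f in m32(x)])  # spatial width shrinking down to stride 32
print([f.shape[-1] for f in m16(x)])  # final stage held at stride 16 via dilation
# ---------------------------------------------------------------------------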
+ +import math +from functools import partial +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn +from torch.utils.checkpoint import checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import DropPath, trunc_normal_, to_2tuple, use_fused_attn +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._features_fx import register_notrace_module +from ._registry import register_model, generate_default_cfgs, register_model_deprecations +from .cait import ClassAttn +from .vision_transformer import Mlp + +__all__ = ['Xcit'] # model_registry will add each entrypoint fn to this + + +@register_notrace_module # reason: FX can't symbolically trace torch.arange in forward method +class PositionalEncodingFourier(nn.Module): + """ + Positional encoding relying on a fourier kernel matching the one used in the "Attention is all you Need" paper. + Based on the official XCiT code + - https://github.com/facebookresearch/xcit/blob/master/xcit.py + """ + + def __init__(self, hidden_dim=32, dim=768, temperature=10000): + super().__init__() + self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1) + self.scale = 2 * math.pi + self.temperature = temperature + self.hidden_dim = hidden_dim + self.dim = dim + self.eps = 1e-6 + + def forward(self, B: int, H: int, W: int): + device = self.token_projection.weight.device + dtype = self.token_projection.weight.dtype + y_embed = torch.arange(1, H + 1, device=device).to(torch.float32).unsqueeze(1).repeat(1, 1, W) + x_embed = torch.arange(1, W + 1, device=device).to(torch.float32).repeat(1, H, 1) + y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale + dim_t = torch.arange(self.hidden_dim, device=device).to(torch.float32) + dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim) + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3) + pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + pos = self.token_projection(pos.to(dtype)) + return pos.repeat(B, 1, 1, 1) # (B, C, H, W) + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution + batch norm""" + return torch.nn.Sequential( + nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False), + nn.BatchNorm2d(out_planes) + ) + + +class ConvPatchEmbed(nn.Module): + """Image to Patch Embedding using multiple convolutional layers""" + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU): + super().__init__() + img_size = to_2tuple(img_size) + num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + if patch_size == 16: + self.proj = torch.nn.Sequential( + conv3x3(in_chans, embed_dim // 8, 2), + act_layer(), + conv3x3(embed_dim // 8, embed_dim // 4, 2), + act_layer(), + conv3x3(embed_dim // 4, embed_dim // 2, 2), + act_layer(), + conv3x3(embed_dim // 2, embed_dim, 2), + ) + elif patch_size == 8: + self.proj = torch.nn.Sequential( + conv3x3(in_chans, embed_dim // 4, 2), + act_layer(), + conv3x3(embed_dim // 4, embed_dim // 2, 2), + act_layer(), + conv3x3(embed_dim // 
2, embed_dim, 2), + ) + else: + raise('For convolutional projection, patch size has to be in [8, 16]') + + def forward(self, x): + x = self.proj(x) + Hp, Wp = x.shape[2], x.shape[3] + x = x.flatten(2).transpose(1, 2) # (B, N, C) + return x, (Hp, Wp) + + +class LPI(nn.Module): + """ + Local Patch Interaction module that allows explicit communication between tokens in 3x3 windows to augment the + implicit communication performed by the block diagonal scatter attention. Implemented using 2 layers of separable + 3x3 convolutions with GeLU and BatchNorm2d + """ + + def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3): + super().__init__() + out_features = out_features or in_features + + padding = kernel_size // 2 + + self.conv1 = torch.nn.Conv2d( + in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features) + self.act = act_layer() + self.bn = nn.BatchNorm2d(in_features) + self.conv2 = torch.nn.Conv2d( + in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features) + + def forward(self, x, H: int, W: int): + B, N, C = x.shape + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.conv1(x) + x = self.act(x) + x = self.bn(x) + x = self.conv2(x) + x = x.reshape(B, C, N).permute(0, 2, 1) + return x + + +class ClassAttentionBlock(nn.Module): + """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239""" + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + eta=1., + tokens_norm=False, + ): + super().__init__() + self.norm1 = norm_layer(dim) + + self.attn = ClassAttn( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) + + if eta is not None: # LayerScale Initialization (no layerscale when None) + self.gamma1 = nn.Parameter(eta * torch.ones(dim)) + self.gamma2 = nn.Parameter(eta * torch.ones(dim)) + else: + self.gamma1, self.gamma2 = 1.0, 1.0 + + # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 + self.tokens_norm = tokens_norm + + def forward(self, x): + x_norm1 = self.norm1(x) + x_attn = torch.cat([self.attn(x_norm1), x_norm1[:, 1:]], dim=1) + x = x + self.drop_path(self.gamma1 * x_attn) + if self.tokens_norm: + x = self.norm2(x) + else: + x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1) + x_res = x + cls_token = x[:, 0:1] + cls_token = self.gamma2 * self.mlp(cls_token) + x = torch.cat([cls_token, x[:, 1:]], dim=1) + x = x_res + self.drop_path(x) + return x + + +class XCA(nn.Module): + fused_attn: torch.jit.Final[bool] + """ Cross-Covariance Attention (XCA) + Operation where the channels are updated using a weighted sum. 
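# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the patch: the XCA module whose definition
# continues below computes attention between channels rather than tokens. With
# L2-normalised Q and K laid out as (B, heads, d_head, N), the attention map is
# only d_head x d_head per head, so the cost grows linearly in the token count N.
import torch
import torch.nn.functional as F

B, H, N, d = 2, 4, 196, 32                        # batch, heads, tokens, channels per head
q = F.normalize(torch.randn(B, H, d, N), dim=-1)  # normalise along the token axis
k = F.normalize(torch.randn(B, H, d, N), dim=-1)
v = torch.randn(B, H, d, N)
temperature = torch.ones(H, 1, 1)                 # learned per-head scale in XCA

attn = (q @ k.transpose(-2, -1)) * temperature    # (B, H, d, d): independent of N
out = attn.softmax(dim=-1) @ v                    # (B, H, d, N)
assert attn.shape == (2, 4, 32, 32) and out.shape == (2, 4, 32, 196)
# ---------------------------------------------------------------------------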
The weights are obtained from the (softmax + normalized) Cross-covariance matrix (Q^T \\cdot K \\in d_h \\times d_h) + """ + + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + self.fused_attn = use_fused_attn(experimental=True) + self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + # Result of next line is (qkv, B, num (H)eads, (C')hannels per head, N) + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 4, 1) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + if self.fused_attn: + q = torch.nn.functional.normalize(q, dim=-1) * self.temperature + k = torch.nn.functional.normalize(k, dim=-1) + x = torch.nn.functional.scaled_dot_product_attention(q, k, v, scale=1.0) + else: + # Paper section 3.2 l2-Normalization and temperature scaling + q = torch.nn.functional.normalize(q, dim=-1) + k = torch.nn.functional.normalize(k, dim=-1) + attn = (q @ k.transpose(-2, -1)) * self.temperature + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.permute(0, 3, 1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @torch.jit.ignore + def no_weight_decay(self): + return {'temperature'} + + +class XCABlock(nn.Module): + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + eta=1., + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.norm3 = norm_layer(dim) + self.local_mp = LPI(in_features=dim, act_layer=act_layer) + + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) + + self.gamma1 = nn.Parameter(eta * torch.ones(dim)) + self.gamma3 = nn.Parameter(eta * torch.ones(dim)) + self.gamma2 = nn.Parameter(eta * torch.ones(dim)) + + def forward(self, x, H: int, W: int): + x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x))) + # NOTE official code has 3 then 2, so keeping it the same to be consistent with loaded weights + # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 + x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W)) + x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x))) + return x + + +class Xcit(nn.Module): + """ + Based on timm and DeiT code bases + https://github.com/rwightman/pytorch-image-models/tree/master/timm + https://github.com/facebookresearch/deit/ + """ + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + global_pool='token', + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=True, + drop_rate=0., + pos_drop_rate=0., + proj_drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + act_layer=None, + norm_layer=None, + cls_attn_layers=2, + use_pos_embed=True, + eta=1., + tokens_norm=False, + ): + """ + Args: + img_size (int, tuple): input image size + patch_size (int): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + drop_rate (float): dropout rate after positional embedding, and in XCA/CA projection + MLP + pos_drop_rate: position embedding dropout rate + proj_drop_rate (float): projection dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate (constant across all layers) + norm_layer: (nn.Module): normalization layer + cls_attn_layers: (int) Depth of Class attention layers + use_pos_embed: (bool) whether to use positional encoding + eta: (float) layerscale initialization value + tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA + + Notes: + - Although `layer_norm` is user specifiable, there are hard-coded `BatchNorm2d`s in the local patch + interaction (class LPI) and the patch embedding (class ConvPatchEmbed) + """ + super().__init__() + assert global_pool in ('', 'avg', 'token') + img_size = to_2tuple(img_size) + assert (img_size[0] % patch_size == 0) and (img_size[0] % patch_size == 0), \ + '`patch_size` should divide image dimensions evenly' + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + + self.num_classes = num_classes + self.num_features = self.head_hidden_size = self.embed_dim = embed_dim + self.global_pool = global_pool + self.grad_checkpointing = False + + self.patch_embed = ConvPatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + act_layer=act_layer, + ) + r = patch_size + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + if use_pos_embed: + self.pos_embed = PositionalEncodingFourier(dim=embed_dim) + else: + self.pos_embed = None + self.pos_drop = 
nn.Dropout(p=pos_drop_rate) + + self.blocks = nn.ModuleList([ + XCABlock( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=drop_path_rate, + act_layer=act_layer, + norm_layer=norm_layer, + eta=eta, + ) + for _ in range(depth)]) + self.feature_info = [dict(num_chs=embed_dim, reduction=r, module=f'blocks.{i}') for i in range(depth)] + + self.cls_attn_blocks = nn.ModuleList([ + ClassAttentionBlock( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_drop=drop_rate, + attn_drop=attn_drop_rate, + act_layer=act_layer, + norm_layer=norm_layer, + eta=eta, + tokens_norm=tokens_norm, + ) + for _ in range(cls_attn_layers)]) + + # Classifier head + self.norm = norm_layer(embed_dim) + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + # Init weights + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^cls_token|pos_embed|patch_embed', # stem and embed + blocks=r'^blocks\.(\d+)', + cls_attn_blocks=[(r'^cls_attn_blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'avg', 'token') + self.global_pool = global_pool + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to all intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' 
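# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the patch: forward_intermediates(),
# whose body continues below, returns the final token sequence plus selected
# per-block intermediates; with output_fmt='NCHW' the tokens are reshaped back
# onto the patch grid. Assumes the timm package added in this diff is importable.
import torch
import timm

model = timm.create_model('xcit_nano_12_p16_224', pretrained=False)
x = torch.randn(1, 3, 224, 224)
final, inter = model.forward_intermediates(x, indices=3, output_fmt='NCHW')
print(final.shape)                        # (1, 197, 128): cls token + 14*14 patch tokens
print([tuple(t.shape) for t in inter])    # three maps of shape (1, 128, 14, 14)
# ---------------------------------------------------------------------------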
+ reshape = output_fmt == 'NCHW' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + + # forward pass + B, _, height, width = x.shape + x, (Hp, Wp) = self.patch_embed(x) + if self.pos_embed is not None: + # `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C) + pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1) + x = x + pos_encoding + x = self.pos_drop(x) + + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + blocks = self.blocks + else: + blocks = self.blocks[:max_index + 1] + for i, blk in enumerate(blocks): + x = blk(x, Hp, Wp) + if i in take_indices: + # normalize intermediates with final norm layer if enabled + intermediates.append(self.norm(x) if norm else x) + + # process intermediates + if reshape: + # reshape to BCHW output format + intermediates = [y.reshape(B, Hp, Wp, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] + + if intermediates_only: + return intermediates + + # NOTE not supporting return of class tokens + x = torch.cat((self.cls_token.expand(B, -1, -1), x), dim=1) + for blk in self.cls_attn_blocks: + x = blk(x) + x = self.norm(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. + """ + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + self.blocks = self.blocks[:max_index + 1] # truncate blocks + if prune_norm: + self.norm = nn.Identity() + if prune_head: + self.cls_attn_blocks = nn.ModuleList() # prune token blocks with head + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + B = x.shape[0] + # x is (B, N, C). (Hp, Hw) is (height in units of patches, width in units of patches) + x, (Hp, Wp) = self.patch_embed(x) + + if self.pos_embed is not None: + # `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C) + pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1) + x = x + pos_encoding + x = self.pos_drop(x) + + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(blk, x, Hp, Wp) + else: + x = blk(x, Hp, Wp) + + x = torch.cat((self.cls_token.expand(B, -1, -1), x), dim=1) + + for blk in self.cls_attn_blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(blk, x) + else: + x = blk(x) + + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + if 'model' in state_dict: + state_dict = state_dict['model'] + # For consistency with timm's transformer models while being compatible with official weights source we rename + # pos_embeder to pos_embed. 
Also account for use_pos_embed == False + use_pos_embed = getattr(model, 'pos_embed', None) is not None + pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')] + for k in pos_embed_keys: + if use_pos_embed: + state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k) + else: + del state_dict[k] + # timm's implementation of class attention in CaiT is slightly more efficient as it does not compute query vectors + # for all tokens, just the class token. To use official weights source we must split qkv into q, k, v + if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict(): + num_ca_blocks = len(model.cls_attn_blocks) + for i in range(num_ca_blocks): + qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight') + qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1]) + for j, subscript in enumerate('qkv'): + state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j] + qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None) + if qkv_bias is not None: + qkv_bias = qkv_bias.reshape(3, -1) + for j, subscript in enumerate('qkv'): + state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j] + return state_dict + + +def _create_xcit(variant, pretrained=False, default_cfg=None, **kwargs): + out_indices = kwargs.pop('out_indices', 3) + model = build_model_with_cfg( + Xcit, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + # Patch size 16 + 'xcit_nano_12_p16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224.pth'), + 'xcit_nano_12_p16_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224_dist.pth'), + 'xcit_nano_12_p16_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_12_p16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224.pth'), + 'xcit_tiny_12_p16_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224_dist.pth'), + 'xcit_tiny_12_p16_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_24_p16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224.pth'), + 'xcit_tiny_24_p16_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224_dist.pth'), + 'xcit_tiny_24_p16_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_12_p16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224.pth'), + 'xcit_small_12_p16_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + 
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224_dist.pth'), + 'xcit_small_12_p16_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_24_p16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224.pth'), + 'xcit_small_24_p16_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224_dist.pth'), + 'xcit_small_24_p16_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_medium_24_p16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224.pth'), + 'xcit_medium_24_p16_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224_dist.pth'), + 'xcit_medium_24_p16_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_large_24_p16_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224.pth'), + 'xcit_large_24_p16_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224_dist.pth'), + 'xcit_large_24_p16_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_384_dist.pth', input_size=(3, 384, 384)), + + # Patch size 8 + 'xcit_nano_12_p8_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224.pth'), + 'xcit_nano_12_p8_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224_dist.pth'), + 'xcit_nano_12_p8_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_12_p8_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224.pth'), + 'xcit_tiny_12_p8_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224_dist.pth'), + 'xcit_tiny_12_p8_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_24_p8_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224.pth'), + 'xcit_tiny_24_p8_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224_dist.pth'), + 'xcit_tiny_24_p8_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_12_p8_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224.pth'), + 'xcit_small_12_p8_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224_dist.pth'), + 'xcit_small_12_p8_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_24_p8_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224.pth'), + 'xcit_small_24_p8_224.fb_dist_in1k': _cfg( + 
hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224_dist.pth'), + 'xcit_small_24_p8_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_medium_24_p8_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224.pth'), + 'xcit_medium_24_p8_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224_dist.pth'), + 'xcit_medium_24_p8_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_large_24_p8_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224.pth'), + 'xcit_large_24_p8_224.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224_dist.pth'), + 'xcit_large_24_p8_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_384_dist.pth', input_size=(3, 384, 384)), +}) + + +@register_model +def xcit_nano_12_p16_224(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) + model = _create_xcit('xcit_nano_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_nano_12_p16_384(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, img_size=384) + model = _create_xcit('xcit_nano_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_tiny_12_p16_224(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) + model = _create_xcit('xcit_tiny_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_tiny_12_p16_384(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) + model = _create_xcit('xcit_tiny_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_small_12_p16_224(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) + model = _create_xcit('xcit_small_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_small_12_p16_384(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) + model = _create_xcit('xcit_small_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_tiny_24_p16_224(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_tiny_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_tiny_24_p16_384(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_tiny_24_p16_384', pretrained=pretrained, 
**dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_small_24_p16_224(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_small_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_small_24_p16_384(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_small_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_medium_24_p16_224(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_medium_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_medium_24_p16_384(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_medium_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_large_24_p16_224(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_large_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_large_24_p16_384(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_large_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +# Patch size 8x8 models +@register_model +def xcit_nano_12_p8_224(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) + model = _create_xcit('xcit_nano_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_nano_12_p8_384(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) + model = _create_xcit('xcit_nano_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_tiny_12_p8_224(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) + model = _create_xcit('xcit_tiny_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_tiny_12_p8_384(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) + model = _create_xcit('xcit_tiny_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_small_12_p8_224(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) + model = _create_xcit('xcit_small_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_small_12_p8_384(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=8, embed_dim=384, depth=12, 
num_heads=8, eta=1.0, tokens_norm=True) + model = _create_xcit('xcit_small_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_tiny_24_p8_224(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_tiny_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_tiny_24_p8_384(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_tiny_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_small_24_p8_224(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_small_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_small_24_p8_384(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_small_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_medium_24_p8_224(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_medium_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_medium_24_p8_384(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_medium_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_large_24_p8_224(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_large_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def xcit_large_24_p8_384(pretrained=False, **kwargs) -> Xcit: + model_args = dict( + patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) + model = _create_xcit('xcit_large_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +register_model_deprecations(__name__, { + # Patch size 16 + 'xcit_nano_12_p16_224_dist': 'xcit_nano_12_p16_224.fb_dist_in1k', + 'xcit_nano_12_p16_384_dist': 'xcit_nano_12_p16_384.fb_dist_in1k', + 'xcit_tiny_12_p16_224_dist': 'xcit_tiny_12_p16_224.fb_dist_in1k', + 'xcit_tiny_12_p16_384_dist': 'xcit_tiny_12_p16_384.fb_dist_in1k', + 'xcit_tiny_24_p16_224_dist': 'xcit_tiny_24_p16_224.fb_dist_in1k', + 'xcit_tiny_24_p16_384_dist': 'xcit_tiny_24_p16_384.fb_dist_in1k', + 'xcit_small_12_p16_224_dist': 'xcit_small_12_p16_224.fb_dist_in1k', + 'xcit_small_12_p16_384_dist': 'xcit_small_12_p16_384.fb_dist_in1k', + 'xcit_small_24_p16_224_dist': 'xcit_small_24_p16_224.fb_dist_in1k', + 'xcit_small_24_p16_384_dist': 'xcit_small_24_p16_384.fb_dist_in1k', + 'xcit_medium_24_p16_224_dist': 'xcit_medium_24_p16_224.fb_dist_in1k', + 'xcit_medium_24_p16_384_dist': 'xcit_medium_24_p16_384.fb_dist_in1k', + 'xcit_large_24_p16_224_dist': 'xcit_large_24_p16_224.fb_dist_in1k', + 
diff --git a/pytorch-image-models/timm/utils/__pycache__/__init__.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a59f3fade92a7062a4d5ca840de3f5242098584
Binary files /dev/null and b/pytorch-image-models/timm/utils/__pycache__/__init__.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/utils/__pycache__/agc.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/agc.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a5ba7f6b06164af0a4104135c264153943ed3d69
Binary files /dev/null and b/pytorch-image-models/timm/utils/__pycache__/agc.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/utils/__pycache__/attention_extract.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/attention_extract.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f4a1c9e38c235a083b3d95ed035b711e40a43b2
Binary files /dev/null and b/pytorch-image-models/timm/utils/__pycache__/attention_extract.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/utils/__pycache__/checkpoint_saver.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/checkpoint_saver.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fdeb4c790a5e1f5677c420232f431b9f10269e53
Binary files /dev/null and b/pytorch-image-models/timm/utils/__pycache__/checkpoint_saver.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/utils/__pycache__/clip_grad.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/clip_grad.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3fbe73691cd52b81fa9aa68c6fb254d09437ca4
Binary files /dev/null and b/pytorch-image-models/timm/utils/__pycache__/clip_grad.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/utils/__pycache__/cuda.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/cuda.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19d710b4133be0763fe070d9a8ee734c407e5c84
Binary files /dev/null and b/pytorch-image-models/timm/utils/__pycache__/cuda.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/utils/__pycache__/decay_batch.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/decay_batch.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aa86a5b987da7450ac4af5d08bfb2f36343d1e35
Binary files /dev/null and b/pytorch-image-models/timm/utils/__pycache__/decay_batch.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/utils/__pycache__/jit.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/jit.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..61140fd73bce131541063326bfdb3011bdd63ef5
Binary files /dev/null and b/pytorch-image-models/timm/utils/__pycache__/jit.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/utils/__pycache__/log.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/log.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..39c83a15dbbd0635add6d4a9a9539cecfaac637b
Binary files /dev/null and b/pytorch-image-models/timm/utils/__pycache__/log.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/utils/__pycache__/metrics.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/metrics.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..12e37ae50c24b27a73870248623ffc6051f08c37
Binary files /dev/null and b/pytorch-image-models/timm/utils/__pycache__/metrics.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/utils/__pycache__/misc.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/misc.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..872f8d3932984926cd8b207782b68388302f39b9
Binary files /dev/null and b/pytorch-image-models/timm/utils/__pycache__/misc.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/utils/__pycache__/model.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/model.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d2c189c6843c70516a3625c401028b80ad2089f
Binary files /dev/null and b/pytorch-image-models/timm/utils/__pycache__/model.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/utils/__pycache__/model_ema.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/model_ema.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..610e958f101fdb53e7375a5cdd4da91813072cdd
Binary files /dev/null and b/pytorch-image-models/timm/utils/__pycache__/model_ema.cpython-39.pyc differ
diff --git a/pytorch-image-models/timm/utils/jit.py b/pytorch-image-models/timm/utils/jit.py
new file mode 100644
index 0000000000000000000000000000000000000000..d527411fd3e1985639bb0b161bd484142a3619dd
--- /dev/null
+++ b/pytorch-image-models/timm/utils/jit.py
@@ -0,0 +1,58 @@
+""" JIT scripting/tracing utils
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import os
+
+import torch
+
+
+def set_jit_legacy():
+    """ Set JIT executor to legacy w/ support for op fusion
+    This is hopefully a temporary need in 1.5/1.5.1/1.6 to restore performance due to changes
+    in the JIT executor. These APIs are not supported, so they could change.
+    """
+    #
+    assert hasattr(torch._C, '_jit_set_profiling_executor'), "Old JIT behavior doesn't exist!"
+    torch._C._jit_set_profiling_executor(False)
+    torch._C._jit_set_profiling_mode(False)
+    torch._C._jit_override_can_fuse_on_gpu(True)
+    #torch._C._jit_set_texpr_fuser_enabled(True)
+
+
+def set_jit_fuser(fuser):
+    if fuser == "te":
+        # default fuser should be == 'te'
+        torch._C._jit_set_profiling_executor(True)
+        torch._C._jit_set_profiling_mode(True)
+        torch._C._jit_override_can_fuse_on_cpu(False)
+        torch._C._jit_override_can_fuse_on_gpu(True)
+        torch._C._jit_set_texpr_fuser_enabled(True)
+        try:
+            torch._C._jit_set_nvfuser_enabled(False)
+        except Exception:
+            pass
+    elif fuser == "old" or fuser == "legacy":
+        torch._C._jit_set_profiling_executor(False)
+        torch._C._jit_set_profiling_mode(False)
+        torch._C._jit_override_can_fuse_on_gpu(True)
+        torch._C._jit_set_texpr_fuser_enabled(False)
+        try:
+            torch._C._jit_set_nvfuser_enabled(False)
+        except Exception:
+            pass
+    elif fuser == "nvfuser" or fuser == "nvf":
+        os.environ['PYTORCH_NVFUSER_DISABLE_FALLBACK'] = '1'
+        #os.environ['PYTORCH_NVFUSER_DISABLE_FMA'] = '1'
+        #os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'
+        torch._C._jit_set_texpr_fuser_enabled(False)
+        torch._C._jit_set_profiling_executor(True)
+        torch._C._jit_set_profiling_mode(True)
+        torch._C._jit_can_fuse_on_cpu()
+        torch._C._jit_can_fuse_on_gpu()
+        torch._C._jit_override_can_fuse_on_cpu(False)
+        torch._C._jit_override_can_fuse_on_gpu(False)
+        torch._C._jit_set_nvfuser_guard_mode(True)
+        torch._C._jit_set_nvfuser_enabled(True)
+    else:
+        assert False, f"Invalid jit fuser ({fuser})"
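Usage sketch (not taken from the diff): set_jit_fuser is meant to be called once, before scripting or tracing, to select the TorchScript fusion backend. This assumes a PyTorch build where the internal torch._C hooks used above still exist and that timm.utils.jit is importable from this tree.

import torch
from timm.utils.jit import set_jit_fuser

set_jit_fuser('te')  # or 'old' / 'legacy', or 'nvfuser' / 'nvf'

# Any subsequently scripted module runs under the selected fuser settings.
scripted = torch.jit.script(torch.nn.Linear(8, 8))
out = scripted(torch.randn(2, 8))
print(out.shape)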
diff --git a/pytorch-image-models/timm/utils/summary.py b/pytorch-image-models/timm/utils/summary.py
new file mode 100644
index 0000000000000000000000000000000000000000..eccbb941fef37283f1819180c0d9834459a6ec14
--- /dev/null
+++ b/pytorch-image-models/timm/utils/summary.py
@@ -0,0 +1,51 @@
+""" Summary utilities
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import csv
+import os
+from collections import OrderedDict
+try:
+    import wandb
+except ImportError:
+    pass
+
+
+def get_outdir(path, *paths, inc=False):
+    outdir = os.path.join(path, *paths)
+    if not os.path.exists(outdir):
+        os.makedirs(outdir)
+    elif inc:
+        count = 1
+        outdir_inc = outdir + '-' + str(count)
+        while os.path.exists(outdir_inc):
+            count = count + 1
+            outdir_inc = outdir + '-' + str(count)
+            assert count < 100
+        outdir = outdir_inc
+        os.makedirs(outdir)
+    return outdir
+
+
+def update_summary(
+        epoch,
+        train_metrics,
+        eval_metrics,
+        filename,
+        lr=None,
+        write_header=False,
+        log_wandb=False,
+):
+    rowd = OrderedDict(epoch=epoch)
+    rowd.update([('train_' + k, v) for k, v in train_metrics.items()])
+    if eval_metrics:
+        rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()])
+    if lr is not None:
+        rowd['lr'] = lr
+    if log_wandb:
+        wandb.log(rowd)
+    with open(filename, mode='a') as cf:
+        dw = csv.DictWriter(cf, fieldnames=rowd.keys())
+        if write_header:  # first iteration (epoch == 1 can't be used)
+            dw.writeheader()
+        dw.writerow(rowd)
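Usage sketch for the summary helpers added above (paths and metric values are illustrative, not taken from the diff). get_outdir with inc=True appends -1, -2, ... when the directory already exists; the header decision is keyed off the CSV file's existence rather than epoch == 1, per the comment in the file.

import os
from timm.utils.summary import get_outdir, update_summary

output_dir = get_outdir('./output', 'train', inc=True)
csv_path = os.path.join(output_dir, 'summary.csv')

for epoch in range(1, 4):
    train_metrics = {'loss': 1.0 / epoch}
    eval_metrics = {'loss': 1.2 / epoch, 'top1': 50.0 + epoch}
    update_summary(
        epoch,
        train_metrics,
        eval_metrics,
        filename=csv_path,
        lr=1e-3,
        write_header=not os.path.exists(csv_path),  # write header only when starting a new file
    )

Note that passing log_wandb=True assumes the optional wandb import at the top of the file succeeded; otherwise wandb.log would raise a NameError.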