meg (HF staff) committed
Commit c3fdff6 · verified · 1 Parent(s): 81d747c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. pytorch-image-models/timm/data/__pycache__/__init__.cpython-39.pyc +0 -0
  2. pytorch-image-models/timm/data/__pycache__/constants.cpython-39.pyc +0 -0
  3. pytorch-image-models/timm/data/__pycache__/loader.cpython-39.pyc +0 -0
  4. pytorch-image-models/timm/data/readers/__pycache__/img_extensions.cpython-39.pyc +0 -0
  5. pytorch-image-models/timm/data/readers/__pycache__/reader.cpython-39.pyc +0 -0
  6. pytorch-image-models/timm/data/readers/reader_image_in_tar.py +229 -0
  7. pytorch-image-models/timm/data/readers/reader_tfds.py +355 -0
  8. pytorch-image-models/timm/layers/__pycache__/__init__.cpython-39.pyc +0 -0
  9. pytorch-image-models/timm/layers/__pycache__/activations.cpython-39.pyc +0 -0
  10. pytorch-image-models/timm/layers/__pycache__/adaptive_avgmax_pool.cpython-39.pyc +0 -0
  11. pytorch-image-models/timm/layers/__pycache__/attention2d.cpython-39.pyc +0 -0
  12. pytorch-image-models/timm/layers/__pycache__/attention_pool.cpython-39.pyc +0 -0
  13. pytorch-image-models/timm/layers/__pycache__/bottleneck_attn.cpython-39.pyc +0 -0
  14. pytorch-image-models/timm/layers/__pycache__/cond_conv2d.cpython-39.pyc +0 -0
  15. pytorch-image-models/timm/layers/__pycache__/format.cpython-39.pyc +0 -0
  16. pytorch-image-models/timm/layers/__pycache__/grn.cpython-39.pyc +0 -0
  17. pytorch-image-models/timm/layers/__pycache__/helpers.cpython-39.pyc +0 -0
  18. pytorch-image-models/timm/layers/__pycache__/hybrid_embed.cpython-39.pyc +0 -0
  19. pytorch-image-models/timm/layers/__pycache__/linear.cpython-39.pyc +0 -0
  20. pytorch-image-models/timm/layers/__pycache__/mlp.cpython-39.pyc +0 -0
  21. pytorch-image-models/timm/layers/__pycache__/patch_embed.cpython-39.pyc +0 -0
  22. pytorch-image-models/timm/layers/__pycache__/pos_embed.cpython-39.pyc +0 -0
  23. pytorch-image-models/timm/layers/__pycache__/pos_embed_rel.cpython-39.pyc +0 -0
  24. pytorch-image-models/timm/layers/__pycache__/separable_conv.cpython-39.pyc +0 -0
  25. pytorch-image-models/timm/layers/__pycache__/std_conv.cpython-39.pyc +0 -0
  26. pytorch-image-models/timm/layers/__pycache__/test_time_pool.cpython-39.pyc +0 -0
  27. pytorch-image-models/timm/layers/adaptive_avgmax_pool.py +183 -0
  28. pytorch-image-models/timm/layers/attention_pool.py +105 -0
  29. pytorch-image-models/timm/layers/attention_pool2d.py +278 -0
  30. pytorch-image-models/timm/layers/blur_pool.py +91 -0
  31. pytorch-image-models/timm/layers/bottleneck_attn.py +157 -0
  32. pytorch-image-models/timm/layers/cbam.py +112 -0
  33. pytorch-image-models/timm/layers/classifier.py +283 -0
  34. pytorch-image-models/timm/layers/cond_conv2d.py +123 -0
  35. pytorch-image-models/timm/layers/config.py +149 -0
  36. pytorch-image-models/timm/layers/conv2d_same.py +110 -0
  37. pytorch-image-models/timm/layers/conv_bn_act.py +92 -0
  38. pytorch-image-models/timm/layers/create_act.py +138 -0
  39. pytorch-image-models/timm/layers/create_attn.py +89 -0
  40. pytorch-image-models/timm/layers/create_conv2d.py +36 -0
  41. pytorch-image-models/timm/layers/evo_norm.py +352 -0
  42. pytorch-image-models/timm/layers/fast_norm.py +150 -0
  43. pytorch-image-models/timm/layers/filter_response_norm.py +68 -0
  44. pytorch-image-models/timm/layers/format.py +58 -0
  45. pytorch-image-models/timm/layers/gather_excite.py +90 -0
  46. pytorch-image-models/timm/layers/global_context.py +67 -0
  47. pytorch-image-models/timm/layers/halo_attn.py +233 -0
  48. pytorch-image-models/timm/layers/hybrid_embed.py +253 -0
  49. pytorch-image-models/timm/layers/inplace_abn.py +87 -0
  50. pytorch-image-models/timm/layers/lambda_layer.py +134 -0
pytorch-image-models/timm/data/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (1.2 kB).
 
pytorch-image-models/timm/data/__pycache__/constants.cpython-39.pyc ADDED
Binary file (611 Bytes).
 
pytorch-image-models/timm/data/__pycache__/loader.cpython-39.pyc ADDED
Binary file (12.5 kB).
 
pytorch-image-models/timm/data/readers/__pycache__/img_extensions.cpython-39.pyc ADDED
Binary file (1.88 kB).
 
pytorch-image-models/timm/data/readers/__pycache__/reader.cpython-39.pyc ADDED
Binary file (1.11 kB).
 
pytorch-image-models/timm/data/readers/reader_image_in_tar.py ADDED
@@ -0,0 +1,229 @@
1
+ """ A dataset reader that reads tarfile based datasets
2
+
3
+ This reader can extract image samples from:
4
+ * a single tar of image files
5
+ * a folder of multiple tarfiles containing image files
6
+ * a tar of tars containing image files
7
+
8
+ Labels are based on the combined folder and/or tar name structure.
9
+
10
+ Hacked together by / Copyright 2020 Ross Wightman
11
+ """
12
+ import logging
13
+ import os
14
+ import pickle
15
+ import tarfile
16
+ from glob import glob
17
+ from typing import List, Tuple, Dict, Set, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from timm.utils.misc import natural_key
22
+
23
+ from .class_map import load_class_map
24
+ from .img_extensions import get_img_extensions
25
+ from .reader import Reader
26
+
27
+ _logger = logging.getLogger(__name__)
28
+ CACHE_FILENAME_SUFFIX = '_tarinfos.pickle'
29
+
30
+
31
+ class TarState:
32
+
33
+ def __init__(self, tf: tarfile.TarFile = None, ti: tarfile.TarInfo = None):
34
+ self.tf: tarfile.TarFile = tf
35
+ self.ti: tarfile.TarInfo = ti
36
+ self.children: Dict[str, TarState] = {} # child states (tars within tars)
37
+
38
+ def reset(self):
39
+ self.tf = None
40
+
41
+
42
+ def _extract_tarinfo(tf: tarfile.TarFile, parent_info: Dict, extensions: Set[str]):
43
+ sample_count = 0
44
+ for i, ti in enumerate(tf):
45
+ if not ti.isfile():
46
+ continue
47
+ dirname, basename = os.path.split(ti.path)
48
+ name, ext = os.path.splitext(basename)
49
+ ext = ext.lower()
50
+ if ext == '.tar':
51
+ with tarfile.open(fileobj=tf.extractfile(ti), mode='r|') as ctf:
52
+ child_info = dict(
53
+ name=ti.name, path=os.path.join(parent_info['path'], name), ti=ti, children=[], samples=[])
54
+ sample_count += _extract_tarinfo(ctf, child_info, extensions=extensions)
55
+ _logger.debug(f'{i}/?. Extracted child tarinfos from {ti.name}. {len(child_info["samples"])} images.')
56
+ parent_info['children'].append(child_info)
57
+ elif ext in extensions:
58
+ parent_info['samples'].append(ti)
59
+ sample_count += 1
60
+ return sample_count
61
+
62
+
63
+ def extract_tarinfos(
64
+ root,
65
+ class_name_to_idx: Optional[Dict] = None,
66
+ cache_tarinfo: Optional[bool] = None,
67
+ extensions: Optional[Union[List, Tuple, Set]] = None,
68
+ sort: bool = True
69
+ ):
70
+ extensions = get_img_extensions(as_set=True) if not extensions else set(extensions)
71
+ root_is_tar = False
72
+ if os.path.isfile(root):
73
+ assert os.path.splitext(root)[-1].lower() == '.tar'
74
+ tar_filenames = [root]
75
+ root, root_name = os.path.split(root)
76
+ root_name = os.path.splitext(root_name)[0]
77
+ root_is_tar = True
78
+ else:
79
+ root_name = root.strip(os.path.sep).split(os.path.sep)[-1]
80
+ tar_filenames = glob(os.path.join(root, '*.tar'), recursive=True)
81
+ num_tars = len(tar_filenames)
82
+ tar_bytes = sum([os.path.getsize(f) for f in tar_filenames])
83
+ assert num_tars, f'No .tar files found at specified path ({root}).'
84
+
85
+ _logger.info(f'Scanning {tar_bytes/1024**2:.2f}MB of tar files...')
86
+ info = dict(tartrees=[])
87
+ cache_path = ''
88
+ if cache_tarinfo is None:
89
+ cache_tarinfo = True if tar_bytes > 10*1024**3 else False # FIXME magic number, 10GB
90
+ if cache_tarinfo:
91
+ cache_filename = '_' + root_name + CACHE_FILENAME_SUFFIX
92
+ cache_path = os.path.join(root, cache_filename)
93
+ if os.path.exists(cache_path):
94
+ _logger.info(f'Reading tar info from cache file {cache_path}.')
95
+ with open(cache_path, 'rb') as pf:
96
+ info = pickle.load(pf)
97
+ assert len(info['tartrees']) == num_tars, "Cached tartree len doesn't match number of tarfiles"
98
+ else:
99
+ for i, fn in enumerate(tar_filenames):
100
+ path = '' if root_is_tar else os.path.splitext(os.path.basename(fn))[0]
101
+ with tarfile.open(fn, mode='r|') as tf: # tarinfo scans done in streaming mode
102
+ parent_info = dict(name=os.path.relpath(fn, root), path=path, ti=None, children=[], samples=[])
103
+ num_samples = _extract_tarinfo(tf, parent_info, extensions=extensions)
104
+ num_children = len(parent_info["children"])
105
+ _logger.debug(
106
+ f'{i}/{num_tars}. Extracted tarinfos from {fn}. {num_children} children, {num_samples} samples.')
107
+ info['tartrees'].append(parent_info)
108
+ if cache_path:
109
+ _logger.info(f'Writing tar info to cache file {cache_path}.')
110
+ with open(cache_path, 'wb') as pf:
111
+ pickle.dump(info, pf)
112
+
113
+ samples = []
114
+ labels = []
115
+ build_class_map = False
116
+ if class_name_to_idx is None:
117
+ build_class_map = True
118
+
119
+ # Flatten tartree info into lists of samples and targets w/ targets based on label id via
120
+ # class map arg or from unique paths.
121
+ # NOTE: currently only flattening up to two-levels, filesystem .tars and then one level of sub-tar children
122
+ # this covers my current use cases and keeps things a little easier to test for now.
123
+ tarfiles = []
124
+
125
+ def _label_from_paths(*path, leaf_only=True):
126
+ path = os.path.join(*path).strip(os.path.sep)
127
+ return path.split(os.path.sep)[-1] if leaf_only else path.replace(os.path.sep, '_')
128
+
129
+ def _add_samples(info, fn):
130
+ added = 0
131
+ for s in info['samples']:
132
+ label = _label_from_paths(info['path'], os.path.dirname(s.path))
133
+ if not build_class_map and label not in class_name_to_idx:
134
+ continue
135
+ samples.append((s, fn, info['ti']))
136
+ labels.append(label)
137
+ added += 1
138
+ return added
139
+
140
+ _logger.info(f'Collecting samples and building tar states.')
141
+ for parent_info in info['tartrees']:
142
+ # if tartree has children, we assume all samples are at the child level
143
+ tar_name = None if root_is_tar else parent_info['name']
144
+ tar_state = TarState()
145
+ parent_added = 0
146
+ for child_info in parent_info['children']:
147
+ child_added = _add_samples(child_info, fn=tar_name)
148
+ if child_added:
149
+ tar_state.children[child_info['name']] = TarState(ti=child_info['ti'])
150
+ parent_added += child_added
151
+ parent_added += _add_samples(parent_info, fn=tar_name)
152
+ if parent_added:
153
+ tarfiles.append((tar_name, tar_state))
154
+ del info
155
+
156
+ if build_class_map:
157
+ # build class index
158
+ sorted_labels = list(sorted(set(labels), key=natural_key))
159
+ class_name_to_idx = {c: idx for idx, c in enumerate(sorted_labels)}
160
+
161
+ _logger.info(f'Mapping targets and sorting samples.')
162
+ samples_and_targets = [(s, class_name_to_idx[l]) for s, l in zip(samples, labels) if l in class_name_to_idx]
163
+ if sort:
164
+ samples_and_targets = sorted(samples_and_targets, key=lambda k: natural_key(k[0][0].path))
165
+ samples, targets = zip(*samples_and_targets)
166
+ samples = np.array(samples)
167
+ targets = np.array(targets)
168
+ _logger.info(f'Finished processing {len(samples)} samples across {len(tarfiles)} tar files.')
169
+ return samples, targets, class_name_to_idx, tarfiles
170
+
171
+
172
+ class ReaderImageInTar(Reader):
173
+ """ Multi-tarfile dataset reader where there is one .tar file per class
174
+ """
175
+
176
+ def __init__(self, root, class_map='', cache_tarfiles=True, cache_tarinfo=None):
177
+ super().__init__()
178
+
179
+ class_name_to_idx = None
180
+ if class_map:
181
+ class_name_to_idx = load_class_map(class_map, root)
182
+ self.root = root
183
+ self.samples, self.targets, self.class_name_to_idx, tarfiles = extract_tarinfos(
184
+ self.root,
185
+ class_name_to_idx=class_name_to_idx,
186
+ cache_tarinfo=cache_tarinfo
187
+ )
188
+ self.class_idx_to_name = {v: k for k, v in self.class_name_to_idx.items()}
189
+ if len(tarfiles) == 1 and tarfiles[0][0] is None:
190
+ self.root_is_tar = True
191
+ self.tar_state = tarfiles[0][1]
192
+ else:
193
+ self.root_is_tar = False
194
+ self.tar_state = dict(tarfiles)
195
+ self.cache_tarfiles = cache_tarfiles
196
+
197
+ def __len__(self):
198
+ return len(self.samples)
199
+
200
+ def __getitem__(self, index):
201
+ sample = self.samples[index]
202
+ target = self.targets[index]
203
+ sample_ti, parent_fn, child_ti = sample
204
+ parent_abs = os.path.join(self.root, parent_fn) if parent_fn else self.root
205
+
206
+ tf = None
207
+ cache_state = None
208
+ if self.cache_tarfiles:
209
+ cache_state = self.tar_state if self.root_is_tar else self.tar_state[parent_fn]
210
+ tf = cache_state.tf
211
+ if tf is None:
212
+ tf = tarfile.open(parent_abs)
213
+ if self.cache_tarfiles:
214
+ cache_state.tf = tf
215
+ if child_ti is not None:
216
+ ctf = cache_state.children[child_ti.name].tf if self.cache_tarfiles else None
217
+ if ctf is None:
218
+ ctf = tarfile.open(fileobj=tf.extractfile(child_ti))
219
+ if self.cache_tarfiles:
220
+ cache_state.children[child_ti.name].tf = ctf
221
+ tf = ctf
222
+
223
+ return tf.extractfile(sample_ti), target
224
+
225
+ def _filename(self, index, basename=False, absolute=False):
226
+ filename = self.samples[index][0].name
227
+ if basename:
228
+ filename = os.path.basename(filename)
229
+ return filename
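
Illustrative usage of ReaderImageInTar (a sketch, not one of the uploaded files; the dataset path below is a placeholder and assumes a directory containing one .tar per class):

from PIL import Image
from timm.data.readers.reader_image_in_tar import ReaderImageInTar

# root can be a single .tar, or a folder of .tar files (one tar per class)
reader = ReaderImageInTar('/data/imagenet_tars', cache_tarfiles=True)
print(len(reader))                 # number of image samples indexed across all tars
fileobj, target = reader[0]        # __getitem__ returns an open file object and a class index
img = Image.open(fileobj).convert('RGB')
print(img.size, target)
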
pytorch-image-models/timm/data/readers/reader_tfds.py ADDED
@@ -0,0 +1,355 @@
1
+ """ Dataset reader that wraps TFDS datasets
2
+
3
+ Wraps many (most?) TFDS image-classification datasets
4
+ from https://github.com/tensorflow/datasets
5
+ https://www.tensorflow.org/datasets/catalog/overview#image_classification
6
+
7
+ Hacked together by / Copyright 2020 Ross Wightman
8
+ """
9
+ import math
10
+ import os
11
+ import sys
12
+ from typing import Optional
13
+
14
+ import torch
15
+ import torch.distributed as dist
16
+ from PIL import Image
17
+
18
+ try:
19
+ import tensorflow as tf
20
+ tf.config.set_visible_devices([], 'GPU') # Hands off my GPU! (or pip install tensorflow-cpu)
21
+ import tensorflow_datasets as tfds
22
+ try:
23
+ tfds.even_splits('', 1, drop_remainder=False) # non-buggy even_splits has drop_remainder arg
24
+ has_buggy_even_splits = False
25
+ except TypeError:
26
+ print("Warning: This version of tfds doesn't have the latest even_splits impl. "
27
+ "Please update or use tfds-nightly for better fine-grained split behaviour.")
28
+ has_buggy_even_splits = True
29
+ # NOTE uncomment below if having file limit issues on dataset build (or alter your OS defaults)
30
+ # import resource
31
+ # low, high = resource.getrlimit(resource.RLIMIT_NOFILE)
32
+ # resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))
33
+ except ImportError as e:
34
+ print(e)
35
+ print("Please install tensorflow_datasets package `pip install tensorflow-datasets`.")
36
+ raise e
37
+
38
+ from .class_map import load_class_map
39
+ from .reader import Reader
40
+ from .shared_count import SharedCount
41
+
42
+
43
+ MAX_TP_SIZE = int(os.environ.get('TFDS_TP_SIZE', 8)) # maximum TF threadpool size, for jpeg decodes and queuing activities
44
+ SHUFFLE_SIZE = int(os.environ.get('TFDS_SHUFFLE_SIZE', 8192)) # samples to shuffle in DS queue
45
+ PREFETCH_SIZE = int(os.environ.get('TFDS_PREFETCH_SIZE', 2048)) # samples to prefetch
46
+
47
+
48
+ @tfds.decode.make_decoder()
49
+ def decode_example(serialized_image, feature, dct_method='INTEGER_ACCURATE', channels=3):
50
+ return tf.image.decode_jpeg(
51
+ serialized_image,
52
+ channels=channels,
53
+ dct_method=dct_method,
54
+ )
55
+
56
+
57
+ def even_split_indices(split, n, num_samples):
58
+ partitions = [round(i * num_samples / n) for i in range(n + 1)]
59
+ return [f"{split}[{partitions[i]}:{partitions[i + 1]}]" for i in range(n)]
60
+
61
+
62
+ def get_class_labels(info):
63
+ if 'label' not in info.features:
64
+ return {}
65
+ class_label = info.features['label']
66
+ class_to_idx = {n: class_label.str2int(n) for n in class_label.names}
67
+ return class_to_idx
68
+
69
+
70
+ class ReaderTfds(Reader):
71
+ """ Wrap Tensorflow Datasets for use in PyTorch
72
+
73
+ There are several things to be aware of:
74
+ * To prevent excessive samples being dropped per epoch w/ distributed training or multiplicity of
75
+ dataloader workers, the train iterator wraps to avoid returning partial batches that trigger drop_last
76
+ https://github.com/pytorch/pytorch/issues/33413
77
+ * With PyTorch IterableDatasets, each worker in each replica operates in isolation, the final batch
78
+ from each worker could be a different size. For training this is worked around by option above, for
79
+ validation extra samples are inserted iff distributed mode is enabled so that the batches being reduced
80
+ across replicas are of same size. This will slightly alter the results, distributed validation will not be
81
+ 100% correct. This is similar to common handling in DistributedSampler for normal Datasets but a bit worse
82
+ since there are up to N * J extra samples with IterableDatasets.
83
+ * The sharding (splitting of the dataset into TFRecord files) imposes limitations on the number of
84
+ replicas and dataloader workers you can use. For really small datasets that only contain a few shards
85
+ you may have to train non-distributed w/ 1-2 dataloader workers. This is likely not a huge concern as the
86
+ benefit of distributed training or fast dataloading should be much less for small datasets.
87
+ * This wrapper is currently configured to return individual, decompressed image samples from the TFDS
88
+ dataset. The augmentation (transforms) and batching is still done in PyTorch. It would be possible
89
+ to specify TF augmentation fn and return augmented batches w/ some modifications to other downstream
90
+ components.
91
+
92
+ """
93
+
94
+ def __init__(
95
+ self,
96
+ name,
97
+ root=None,
98
+ split='train',
99
+ class_map=None,
100
+ is_training=False,
101
+ batch_size=1,
102
+ download=False,
103
+ repeats=0,
104
+ seed=42,
105
+ input_key='image',
106
+ input_img_mode='RGB',
107
+ target_key='label',
108
+ target_img_mode='',
109
+ prefetch_size=None,
110
+ shuffle_size=None,
111
+ max_threadpool_size=None
112
+ ):
113
+ """ Tensorflow-datasets Wrapper
114
+
115
+ Args:
116
+ root: root data dir (ie your TFDS_DATA_DIR. not dataset specific sub-dir)
117
+ name: tfds dataset name (eg `imagenet2012`)
118
+ split: tfds dataset split (can use all TFDS split strings eg `train[:10%]`)
119
+ is_training: training mode, shuffle enabled, dataset len rounded by batch_size
120
+ batch_size: batch_size used to ensure total samples % batch_size == 0 in training across all dist nodes
121
+ download: download and build TFDS dataset if set, otherwise must use tfds CLI
122
+ repeats: iterate through (repeat) the dataset this many times per iteration (once if 0 or 1)
123
+ seed: common seed for shard shuffle across all distributed/worker instances
124
+ input_key: name of Feature to return as data (input)
125
+ input_img_mode: image mode if input is an image (currently PIL mode string)
126
+ target_key: name of Feature to return as target (label)
127
+ target_img_mode: image mode if target is an image (currently PIL mode string)
128
+ prefetch_size: override default tf.data prefetch buffer size
129
+ shuffle_size: override default tf.data shuffle buffer size
130
+ max_threadpool_size: override default threadpool size for tf.data
131
+ """
132
+ super().__init__()
133
+ self.root = root
134
+ self.split = split
135
+ self.is_training = is_training
136
+ self.batch_size = batch_size
137
+ self.repeats = repeats
138
+ self.common_seed = seed # a seed that's fixed across all worker / distributed instances
139
+
140
+ # performance settings
141
+ self.prefetch_size = prefetch_size or PREFETCH_SIZE
142
+ self.shuffle_size = shuffle_size or SHUFFLE_SIZE
143
+ self.max_threadpool_size = max_threadpool_size or MAX_TP_SIZE
144
+
145
+ # TFDS builder and split information
146
+ self.input_key = input_key # FIXME support tuples / lists of inputs and targets and full range of Feature
147
+ self.input_img_mode = input_img_mode
148
+ self.target_key = target_key
149
+ self.target_img_mode = target_img_mode # for dense pixel targets
150
+ self.builder = tfds.builder(name, data_dir=root)
151
+ # NOTE: the tfds command line app can be used to download & prepare datasets if you don't enable the download flag
152
+ if download:
153
+ self.builder.download_and_prepare()
154
+ self.remap_class = False
155
+ if class_map:
156
+ self.class_to_idx = load_class_map(class_map)
157
+ self.remap_class = True
158
+ else:
159
+ self.class_to_idx = get_class_labels(self.builder.info) if self.target_key == 'label' else {}
160
+ self.split_info = self.builder.info.splits[split]
161
+ self.num_samples = self.split_info.num_examples
162
+
163
+ # Distributed world state
164
+ self.dist_rank = 0
165
+ self.dist_num_replicas = 1
166
+ if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1:
167
+ self.dist_rank = dist.get_rank()
168
+ self.dist_num_replicas = dist.get_world_size()
169
+
170
+ # Attributes that are updated in _lazy_init, including the tf.data pipeline itself
171
+ self.global_num_workers = 1
172
+ self.num_workers = 1
173
+ self.worker_info = None
174
+ self.worker_seed = 0 # seed unique to each work instance
175
+ self.subsplit = None # set when data is distributed across workers using sub-splits
176
+ self.ds = None # initialized lazily on each dataloader worker process
177
+ self.init_count = 0 # number of ds TF data pipeline initializations
178
+ self.epoch_count = SharedCount()
179
+ # FIXME need to determine if reinit_each_iter is necessary. I don't completely trust the behaviour
180
+ # of `shuffle_reshuffle_each_iteration` when there are multiple workers / nodes across epochs
181
+ self.reinit_each_iter = self.is_training
182
+
183
+ def set_epoch(self, count):
184
+ self.epoch_count.value = count
185
+
186
+ def set_loader_cfg(
187
+ self,
188
+ num_workers: Optional[int] = None,
189
+ ):
190
+ if self.ds is not None:
191
+ return
192
+ if num_workers is not None:
193
+ self.num_workers = num_workers
194
+ self.global_num_workers = self.dist_num_replicas * self.num_workers
195
+
196
+ def _lazy_init(self):
197
+ """ Lazily initialize the dataset.
198
+
199
+ This is necessary to init the Tensorflow dataset pipeline in the (dataloader) process that
200
+ will be using the dataset instance. The __init__ method is called on the main process,
201
+ this will be called in a dataloader worker process.
202
+
203
+ NOTE: There will be problems if you try to re-use this dataset across different loader/worker
204
+ instances once it has been initialized. Do not call any dataset methods that can call _lazy_init
205
+ before it is passed to dataloader.
206
+ """
207
+ worker_info = torch.utils.data.get_worker_info()
208
+
209
+ # setup input context to split dataset across distributed processes
210
+ num_workers = 1
211
+ global_worker_id = 0
212
+ if worker_info is not None:
213
+ self.worker_info = worker_info
214
+ self.worker_seed = worker_info.seed
215
+ self.num_workers = worker_info.num_workers
216
+ self.global_num_workers = self.dist_num_replicas * self.num_workers
217
+ global_worker_id = self.dist_rank * self.num_workers + worker_info.id
218
+
219
+ """ Data sharding
220
+ InputContext will assign subset of underlying TFRecord files to each 'pipeline' if used.
221
+ My understanding is that when using split, the underlying TFRecord files will shuffle (shuffle_files=True)
222
+ between the splits each iteration, but that understanding could be wrong.
223
+
224
+ I am currently using a mix of InputContext shard assignment and fine-grained sub-splits for distributing
225
+ the data across workers. For training InputContext is used to assign shards to nodes unless num_shards
226
+ in dataset < total number of workers. Otherwise sub-split API is used for datasets without enough shards or
227
+ for validation where we can't drop samples and need to minimize uneven splits to avoid padding.
228
+ """
229
+ should_subsplit = self.global_num_workers > 1 and (
230
+ self.split_info.num_shards < self.global_num_workers or not self.is_training)
231
+ if should_subsplit:
232
+ # split the dataset w/o using sharding for more even samples / worker, can result in less optimal
233
+ # read patterns for distributed training (overlap across shards) so better to use InputContext there
234
+ if has_buggy_even_splits:
235
+ # my even_split workaround doesn't work on subsplits, upgrade tfds!
236
+ if not isinstance(self.split_info, tfds.core.splits.SubSplitInfo):
237
+ subsplits = even_split_indices(self.split, self.global_num_workers, self.num_samples)
238
+ self.subsplit = subsplits[global_worker_id]
239
+ else:
240
+ subsplits = tfds.even_splits(self.split, self.global_num_workers)
241
+ self.subsplit = subsplits[global_worker_id]
242
+
243
+ input_context = None
244
+ if self.global_num_workers > 1 and self.subsplit is None:
245
+ # set input context to divide shards among distributed replicas
246
+ input_context = tf.distribute.InputContext(
247
+ num_input_pipelines=self.global_num_workers,
248
+ input_pipeline_id=global_worker_id,
249
+ num_replicas_in_sync=self.dist_num_replicas # FIXME does this arg have any impact?
250
+ )
251
+ read_config = tfds.ReadConfig(
252
+ shuffle_seed=self.common_seed + self.epoch_count.value,
253
+ shuffle_reshuffle_each_iteration=True,
254
+ input_context=input_context,
255
+ )
256
+ ds = self.builder.as_dataset(
257
+ split=self.subsplit or self.split,
258
+ shuffle_files=self.is_training,
259
+ decoders=dict(image=decode_example(channels=1 if self.input_img_mode == 'L' else 3)),
260
+ read_config=read_config,
261
+ )
262
+ # avoid overloading threading w/ combo of TF ds threads + PyTorch workers
263
+ options = tf.data.Options()
264
+ thread_member = 'threading' if hasattr(options, 'threading') else 'experimental_threading'
265
+ getattr(options, thread_member).private_threadpool_size = max(1, self.max_threadpool_size // self.num_workers)
266
+ getattr(options, thread_member).max_intra_op_parallelism = 1
267
+ ds = ds.with_options(options)
268
+ if self.is_training or self.repeats > 1:
269
+ # to prevent excessive drop_last batch behaviour w/ IterableDatasets
270
+ # see warnings at https://pytorch.org/docs/stable/data.html#multi-process-data-loading
271
+ ds = ds.repeat() # allow wrap around and break iteration manually
272
+ if self.is_training:
273
+ ds = ds.shuffle(min(self.num_samples, self.shuffle_size) // self.global_num_workers, seed=self.worker_seed)
274
+ ds = ds.prefetch(min(self.num_samples // self.global_num_workers, self.prefetch_size))
275
+ self.ds = tfds.as_numpy(ds)
276
+ self.init_count += 1
277
+
278
+ def _num_samples_per_worker(self):
279
+ num_worker_samples = \
280
+ max(1, self.repeats) * self.num_samples / max(self.global_num_workers, self.dist_num_replicas)
281
+ if self.is_training or self.dist_num_replicas > 1:
282
+ num_worker_samples = math.ceil(num_worker_samples)
283
+ if self.is_training:
284
+ num_worker_samples = math.ceil(num_worker_samples / self.batch_size) * self.batch_size
285
+ return int(num_worker_samples)
286
+
287
+ def __iter__(self):
288
+ if self.ds is None or self.reinit_each_iter:
289
+ self._lazy_init()
290
+
291
+ # Compute a rounded up sample count that is used to:
292
+ # 1. make batches even cross workers & replicas in distributed validation.
293
+ # This adds extra samples and will slightly alter validation results.
294
+ # 2. determine loop ending condition in training w/ repeat enabled so that only full batch_size
295
+ # batches are produced (underlying tfds iter wraps around)
296
+ target_sample_count = self._num_samples_per_worker()
297
+
298
+ # Iterate until exhausted or sample count hits target when training (ds.repeat enabled)
299
+ sample_count = 0
300
+ for sample in self.ds:
301
+ input_data = sample[self.input_key]
302
+ if self.input_img_mode:
303
+ if self.input_img_mode == 'L' and input_data.ndim == 3:
304
+ input_data = input_data[:, :, 0]
305
+ input_data = Image.fromarray(input_data, mode=self.input_img_mode)
306
+ target_data = sample[self.target_key]
307
+ if self.target_img_mode:
308
+ # dense pixel target
309
+ target_data = Image.fromarray(target_data, mode=self.target_img_mode)
310
+ elif self.remap_class:
311
+ target_data = self.class_to_idx[target_data]
312
+ yield input_data, target_data
313
+ sample_count += 1
314
+ if self.is_training and sample_count >= target_sample_count:
315
+ # Need to break out of loop when repeat() is enabled for training w/ oversampling
316
+ # this results in extra samples per epoch but seems more desirable than dropping
317
+ # up to N*J batches per epoch (where N = num distributed processes, and J = num worker processes)
318
+ break
319
+
320
+ # Pad across distributed nodes (make counts equal by adding samples)
321
+ if not self.is_training and self.dist_num_replicas > 1 and self.subsplit is not None and \
322
+ 0 < sample_count < target_sample_count:
323
+ # Validation batch padding only done for distributed training where results are reduced across nodes.
324
+ # For single process case, it won't matter if workers return different batch sizes.
325
+ # If using input_context or % based splits, sample count can vary significantly across workers and this
326
+ # approach should not be used (hence disabled if self.subsplit isn't set).
327
+ while sample_count < target_sample_count:
328
+ yield input_data, target_data # yield prev sample again
329
+ sample_count += 1
330
+
331
+ def __len__(self):
332
+ num_samples = self._num_samples_per_worker() * self.num_workers
333
+ return num_samples
334
+
335
+ def _filename(self, index, basename=False, absolute=False):
336
+ assert False, "Not supported" # no random access to samples
337
+
338
+ def filenames(self, basename=False, absolute=False):
339
+ """ Return all filenames in dataset, overrides base"""
340
+ if self.ds is None:
341
+ self._lazy_init()
342
+ names = []
343
+ for sample in self.ds:
344
+ if len(names) > self.num_samples:
345
+ break # safety for ds.repeat() case
346
+ if 'file_name' in sample:
347
+ name = sample['file_name']
348
+ elif 'filename' in sample:
349
+ name = sample['filename']
350
+ elif 'id' in sample:
351
+ name = sample['id']
352
+ else:
353
+ assert False, "No supported name field present"
354
+ names.append(name)
355
+ return names
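
Illustrative usage of ReaderTfds (a sketch, not one of the uploaded files; the dataset name and data directory are placeholders, and tensorflow plus tensorflow-datasets must be installed):

from timm.data.readers.reader_tfds import ReaderTfds

reader = ReaderTfds(
    name='imagenette',      # any TFDS image-classification dataset name
    root='/data/tfds',      # your TFDS_DATA_DIR
    split='train',
    is_training=True,       # enables shuffling and wrap-around iteration
    batch_size=32,
    download=True,          # build the dataset if not already prepared
)
for img, label in reader:   # yields (PIL.Image, int) pairs
    print(img.size, label)
    break
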
pytorch-image-models/timm/layers/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (5.75 kB).
 
pytorch-image-models/timm/layers/__pycache__/activations.cpython-39.pyc ADDED
Binary file (7.78 kB).
 
pytorch-image-models/timm/layers/__pycache__/adaptive_avgmax_pool.cpython-39.pyc ADDED
Binary file (7.24 kB).
 
pytorch-image-models/timm/layers/__pycache__/attention2d.cpython-39.pyc ADDED
Binary file (9.45 kB).
 
pytorch-image-models/timm/layers/__pycache__/attention_pool.cpython-39.pyc ADDED
Binary file (3.02 kB).
 
pytorch-image-models/timm/layers/__pycache__/bottleneck_attn.cpython-39.pyc ADDED
Binary file (6.32 kB).
 
pytorch-image-models/timm/layers/__pycache__/cond_conv2d.cpython-39.pyc ADDED
Binary file (3.77 kB).
 
pytorch-image-models/timm/layers/__pycache__/format.cpython-39.pyc ADDED
Binary file (1.48 kB).
 
pytorch-image-models/timm/layers/__pycache__/grn.cpython-39.pyc ADDED
Binary file (1.66 kB).
 
pytorch-image-models/timm/layers/__pycache__/helpers.cpython-39.pyc ADDED
Binary file (1.24 kB).
 
pytorch-image-models/timm/layers/__pycache__/hybrid_embed.cpython-39.pyc ADDED
Binary file (7.07 kB).
 
pytorch-image-models/timm/layers/__pycache__/linear.cpython-39.pyc ADDED
Binary file (1.04 kB).
 
pytorch-image-models/timm/layers/__pycache__/mlp.cpython-39.pyc ADDED
Binary file (6.93 kB).
 
pytorch-image-models/timm/layers/__pycache__/patch_embed.cpython-39.pyc ADDED
Binary file (8.33 kB).
 
pytorch-image-models/timm/layers/__pycache__/pos_embed.cpython-39.pyc ADDED
Binary file (2.07 kB).
 
pytorch-image-models/timm/layers/__pycache__/pos_embed_rel.cpython-39.pyc ADDED
Binary file (12.9 kB).
 
pytorch-image-models/timm/layers/__pycache__/separable_conv.cpython-39.pyc ADDED
Binary file (2.97 kB).
 
pytorch-image-models/timm/layers/__pycache__/std_conv.cpython-39.pyc ADDED
Binary file (5.76 kB).
 
pytorch-image-models/timm/layers/__pycache__/test_time_pool.cpython-39.pyc ADDED
Binary file (2.06 kB).
 
pytorch-image-models/timm/layers/adaptive_avgmax_pool.py ADDED
@@ -0,0 +1,183 @@
1
+ """ PyTorch selectable adaptive pooling
2
+ Adaptive pooling with the ability to select the type of pooling from:
3
+ * 'avg' - Average pooling
4
+ * 'max' - Max pooling
5
+ * 'avgmax' - Sum of average and max pooling re-scaled by 0.5
6
+ * 'catavgmax' - Concatenation of average and max pooling along feature dim, doubles feature dim
7
+
8
+ Both a functional and a nn.Module version of the pooling is provided.
9
+
10
+ Hacked together by / Copyright 2020 Ross Wightman
11
+ """
12
+ from typing import Optional, Tuple, Union
13
+
14
+ import torch
15
+ import torch.nn as nn
16
+ import torch.nn.functional as F
17
+
18
+ from .format import get_spatial_dim, get_channel_dim
19
+
20
+ _int_tuple_2_t = Union[int, Tuple[int, int]]
21
+
22
+
23
+ def adaptive_pool_feat_mult(pool_type='avg'):
24
+ if pool_type.endswith('catavgmax'):
25
+ return 2
26
+ else:
27
+ return 1
28
+
29
+
30
+ def adaptive_avgmax_pool2d(x, output_size: _int_tuple_2_t = 1):
31
+ x_avg = F.adaptive_avg_pool2d(x, output_size)
32
+ x_max = F.adaptive_max_pool2d(x, output_size)
33
+ return 0.5 * (x_avg + x_max)
34
+
35
+
36
+ def adaptive_catavgmax_pool2d(x, output_size: _int_tuple_2_t = 1):
37
+ x_avg = F.adaptive_avg_pool2d(x, output_size)
38
+ x_max = F.adaptive_max_pool2d(x, output_size)
39
+ return torch.cat((x_avg, x_max), 1)
40
+
41
+
42
+ def select_adaptive_pool2d(x, pool_type='avg', output_size: _int_tuple_2_t = 1):
43
+ """Selectable global pooling function with dynamic input kernel size
44
+ """
45
+ if pool_type == 'avg':
46
+ x = F.adaptive_avg_pool2d(x, output_size)
47
+ elif pool_type == 'avgmax':
48
+ x = adaptive_avgmax_pool2d(x, output_size)
49
+ elif pool_type == 'catavgmax':
50
+ x = adaptive_catavgmax_pool2d(x, output_size)
51
+ elif pool_type == 'max':
52
+ x = F.adaptive_max_pool2d(x, output_size)
53
+ else:
54
+ assert False, 'Invalid pool type: %s' % pool_type
55
+ return x
56
+
57
+
58
+ class FastAdaptiveAvgPool(nn.Module):
59
+ def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
60
+ super(FastAdaptiveAvgPool, self).__init__()
61
+ self.flatten = flatten
62
+ self.dim = get_spatial_dim(input_fmt)
63
+
64
+ def forward(self, x):
65
+ return x.mean(self.dim, keepdim=not self.flatten)
66
+
67
+
68
+ class FastAdaptiveMaxPool(nn.Module):
69
+ def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
70
+ super(FastAdaptiveMaxPool, self).__init__()
71
+ self.flatten = flatten
72
+ self.dim = get_spatial_dim(input_fmt)
73
+
74
+ def forward(self, x):
75
+ return x.amax(self.dim, keepdim=not self.flatten)
76
+
77
+
78
+ class FastAdaptiveAvgMaxPool(nn.Module):
79
+ def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
80
+ super(FastAdaptiveAvgMaxPool, self).__init__()
81
+ self.flatten = flatten
82
+ self.dim = get_spatial_dim(input_fmt)
83
+
84
+ def forward(self, x):
85
+ x_avg = x.mean(self.dim, keepdim=not self.flatten)
86
+ x_max = x.amax(self.dim, keepdim=not self.flatten)
87
+ return 0.5 * x_avg + 0.5 * x_max
88
+
89
+
90
+ class FastAdaptiveCatAvgMaxPool(nn.Module):
91
+ def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
92
+ super(FastAdaptiveCatAvgMaxPool, self).__init__()
93
+ self.flatten = flatten
94
+ self.dim_reduce = get_spatial_dim(input_fmt)
95
+ if flatten:
96
+ self.dim_cat = 1
97
+ else:
98
+ self.dim_cat = get_channel_dim(input_fmt)
99
+
100
+ def forward(self, x):
101
+ x_avg = x.mean(self.dim_reduce, keepdim=not self.flatten)
102
+ x_max = x.amax(self.dim_reduce, keepdim=not self.flatten)
103
+ return torch.cat((x_avg, x_max), self.dim_cat)
104
+
105
+
106
+ class AdaptiveAvgMaxPool2d(nn.Module):
107
+ def __init__(self, output_size: _int_tuple_2_t = 1):
108
+ super(AdaptiveAvgMaxPool2d, self).__init__()
109
+ self.output_size = output_size
110
+
111
+ def forward(self, x):
112
+ return adaptive_avgmax_pool2d(x, self.output_size)
113
+
114
+
115
+ class AdaptiveCatAvgMaxPool2d(nn.Module):
116
+ def __init__(self, output_size: _int_tuple_2_t = 1):
117
+ super(AdaptiveCatAvgMaxPool2d, self).__init__()
118
+ self.output_size = output_size
119
+
120
+ def forward(self, x):
121
+ return adaptive_catavgmax_pool2d(x, self.output_size)
122
+
123
+
124
+ class SelectAdaptivePool2d(nn.Module):
125
+ """Selectable global pooling layer with dynamic input kernel size
126
+ """
127
+ def __init__(
128
+ self,
129
+ output_size: _int_tuple_2_t = 1,
130
+ pool_type: str = 'fast',
131
+ flatten: bool = False,
132
+ input_fmt: str = 'NCHW',
133
+ ):
134
+ super(SelectAdaptivePool2d, self).__init__()
135
+ assert input_fmt in ('NCHW', 'NHWC')
136
+ self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing
137
+ pool_type = pool_type.lower()
138
+ if not pool_type:
139
+ self.pool = nn.Identity() # pass through
140
+ self.flatten = nn.Flatten(1) if flatten else nn.Identity()
141
+ elif pool_type.startswith('fast') or input_fmt != 'NCHW':
142
+ assert output_size == 1, 'Fast pooling and non NCHW input formats require output_size == 1.'
143
+ if pool_type.endswith('catavgmax'):
144
+ self.pool = FastAdaptiveCatAvgMaxPool(flatten, input_fmt=input_fmt)
145
+ elif pool_type.endswith('avgmax'):
146
+ self.pool = FastAdaptiveAvgMaxPool(flatten, input_fmt=input_fmt)
147
+ elif pool_type.endswith('max'):
148
+ self.pool = FastAdaptiveMaxPool(flatten, input_fmt=input_fmt)
149
+ elif pool_type == 'fast' or pool_type.endswith('avg'):
150
+ self.pool = FastAdaptiveAvgPool(flatten, input_fmt=input_fmt)
151
+ else:
152
+ assert False, 'Invalid pool type: %s' % pool_type
153
+ self.flatten = nn.Identity()
154
+ else:
155
+ assert input_fmt == 'NCHW'
156
+ if pool_type == 'avgmax':
157
+ self.pool = AdaptiveAvgMaxPool2d(output_size)
158
+ elif pool_type == 'catavgmax':
159
+ self.pool = AdaptiveCatAvgMaxPool2d(output_size)
160
+ elif pool_type == 'max':
161
+ self.pool = nn.AdaptiveMaxPool2d(output_size)
162
+ elif pool_type == 'avg':
163
+ self.pool = nn.AdaptiveAvgPool2d(output_size)
164
+ else:
165
+ assert False, 'Invalid pool type: %s' % pool_type
166
+ self.flatten = nn.Flatten(1) if flatten else nn.Identity()
167
+
168
+ def is_identity(self):
169
+ return not self.pool_type
170
+
171
+ def forward(self, x):
172
+ x = self.pool(x)
173
+ x = self.flatten(x)
174
+ return x
175
+
176
+ def feat_mult(self):
177
+ return adaptive_pool_feat_mult(self.pool_type)
178
+
179
+ def __repr__(self):
180
+ return self.__class__.__name__ + '(' \
181
+ + 'pool_type=' + self.pool_type \
182
+ + ', flatten=' + str(self.flatten) + ')'
183
+
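
A short sketch (not one of the uploaded files) illustrating the selectable pooling; the shapes assume a standard NCHW feature map:

import torch
from timm.layers.adaptive_avgmax_pool import SelectAdaptivePool2d, adaptive_pool_feat_mult

x = torch.randn(2, 512, 7, 7)                        # NCHW feature map
pool = SelectAdaptivePool2d(pool_type='catavgmax', flatten=True)
print(pool(x).shape)                                 # torch.Size([2, 1024]) -- feature dim doubled
print(adaptive_pool_feat_mult('catavgmax'))          # 2, the multiplier a classifier head must account for
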
pytorch-image-models/timm/layers/attention_pool.py ADDED
@@ -0,0 +1,105 @@
1
+ from typing import Optional
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+
7
+ from .config import use_fused_attn
8
+ from .mlp import Mlp
9
+ from .weight_init import trunc_normal_tf_
10
+
11
+
12
+ class AttentionPoolLatent(nn.Module):
13
+ """ Attention pooling w/ latent query
14
+ """
15
+ fused_attn: torch.jit.Final[bool]
16
+
17
+ def __init__(
18
+ self,
19
+ in_features: int,
20
+ out_features: int = None,
21
+ embed_dim: int = None,
22
+ num_heads: int = 8,
23
+ feat_size: Optional[int] = None,
24
+ mlp_ratio: float = 4.0,
25
+ qkv_bias: bool = True,
26
+ qk_norm: bool = False,
27
+ latent_len: int = 1,
28
+ latent_dim: int = None,
29
+ pos_embed: str = '',
30
+ pool_type: str = 'token',
31
+ norm_layer: Optional[nn.Module] = None,
32
+ drop: float = 0.0,
33
+ ):
34
+ super().__init__()
35
+ embed_dim = embed_dim or in_features
36
+ out_features = out_features or in_features
37
+ assert embed_dim % num_heads == 0
38
+ self.num_heads = num_heads
39
+ self.head_dim = embed_dim // num_heads
40
+ self.feat_size = feat_size
41
+ self.scale = self.head_dim ** -0.5
42
+ self.pool = pool_type
43
+ self.fused_attn = use_fused_attn()
44
+
45
+ if pos_embed == 'abs':
46
+ assert feat_size is not None
47
+ self.pos_embed = nn.Parameter(torch.zeros(feat_size, in_features))
48
+ else:
49
+ self.pos_embed = None
50
+
51
+ self.latent_dim = latent_dim or embed_dim
52
+ self.latent_len = latent_len
53
+ self.latent = nn.Parameter(torch.zeros(1, self.latent_len, embed_dim))
54
+
55
+ self.q = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
56
+ self.kv = nn.Linear(embed_dim, embed_dim * 2, bias=qkv_bias)
57
+ self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
58
+ self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
59
+ self.proj = nn.Linear(embed_dim, embed_dim)
60
+ self.proj_drop = nn.Dropout(drop)
61
+
62
+ self.norm = norm_layer(out_features) if norm_layer is not None else nn.Identity()
63
+ self.mlp = Mlp(embed_dim, int(embed_dim * mlp_ratio))
64
+
65
+ self.init_weights()
66
+
67
+ def init_weights(self):
68
+ if self.pos_embed is not None:
69
+ trunc_normal_tf_(self.pos_embed, std=self.pos_embed.shape[1] ** -0.5)
70
+ trunc_normal_tf_(self.latent, std=self.latent_dim ** -0.5)
71
+
72
+ def forward(self, x):
73
+ B, N, C = x.shape
74
+
75
+ if self.pos_embed is not None:
76
+ # FIXME interpolate
77
+ x = x + self.pos_embed.unsqueeze(0).to(x.dtype)
78
+
79
+ q_latent = self.latent.expand(B, -1, -1)
80
+ q = self.q(q_latent).reshape(B, self.latent_len, self.num_heads, self.head_dim).transpose(1, 2)
81
+
82
+ kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
83
+ k, v = kv.unbind(0)
84
+
85
+ q, k = self.q_norm(q), self.k_norm(k)
86
+
87
+ if self.fused_attn:
88
+ x = F.scaled_dot_product_attention(q, k, v)
89
+ else:
90
+ q = q * self.scale
91
+ attn = q @ k.transpose(-2, -1)
92
+ attn = attn.softmax(dim=-1)
93
+ x = attn @ v
94
+ x = x.transpose(1, 2).reshape(B, self.latent_len, C)
95
+ x = self.proj(x)
96
+ x = self.proj_drop(x)
97
+
98
+ x = x + self.mlp(self.norm(x))
99
+
100
+ # optional pool if latent seq_len > 1 and pooled output is desired
101
+ if self.pool == 'token':
102
+ x = x[:, 0]
103
+ elif self.pool == 'avg':
104
+ x = x.mean(1)
105
+ return x
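
A short sketch (not one of the uploaded files) showing the latent-query pooling over a token sequence; the token shape below is just an example (a 14x14 ViT patch grid):

import torch
from timm.layers.attention_pool import AttentionPoolLatent

tokens = torch.randn(2, 196, 768)                    # (B, N, C) patch tokens
pool = AttentionPoolLatent(in_features=768, num_heads=12, latent_len=1, pool_type='token')
pooled = pool(tokens)
print(pooled.shape)                                  # torch.Size([2, 768]) -- one latent query attends over all tokens
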
pytorch-image-models/timm/layers/attention_pool2d.py ADDED
@@ -0,0 +1,278 @@
1
+ """ Attention Pool 2D
2
+
3
+ Implementations of 2D spatial feature pooling using multi-head attention instead of average pool.
4
+
5
+ Based on idea in CLIP by OpenAI, licensed Apache 2.0
6
+ https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
7
+
8
+ Hacked together by / Copyright 2021 Ross Wightman
9
+ """
10
+ from typing import Optional, Union, Tuple
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+
15
+ from .config import use_fused_attn
16
+ from .helpers import to_2tuple
17
+ from .pos_embed import resample_abs_pos_embed
18
+ from .pos_embed_sincos import apply_rot_embed, RotaryEmbedding
19
+ from .weight_init import trunc_normal_
20
+
21
+
22
+ class RotAttentionPool2d(nn.Module):
23
+ """ Attention based 2D feature pooling w/ rotary (relative) pos embedding.
24
+ This is a multi-head attention based replacement for (spatial) average pooling in NN architectures.
25
+
26
+ Adapted from the AttentionPool2d in CLIP w/ rotary embedding instead of learned embed.
27
+ https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
28
+
29
+ NOTE: While this impl does not require a fixed feature size, performance at differeing resolutions from
30
+ train varies widely and falls off dramatically. I'm not sure if there is a way around this... -RW
31
+ """
32
+ fused_attn: torch.jit.Final[bool]
33
+
34
+ def __init__(
35
+ self,
36
+ in_features: int,
37
+ out_features: Optional[int] = None,
38
+ ref_feat_size: Union[int, Tuple[int, int]] = 7,
39
+ embed_dim: Optional[int] = None,
40
+ head_dim: Optional[int] = 64,
41
+ num_heads: Optional[int] = None,
42
+ qkv_bias: bool = True,
43
+ qkv_separate: bool = False,
44
+ pool_type: str = 'token',
45
+ class_token: bool = False,
46
+ drop_rate: float = 0.,
47
+ ):
48
+ super().__init__()
49
+ assert pool_type in ('', 'token')
50
+ self.embed_dim = embed_dim = embed_dim or in_features
51
+ self.in_features = in_features
52
+ self.out_features = out_features or in_features
53
+ ref_feat_size = to_2tuple(ref_feat_size)
54
+ if num_heads is not None:
55
+ assert embed_dim % num_heads == 0
56
+ head_dim = embed_dim // num_heads
57
+ else:
58
+ assert embed_dim % head_dim == 0
59
+ num_heads = embed_dim // head_dim
60
+ self.num_heads = num_heads
61
+ self.head_dim = head_dim
62
+ self.pool_type = pool_type.lower()
63
+ self.scale = self.head_dim ** -0.5
64
+ self.fused_attn = use_fused_attn()
65
+
66
+ if class_token:
67
+ self.cls_token = nn.Parameter(torch.zeros(1, embed_dim))
68
+ else:
69
+ self.cls_token = None
70
+
71
+ if qkv_separate:
72
+ self.q = nn.Linear(in_features, embed_dim, bias=qkv_bias)
73
+ self.k = nn.Linear(in_features, embed_dim, bias=qkv_bias)
74
+ self.v = nn.Linear(in_features, embed_dim, bias=qkv_bias)
75
+ self.qkv = None
76
+ else:
77
+ self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias)
78
+ self.drop = nn.Dropout(drop_rate)
79
+ self.proj = nn.Linear(embed_dim, self.out_features)
80
+ self.pos_embed = RotaryEmbedding(self.head_dim, in_pixels=False, ref_feat_shape=ref_feat_size)
81
+
82
+ def init_weights(self, zero_init_last: bool = False):
83
+ if self.qkv is None:
84
+ in_features = self.q.in_features
85
+ trunc_normal_(self.q.weight, std=in_features ** -0.5)
86
+ nn.init.zeros_(self.q.bias)
87
+ trunc_normal_(self.k.weight, std=in_features ** -0.5)
88
+ nn.init.zeros_(self.k.bias)
89
+ trunc_normal_(self.v.weight, std=in_features ** -0.5)
90
+ nn.init.zeros_(self.v.bias)
91
+ else:
92
+ in_features = self.qkv.in_features
93
+ trunc_normal_(self.qkv.weight, std=in_features ** -0.5)
94
+ nn.init.zeros_(self.qkv.bias)
95
+
96
+ def reset(self, num_classes: Optional[int] = None, pool_type: Optional[str] = None):
97
+ # NOTE: this module is being used as a head, so need compatible reset()
98
+ if pool_type is not None:
99
+ assert pool_type in ('', 'token')
100
+ self.pool_type = pool_type
101
+ if num_classes is not None:
102
+ self.proj = nn.Linear(self.in_features, num_classes) if num_classes > 0 else nn.Identity()
103
+ self.out_features = num_classes if num_classes > 0 else self.embed_dim
104
+
105
+ def _pool(self, x: torch.Tensor, H: int, W: int) -> torch.Tensor:
106
+ if self.pool_type == 'token':
107
+ x = x[:, 0]
108
+ else:
109
+ # if not pooled, return spatial output without token
110
+ x = x[:, 1:].reshape(x.shape[0], H, W, -1).permute(0, 3, 1, 2)
111
+ return x
112
+
113
+ def forward(self, x, pre_logits: bool = False):
114
+ B, _, H, W = x.shape
115
+ N = H * W
116
+ x = x.flatten(2).transpose(1, 2)
117
+ if self.cls_token is None:
118
+ x = torch.cat([x.mean(1, keepdim=True), x], dim=1)
119
+ else:
120
+ x = torch.cat([self.cls_token.expand(x.shape[0], -1, -1), x], dim=1)
121
+ if self.qkv is None:
122
+ q = self.q(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2)
123
+ k = self.k(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2)
124
+ v = self.v(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2)
125
+ else:
126
+ x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
127
+ q, k, v = x.unbind(0)
128
+
129
+ rse, rce = self.pos_embed.get_embed((H, W))
130
+ q = torch.cat([q[:, :, :1, :], apply_rot_embed(q[:, :, 1:, :], rse, rce)], dim=2).type_as(v)
131
+ k = torch.cat([k[:, :, :1, :], apply_rot_embed(k[:, :, 1:, :], rse, rce)], dim=2).type_as(v)
132
+
133
+ if self.fused_attn:
134
+ x = nn.functional.scaled_dot_product_attention(q, k, v)
135
+ else:
136
+ q = q * self.scale
137
+ attn = q @ k.transpose(-2, -1)
138
+ attn = attn.softmax(dim=-1)
139
+ x = attn @ v
140
+ x = x.transpose(1, 2).reshape(B, N + 1, -1)
141
+ x = self.drop(x)
142
+ if pre_logits:
143
+ x = self._pool(x, H, W)
144
+ return x
145
+ x = self.proj(x)
146
+ x = self._pool(x, H, W)
147
+ return x
148
+
149
+
150
+ class AttentionPool2d(nn.Module):
151
+ """ Attention based 2D feature pooling w/ learned (absolute) pos embedding.
152
+ This is a multi-head attention based replacement for (spatial) average pooling in NN architectures.
153
+
154
+ It was based on impl in CLIP by OpenAI
155
+ https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
156
+
157
+ NOTE: This requires the feature size upon construction and will prevent adaptive sizing of the network.
158
+ """
159
+ fused_attn: torch.jit.Final[bool]
160
+
161
+ def __init__(
162
+ self,
163
+ in_features: int,
164
+ feat_size: Union[int, Tuple[int, int]] = 7,
165
+ out_features: Optional[int] = None,
166
+ embed_dim: Optional[int] = None,
167
+ head_dim: Optional[int] = 64,
168
+ num_heads: Optional[int] = None,
169
+ qkv_bias: bool = True,
170
+ qkv_separate: bool = False,
171
+ pool_type: str = 'token',
172
+ class_token: bool = False,
173
+ drop_rate: float = 0.,
174
+ ):
175
+ super().__init__()
176
+ assert pool_type in ('', 'token')
177
+ self.embed_dim = embed_dim = embed_dim or in_features
178
+ self.in_features = in_features
179
+ self.out_features = out_features or in_features
180
+ if num_heads is not None:
181
+ assert embed_dim % num_heads == 0
182
+ head_dim = embed_dim // num_heads
183
+ else:
184
+ assert embed_dim % head_dim == 0
185
+ num_heads = embed_dim // head_dim
186
+ self.feat_size = to_2tuple(feat_size)
187
+ self.seq_len = self.feat_size[0] * self.feat_size[1]
188
+ self.num_heads = num_heads
189
+ self.head_dim = head_dim
190
+ self.pool_type = pool_type
191
+ self.scale = self.head_dim ** -0.5
192
+ self.fused_attn = use_fused_attn()
193
+
194
+ if class_token:
195
+ self.cls_token = nn.Parameter(torch.zeros(1, embed_dim))
196
+ else:
197
+ self.cls_token = None
198
+
199
+ if qkv_separate:
200
+ self.q = nn.Linear(in_features, embed_dim, bias=qkv_bias)
201
+ self.k = nn.Linear(in_features, embed_dim, bias=qkv_bias)
202
+ self.v = nn.Linear(in_features, embed_dim, bias=qkv_bias)
203
+ self.qkv = None
204
+ else:
205
+ self.q = self.k = self.v = None
206
+ self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias)
207
+ self.drop = nn.Dropout(drop_rate)
208
+ self.proj = nn.Linear(embed_dim, self.out_features)
209
+ self.pos_embed = nn.Parameter(torch.zeros(self.seq_len + 1, in_features))
210
+
211
+ self.init_weights()
212
+
213
+ def init_weights(self, zero_init_last: bool = False):
214
+ if self.qkv is None:
215
+ in_features = self.q.in_features
216
+ trunc_normal_(self.q.weight, std=in_features ** -0.5)
217
+ nn.init.zeros_(self.q.bias)
218
+ trunc_normal_(self.k.weight, std=in_features ** -0.5)
219
+ nn.init.zeros_(self.k.bias)
220
+ trunc_normal_(self.v.weight, std=in_features ** -0.5)
221
+ nn.init.zeros_(self.v.bias)
222
+ else:
223
+ in_features = self.qkv.in_features
224
+ trunc_normal_(self.qkv.weight, std=in_features ** -0.5)
225
+ nn.init.zeros_(self.qkv.bias)
226
+ trunc_normal_(self.pos_embed, std=in_features ** -0.5)
227
+
228
+ def reset(self, num_classes: Optional[int] = None, pool_type: Optional[str] = None):
229
+ # NOTE: this module is being used as a head, so need compatible reset()
230
+ if pool_type is not None:
231
+ assert pool_type in ('', 'token')
232
+ self.pool_type = pool_type
233
+ if num_classes is not None:
234
+ self.proj = nn.Linear(self.in_features, num_classes) if num_classes > 0 else nn.Identity()
235
+ self.out_features = num_classes if num_classes > 0 else self.embed_dim
236
+
237
+ def _pool(self, x: torch.Tensor, H: int, W: int) -> torch.Tensor:
238
+ if self.pool_type == 'token':
239
+ x = x[:, 0]
240
+ else:
241
+ # if not pooled, return spatial output without token
242
+ x = x[:, 1:].reshape(x.shape[0], H, W, -1).permute(0, 3, 1, 2)
243
+ return x
244
+
245
+ def forward(self, x, pre_logits: bool = False):
246
+ B, _, H, W = x.shape
247
+ N = H * W
248
+ x = x.flatten(2).transpose(1, 2)
249
+ if self.cls_token is None:
250
+ x = torch.cat([x.mean(1, keepdim=True), x], dim=1)
251
+ else:
252
+ x = torch.cat([self.cls_token.expand(x.shape[0], -1, -1), x], dim=1)
253
+ pos_embed = resample_abs_pos_embed(self.pos_embed.unsqueeze(0), (H, W), num_prefix_tokens=1)
254
+ x = x + pos_embed
255
+
256
+ if self.qkv is None:
257
+ q = self.q(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2)
258
+ k = self.k(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2)
259
+ v = self.v(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2)
260
+ else:
261
+ x = self.qkv(x).reshape(B, -1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
262
+ q, k, v = x.unbind(0)
263
+
264
+ if self.fused_attn:
265
+ x = nn.functional.scaled_dot_product_attention(q, k, v)
266
+ else:
267
+ q = q * self.scale
268
+ attn = q @ k.transpose(-2, -1)
269
+ attn = attn.softmax(dim=-1)
270
+ x = attn @ v
271
+ x = x.transpose(1, 2).reshape(B, N + 1, -1)
272
+ x = self.drop(x)
273
+ if pre_logits:
274
+ x = self._pool(x, H, W)
275
+ return x
276
+ x = self.proj(x)
277
+ x = self._pool(x, H, W)
278
+ return x
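
A short sketch (not one of the uploaded files) comparing the two attention-pooling heads on a CNN feature map; the dimensions are illustrative:

import torch
from timm.layers.attention_pool2d import AttentionPool2d, RotAttentionPool2d

feat = torch.randn(2, 2048, 7, 7)                    # NCHW feature map
fixed = AttentionPool2d(in_features=2048, feat_size=7, out_features=1024, num_heads=8)
print(fixed(feat).shape)                             # torch.Size([2, 1024]) -- learned abs pos embed, fixed 7x7 grid
rot = RotAttentionPool2d(in_features=2048, out_features=1024, num_heads=8)
print(rot(feat).shape)                               # torch.Size([2, 1024]) -- rotary pos embed, no fixed feat size
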
pytorch-image-models/timm/layers/blur_pool.py ADDED
@@ -0,0 +1,91 @@
+ """
+ BlurPool layer inspired by
+ - Kornia's Max_BlurPool2d
+ - Making Convolutional Networks Shift-Invariant Again :cite:`zhang2019shiftinvar`
+
+ Hacked together by Chris Ha and Ross Wightman
+ """
+ from functools import partial
+ from typing import Optional, Type
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import numpy as np
+
+ from .padding import get_padding
+ from .typing import LayerType
+
+
+ class BlurPool2d(nn.Module):
+     r"""Creates a module that computes blurs and downsample a given feature map.
+     See :cite:`zhang2019shiftinvar` for more details.
+     Corresponds to the Downsample class, which does blurring and subsampling
+
+     Args:
+         channels = Number of input channels
+         filt_size (int): binomial filter size for blurring. currently supports 3 (default) and 5.
+         stride (int): downsampling filter stride
+
+     Returns:
+         torch.Tensor: the transformed tensor.
+     """
+     def __init__(
+             self,
+             channels: Optional[int] = None,
+             filt_size: int = 3,
+             stride: int = 2,
+             pad_mode: str = 'reflect',
+     ) -> None:
+         super(BlurPool2d, self).__init__()
+         assert filt_size > 1
+         self.channels = channels
+         self.filt_size = filt_size
+         self.stride = stride
+         self.pad_mode = pad_mode
+         self.padding = [get_padding(filt_size, stride, dilation=1)] * 4
+
+         coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.filt_size - 1)).coeffs.astype(np.float32))
+         blur_filter = (coeffs[:, None] * coeffs[None, :])[None, None, :, :]
+         if channels is not None:
+             blur_filter = blur_filter.repeat(self.channels, 1, 1, 1)
+         self.register_buffer('filt', blur_filter, persistent=False)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         x = F.pad(x, self.padding, mode=self.pad_mode)
+         if self.channels is None:
+             channels = x.shape[1]
+             weight = self.filt.expand(channels, 1, self.filt_size, self.filt_size)
+         else:
+             channels = self.channels
+             weight = self.filt
+         return F.conv2d(x, weight, stride=self.stride, groups=channels)
+
+
+ def create_aa(
+         aa_layer: LayerType,
+         channels: Optional[int] = None,
+         stride: int = 2,
+         enable: bool = True,
+         noop: Optional[Type[nn.Module]] = nn.Identity
+ ) -> nn.Module:
+     """ Anti-aliasing """
+     if not aa_layer or not enable:
+         return noop() if noop is not None else None
+
+     if isinstance(aa_layer, str):
+         aa_layer = aa_layer.lower().replace('_', '').replace('-', '')
+         if aa_layer == 'avg' or aa_layer == 'avgpool':
+             aa_layer = nn.AvgPool2d
+         elif aa_layer == 'blur' or aa_layer == 'blurpool':
+             aa_layer = BlurPool2d
+         elif aa_layer == 'blurpc':
+             aa_layer = partial(BlurPool2d, pad_mode='constant')
+
+         else:
+             assert False, f"Unknown anti-aliasing layer ({aa_layer})."
+
+     try:
+         return aa_layer(channels=channels, stride=stride)
+     except TypeError as e:
+         return aa_layer(stride)
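
A minimal usage sketch for the blur-pool / anti-aliasing helpers above. It assumes BlurPool2d and create_aa are re-exported from timm.layers as in upstream timm; the tensor shapes are purely illustrative.

import torch
from timm.layers import BlurPool2d, create_aa   # assumed re-export path

x = torch.randn(2, 64, 32, 32)                   # illustrative NCHW feature map
pool = BlurPool2d(channels=64)                   # 3x3 binomial blur, stride 2
print(pool(x).shape)                             # torch.Size([2, 64, 16, 16])

aa = create_aa('blurpc', channels=64, stride=2)  # BlurPool2d with constant padding
print(aa(x).shape)                               # torch.Size([2, 64, 16, 16])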
pytorch-image-models/timm/layers/bottleneck_attn.py ADDED
@@ -0,0 +1,157 @@
1
+ """ Bottleneck Self Attention (Bottleneck Transformers)
2
+
3
+ Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605
4
+
5
+ @misc{2101.11605,
6
+ Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani},
7
+ Title = {Bottleneck Transformers for Visual Recognition},
8
+ Year = {2021},
9
+ }
10
+
11
+ Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
12
+
13
+ This impl is a WIP but given that it is based on the ref gist likely not too far off.
14
+
15
+ Hacked together by / Copyright 2021 Ross Wightman
16
+ """
17
+ from typing import List
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+ import torch.nn.functional as F
22
+
23
+ from .helpers import to_2tuple, make_divisible
24
+ from .weight_init import trunc_normal_
25
+ from .trace_utils import _assert
26
+
27
+
28
+ def rel_logits_1d(q, rel_k, permute_mask: List[int]):
29
+ """ Compute relative logits along one dimension
30
+
31
+ As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
32
+ Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
33
+
34
+ Args:
35
+ q: (batch, heads, height, width, dim)
36
+ rel_k: (2 * width - 1, dim)
37
+ permute_mask: permute output dim according to this
38
+ """
39
+ B, H, W, dim = q.shape
40
+ x = (q @ rel_k.transpose(-1, -2))
41
+ x = x.reshape(-1, W, 2 * W -1)
42
+
43
+ # pad to shift from relative to absolute indexing
44
+ x_pad = F.pad(x, [0, 1]).flatten(1)
45
+ x_pad = F.pad(x_pad, [0, W - 1])
46
+
47
+ # reshape and slice out the padded elements
48
+ x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1)
49
+ x = x_pad[:, :W, W - 1:]
50
+
51
+ # reshape and tile
52
+ x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1)
53
+ return x.permute(permute_mask)
54
+
55
+
56
+ class PosEmbedRel(nn.Module):
57
+ """ Relative Position Embedding
58
+ As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
59
+ Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
60
+ """
61
+ def __init__(self, feat_size, dim_head, scale):
62
+ super().__init__()
63
+ self.height, self.width = to_2tuple(feat_size)
64
+ self.dim_head = dim_head
65
+ self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * scale)
66
+ self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * scale)
67
+
68
+ def forward(self, q):
69
+ B, HW, _ = q.shape
70
+
71
+ # relative logits in width dimension.
72
+ q = q.reshape(B, self.height, self.width, -1)
73
+ rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4))
74
+
75
+ # relative logits in height dimension.
76
+ q = q.transpose(1, 2)
77
+ rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2))
78
+
79
+ rel_logits = rel_logits_h + rel_logits_w
80
+ rel_logits = rel_logits.reshape(B, HW, HW)
81
+ return rel_logits
82
+
83
+
84
+ class BottleneckAttn(nn.Module):
85
+ """ Bottleneck Attention
86
+ Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605
87
+
88
+ The internal dimensions of the attention module are controlled by the interaction of several arguments.
89
+ * the output dimension of the module is specified by dim_out, which falls back to input dim if not set
90
+ * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
91
+ * the query and key (qk) dimensions are determined by
92
+ * num_heads * dim_head if dim_head is not None
93
+ * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None
94
+ * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used
95
+
96
+ Args:
97
+ dim (int): input dimension to the module
98
+ dim_out (int): output dimension of the module, same as dim if not set
99
+ stride (int): output stride of the module, avg pool used if stride == 2 (default: 1).
100
+ num_heads (int): parallel attention heads (default: 4)
101
+ dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set
102
+ qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0)
103
+ qkv_bias (bool): add bias to q, k, and v projections
104
+ scale_pos_embed (bool): scale the position embedding as well as Q @ K
105
+ """
106
+ def __init__(
107
+ self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=None,
108
+ qk_ratio=1.0, qkv_bias=False, scale_pos_embed=False):
109
+ super().__init__()
110
+ assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required'
111
+ dim_out = dim_out or dim
112
+ assert dim_out % num_heads == 0
113
+ self.num_heads = num_heads
114
+ self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads
115
+ self.dim_head_v = dim_out // self.num_heads
116
+ self.dim_out_qk = num_heads * self.dim_head_qk
117
+ self.dim_out_v = num_heads * self.dim_head_v
118
+ self.scale = self.dim_head_qk ** -0.5
119
+ self.scale_pos_embed = scale_pos_embed
120
+
121
+ self.qkv = nn.Conv2d(dim, self.dim_out_qk * 2 + self.dim_out_v, 1, bias=qkv_bias)
122
+
123
+ # NOTE I'm only supporting relative pos embedding for now
124
+ self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head_qk, scale=self.scale)
125
+
126
+ self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity()
127
+
128
+ self.reset_parameters()
129
+
130
+ def reset_parameters(self):
131
+ trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in
132
+ trunc_normal_(self.pos_embed.height_rel, std=self.scale)
133
+ trunc_normal_(self.pos_embed.width_rel, std=self.scale)
134
+
135
+ def forward(self, x):
136
+ B, C, H, W = x.shape
137
+ _assert(H == self.pos_embed.height, '')
138
+ _assert(W == self.pos_embed.width, '')
139
+
140
+ x = self.qkv(x) # B, (2 * dim_head_qk + dim_head_v) * num_heads, H, W
141
+
142
+ # NOTE head vs channel split ordering in qkv projection was decided before I allowed qk to differ from v
143
+ # So, this is more verbose than if heads were before qkv splits, but throughput is not impacted.
144
+ q, k, v = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1)
145
+ q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2)
146
+ k = k.reshape(B * self.num_heads, self.dim_head_qk, -1) # no transpose, for q @ k
147
+ v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2)
148
+
149
+ if self.scale_pos_embed:
150
+ attn = (q @ k + self.pos_embed(q)) * self.scale # B * num_heads, H * W, H * W
151
+ else:
152
+ attn = (q @ k) * self.scale + self.pos_embed(q)
153
+ attn = attn.softmax(dim=-1)
154
+
155
+ out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W) # B, dim_out, H, W
156
+ out = self.pool(out)
157
+ return out
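
A minimal usage sketch for BottleneckAttn above: feat_size must match the incoming (H, W). The import path and shapes below are illustrative assumptions.

import torch
from timm.layers import BottleneckAttn  # assumed re-export path

x = torch.randn(2, 256, 14, 14)                              # (B, C, H, W) must match feat_size
attn = BottleneckAttn(256, feat_size=(14, 14), num_heads=4)
print(attn(x).shape)                                         # torch.Size([2, 256, 14, 14])

down = BottleneckAttn(256, dim_out=512, feat_size=(14, 14), stride=2)  # avg-pool downsample at the output
print(down(x).shape)                                         # torch.Size([2, 512, 7, 7])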
pytorch-image-models/timm/layers/cbam.py ADDED
@@ -0,0 +1,112 @@
1
+ """ CBAM (sort-of) Attention
2
+
3
+ Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521
4
+
5
+ WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on
6
+ some tasks, especially fine-grained it seems. I may end up removing this impl.
7
+
8
+ Hacked together by / Copyright 2020 Ross Wightman
9
+ """
10
+ import torch
11
+ from torch import nn as nn
12
+ import torch.nn.functional as F
13
+
14
+ from .conv_bn_act import ConvNormAct
15
+ from .create_act import create_act_layer, get_act_layer
16
+ from .helpers import make_divisible
17
+
18
+
19
+ class ChannelAttn(nn.Module):
20
+ """ Original CBAM channel attention module, currently avg + max pool variant only.
21
+ """
22
+ def __init__(
23
+ self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1,
24
+ act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False):
25
+ super(ChannelAttn, self).__init__()
26
+ if not rd_channels:
27
+ rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
28
+ self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias)
29
+ self.act = act_layer(inplace=True)
30
+ self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias)
31
+ self.gate = create_act_layer(gate_layer)
32
+
33
+ def forward(self, x):
34
+ x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True))))
35
+ x_max = self.fc2(self.act(self.fc1(x.amax((2, 3), keepdim=True))))
36
+ return x * self.gate(x_avg + x_max)
37
+
38
+
39
+ class LightChannelAttn(ChannelAttn):
40
+ """An experimental 'lightweight' that sums avg + max pool first
41
+ """
42
+ def __init__(
43
+ self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1,
44
+ act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False):
45
+ super(LightChannelAttn, self).__init__(
46
+ channels, rd_ratio, rd_channels, rd_divisor, act_layer, gate_layer, mlp_bias)
47
+
48
+ def forward(self, x):
49
+ x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * x.amax((2, 3), keepdim=True)
50
+ x_attn = self.fc2(self.act(self.fc1(x_pool)))
51
+ return x * F.sigmoid(x_attn)
52
+
53
+
54
+ class SpatialAttn(nn.Module):
55
+ """ Original CBAM spatial attention module
56
+ """
57
+ def __init__(self, kernel_size=7, gate_layer='sigmoid'):
58
+ super(SpatialAttn, self).__init__()
59
+ self.conv = ConvNormAct(2, 1, kernel_size, apply_act=False)
60
+ self.gate = create_act_layer(gate_layer)
61
+
62
+ def forward(self, x):
63
+ x_attn = torch.cat([x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1)
64
+ x_attn = self.conv(x_attn)
65
+ return x * self.gate(x_attn)
66
+
67
+
68
+ class LightSpatialAttn(nn.Module):
69
+ """An experimental 'lightweight' variant that sums avg_pool and max_pool results.
70
+ """
71
+ def __init__(self, kernel_size=7, gate_layer='sigmoid'):
72
+ super(LightSpatialAttn, self).__init__()
73
+ self.conv = ConvNormAct(1, 1, kernel_size, apply_act=False)
74
+ self.gate = create_act_layer(gate_layer)
75
+
76
+ def forward(self, x):
77
+ x_attn = 0.5 * x.mean(dim=1, keepdim=True) + 0.5 * x.amax(dim=1, keepdim=True)
78
+ x_attn = self.conv(x_attn)
79
+ return x * self.gate(x_attn)
80
+
81
+
82
+ class CbamModule(nn.Module):
83
+ def __init__(
84
+ self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1,
85
+ spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False):
86
+ super(CbamModule, self).__init__()
87
+ self.channel = ChannelAttn(
88
+ channels, rd_ratio=rd_ratio, rd_channels=rd_channels,
89
+ rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias)
90
+ self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer)
91
+
92
+ def forward(self, x):
93
+ x = self.channel(x)
94
+ x = self.spatial(x)
95
+ return x
96
+
97
+
98
+ class LightCbamModule(nn.Module):
99
+ def __init__(
100
+ self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1,
101
+ spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False):
102
+ super(LightCbamModule, self).__init__()
103
+ self.channel = LightChannelAttn(
104
+ channels, rd_ratio=rd_ratio, rd_channels=rd_channels,
105
+ rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias)
106
+ self.spatial = LightSpatialAttn(spatial_kernel_size)
107
+
108
+ def forward(self, x):
109
+ x = self.channel(x)
110
+ x = self.spatial(x)
111
+ return x
112
+
pytorch-image-models/timm/layers/classifier.py ADDED
@@ -0,0 +1,283 @@
1
+ """ Classifier head and layer factory
2
+
3
+ Hacked together by / Copyright 2020 Ross Wightman
4
+ """
5
+ from collections import OrderedDict
6
+ from functools import partial
7
+ from typing import Optional, Union, Callable
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ from torch.nn import functional as F
12
+
13
+ from .adaptive_avgmax_pool import SelectAdaptivePool2d
14
+ from .create_act import get_act_layer
15
+ from .create_norm import get_norm_layer
16
+
17
+
18
+ def _create_pool(
19
+ num_features: int,
20
+ num_classes: int,
21
+ pool_type: str = 'avg',
22
+ use_conv: bool = False,
23
+ input_fmt: Optional[str] = None,
24
+ ):
25
+ flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling
26
+ if not pool_type:
27
+ flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling)
28
+ global_pool = SelectAdaptivePool2d(
29
+ pool_type=pool_type,
30
+ flatten=flatten_in_pool,
31
+ input_fmt=input_fmt,
32
+ )
33
+ num_pooled_features = num_features * global_pool.feat_mult()
34
+ return global_pool, num_pooled_features
35
+
36
+
37
+ def _create_fc(num_features, num_classes, use_conv=False):
38
+ if num_classes <= 0:
39
+ fc = nn.Identity() # pass-through (no classifier)
40
+ elif use_conv:
41
+ fc = nn.Conv2d(num_features, num_classes, 1, bias=True)
42
+ else:
43
+ fc = nn.Linear(num_features, num_classes, bias=True)
44
+ return fc
45
+
46
+
47
+ def create_classifier(
48
+ num_features: int,
49
+ num_classes: int,
50
+ pool_type: str = 'avg',
51
+ use_conv: bool = False,
52
+ input_fmt: str = 'NCHW',
53
+ drop_rate: Optional[float] = None,
54
+ ):
55
+ global_pool, num_pooled_features = _create_pool(
56
+ num_features,
57
+ num_classes,
58
+ pool_type,
59
+ use_conv=use_conv,
60
+ input_fmt=input_fmt,
61
+ )
62
+ fc = _create_fc(
63
+ num_pooled_features,
64
+ num_classes,
65
+ use_conv=use_conv,
66
+ )
67
+ if drop_rate is not None:
68
+ dropout = nn.Dropout(drop_rate)
69
+ return global_pool, dropout, fc
70
+ return global_pool, fc
71
+
72
+
73
+ class ClassifierHead(nn.Module):
74
+ """Classifier head w/ configurable global pooling and dropout."""
75
+
76
+ def __init__(
77
+ self,
78
+ in_features: int,
79
+ num_classes: int,
80
+ pool_type: str = 'avg',
81
+ drop_rate: float = 0.,
82
+ use_conv: bool = False,
83
+ input_fmt: str = 'NCHW',
84
+ ):
85
+ """
86
+ Args:
87
+ in_features: The number of input features.
88
+ num_classes: The number of classes for the final classifier layer (output).
89
+ pool_type: Global pooling type, pooling disabled if empty string ('').
90
+ drop_rate: Pre-classifier dropout rate.
91
+ """
92
+ super(ClassifierHead, self).__init__()
93
+ self.in_features = in_features
94
+ self.use_conv = use_conv
95
+ self.input_fmt = input_fmt
96
+
97
+ global_pool, fc = create_classifier(
98
+ in_features,
99
+ num_classes,
100
+ pool_type,
101
+ use_conv=use_conv,
102
+ input_fmt=input_fmt,
103
+ )
104
+ self.global_pool = global_pool
105
+ self.drop = nn.Dropout(drop_rate)
106
+ self.fc = fc
107
+ self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity()
108
+
109
+ def reset(self, num_classes: int, pool_type: Optional[str] = None):
110
+ if pool_type is not None and pool_type != self.global_pool.pool_type:
111
+ self.global_pool, self.fc = create_classifier(
112
+ self.in_features,
113
+ num_classes,
114
+ pool_type=pool_type,
115
+ use_conv=self.use_conv,
116
+ input_fmt=self.input_fmt,
117
+ )
118
+ self.flatten = nn.Flatten(1) if self.use_conv and pool_type else nn.Identity()
119
+ else:
120
+ num_pooled_features = self.in_features * self.global_pool.feat_mult()
121
+ self.fc = _create_fc(
122
+ num_pooled_features,
123
+ num_classes,
124
+ use_conv=self.use_conv,
125
+ )
126
+
127
+ def forward(self, x, pre_logits: bool = False):
128
+ x = self.global_pool(x)
129
+ x = self.drop(x)
130
+ if pre_logits:
131
+ return self.flatten(x)
132
+ x = self.fc(x)
133
+ return self.flatten(x)
134
+
135
+
136
+ class NormMlpClassifierHead(nn.Module):
137
+ """ A Pool -> Norm -> Mlp Classifier Head for '2D' NCHW tensors
138
+ """
139
+ def __init__(
140
+ self,
141
+ in_features: int,
142
+ num_classes: int,
143
+ hidden_size: Optional[int] = None,
144
+ pool_type: str = 'avg',
145
+ drop_rate: float = 0.,
146
+ norm_layer: Union[str, Callable] = 'layernorm2d',
147
+ act_layer: Union[str, Callable] = 'tanh',
148
+ ):
149
+ """
150
+ Args:
151
+ in_features: The number of input features.
152
+ num_classes: The number of classes for the final classifier layer (output).
153
+ hidden_size: The hidden size of the MLP (pre-logits FC layer) if not None.
154
+ pool_type: Global pooling type, pooling disabled if empty string ('').
155
+ drop_rate: Pre-classifier dropout rate.
156
+ norm_layer: Normalization layer type.
157
+ act_layer: MLP activation layer type (only used if hidden_size is not None).
158
+ """
159
+ super().__init__()
160
+ self.in_features = in_features
161
+ self.hidden_size = hidden_size
162
+ self.num_features = in_features
163
+ self.use_conv = not pool_type
164
+ norm_layer = get_norm_layer(norm_layer)
165
+ act_layer = get_act_layer(act_layer)
166
+ linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear
167
+
168
+ self.global_pool = SelectAdaptivePool2d(pool_type=pool_type)
169
+ self.norm = norm_layer(in_features)
170
+ self.flatten = nn.Flatten(1) if pool_type else nn.Identity()
171
+ if hidden_size:
172
+ self.pre_logits = nn.Sequential(OrderedDict([
173
+ ('fc', linear_layer(in_features, hidden_size)),
174
+ ('act', act_layer()),
175
+ ]))
176
+ self.num_features = hidden_size
177
+ else:
178
+ self.pre_logits = nn.Identity()
179
+ self.drop = nn.Dropout(drop_rate)
180
+ self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
181
+
182
+ def reset(self, num_classes: int, pool_type: Optional[str] = None):
183
+ if pool_type is not None:
184
+ self.global_pool = SelectAdaptivePool2d(pool_type=pool_type)
185
+ self.flatten = nn.Flatten(1) if pool_type else nn.Identity()
186
+ self.use_conv = self.global_pool.is_identity()
187
+ linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear
188
+ if self.hidden_size:
189
+ if ((isinstance(self.pre_logits.fc, nn.Conv2d) and not self.use_conv) or
190
+ (isinstance(self.pre_logits.fc, nn.Linear) and self.use_conv)):
191
+ with torch.no_grad():
192
+ new_fc = linear_layer(self.in_features, self.hidden_size)
193
+ new_fc.weight.copy_(self.pre_logits.fc.weight.reshape(new_fc.weight.shape))
194
+ new_fc.bias.copy_(self.pre_logits.fc.bias)
195
+ self.pre_logits.fc = new_fc
196
+ self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
197
+
198
+ def forward(self, x, pre_logits: bool = False):
199
+ x = self.global_pool(x)
200
+ x = self.norm(x)
201
+ x = self.flatten(x)
202
+ x = self.pre_logits(x)
203
+ x = self.drop(x)
204
+ if pre_logits:
205
+ return x
206
+ x = self.fc(x)
207
+ return x
208
+
209
+
210
+ class ClNormMlpClassifierHead(nn.Module):
211
+ """ A Pool -> Norm -> Mlp Classifier Head for n-D NxxC tensors
212
+ """
213
+ def __init__(
214
+ self,
215
+ in_features: int,
216
+ num_classes: int,
217
+ hidden_size: Optional[int] = None,
218
+ pool_type: str = 'avg',
219
+ drop_rate: float = 0.,
220
+ norm_layer: Union[str, Callable] = 'layernorm',
221
+ act_layer: Union[str, Callable] = 'gelu',
222
+ input_fmt: str = 'NHWC',
223
+ ):
224
+ """
225
+ Args:
226
+ in_features: The number of input features.
227
+ num_classes: The number of classes for the final classifier layer (output).
228
+ hidden_size: The hidden size of the MLP (pre-logits FC layer) if not None.
229
+ pool_type: Global pooling type, pooling disabled if empty string ('').
230
+ drop_rate: Pre-classifier dropout rate.
231
+ norm_layer: Normalization layer type.
232
+ act_layer: MLP activation layer type (only used if hidden_size is not None).
233
+ """
234
+ super().__init__()
235
+ self.in_features = in_features
236
+ self.hidden_size = hidden_size
237
+ self.num_features = in_features
238
+ assert pool_type in ('', 'avg', 'max', 'avgmax')
239
+ self.pool_type = pool_type
240
+ assert input_fmt in ('NHWC', 'NLC')
241
+ self.pool_dim = 1 if input_fmt == 'NLC' else (1, 2)
242
+ norm_layer = get_norm_layer(norm_layer)
243
+ act_layer = get_act_layer(act_layer)
244
+
245
+ self.norm = norm_layer(in_features)
246
+ if hidden_size:
247
+ self.pre_logits = nn.Sequential(OrderedDict([
248
+ ('fc', nn.Linear(in_features, hidden_size)),
249
+ ('act', act_layer()),
250
+ ]))
251
+ self.num_features = hidden_size
252
+ else:
253
+ self.pre_logits = nn.Identity()
254
+ self.drop = nn.Dropout(drop_rate)
255
+ self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
256
+
257
+ def reset(self, num_classes: int, pool_type: Optional[str] = None, reset_other: bool = False):
258
+ if pool_type is not None:
259
+ self.pool_type = pool_type
260
+ if reset_other:
261
+ self.pre_logits = nn.Identity()
262
+ self.norm = nn.Identity()
263
+ self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
264
+
265
+ def _global_pool(self, x):
266
+ if self.pool_type:
267
+ if self.pool_type == 'avg':
268
+ x = x.mean(dim=self.pool_dim)
269
+ elif self.pool_type == 'max':
270
+ x = x.amax(dim=self.pool_dim)
271
+ elif self.pool_type == 'avgmax':
272
+ x = 0.5 * (x.amax(dim=self.pool_dim) + x.mean(dim=self.pool_dim))
273
+ return x
274
+
275
+ def forward(self, x, pre_logits: bool = False):
276
+ x = self._global_pool(x)
277
+ x = self.norm(x)
278
+ x = self.pre_logits(x)
279
+ x = self.drop(x)
280
+ if pre_logits:
281
+ return x
282
+ x = self.fc(x)
283
+ return x
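
A minimal usage sketch for ClassifierHead above, assuming it is re-exported from timm.layers as in upstream timm; shapes are illustrative.

import torch
from timm.layers import ClassifierHead  # assumed re-export path

feats = torch.randn(2, 512, 7, 7)           # illustrative backbone feature map (NCHW)
head = ClassifierHead(512, num_classes=1000, pool_type='avg', drop_rate=0.1)
print(head(feats).shape)                    # torch.Size([2, 1000])
print(head(feats, pre_logits=True).shape)   # torch.Size([2, 512]) pooled features, classifier skipped

head.reset(10, pool_type='max')             # swap pooling + classifier, e.g. for fine-tuning
print(head(feats).shape)                    # torch.Size([2, 10])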
pytorch-image-models/timm/layers/cond_conv2d.py ADDED
@@ -0,0 +1,123 @@
1
+ """ PyTorch Conditionally Parameterized Convolution (CondConv)
2
+
3
+ Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference
4
+ (https://arxiv.org/abs/1904.04971)
5
+
6
+ Hacked together by / Copyright 2020 Ross Wightman
7
+ """
8
+
9
+ import math
10
+ from functools import partial
11
+ import numpy as np
12
+ import torch
13
+ from torch import nn as nn
14
+ from torch.nn import functional as F
15
+
16
+ from .helpers import to_2tuple
17
+ from .conv2d_same import conv2d_same
18
+ from .padding import get_padding_value
19
+
20
+
21
+ def get_condconv_initializer(initializer, num_experts, expert_shape):
22
+ def condconv_initializer(weight):
23
+ """CondConv initializer function."""
24
+ num_params = np.prod(expert_shape)
25
+ if (len(weight.shape) != 2 or weight.shape[0] != num_experts or
26
+ weight.shape[1] != num_params):
27
+ raise (ValueError(
28
+ 'CondConv variables must have shape [num_experts, num_params]'))
29
+ for i in range(num_experts):
30
+ initializer(weight[i].view(expert_shape))
31
+ return condconv_initializer
32
+
33
+
34
+ class CondConv2d(nn.Module):
35
+ """ Conditionally Parameterized Convolution
36
+ Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py
37
+
38
+ Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion:
39
+ https://github.com/pytorch/pytorch/issues/17983
40
+ """
41
+ __constants__ = ['in_channels', 'out_channels', 'dynamic_padding']
42
+
43
+ def __init__(self, in_channels, out_channels, kernel_size=3,
44
+ stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4):
45
+ super(CondConv2d, self).__init__()
46
+
47
+ self.in_channels = in_channels
48
+ self.out_channels = out_channels
49
+ self.kernel_size = to_2tuple(kernel_size)
50
+ self.stride = to_2tuple(stride)
51
+ padding_val, is_padding_dynamic = get_padding_value(
52
+ padding, kernel_size, stride=stride, dilation=dilation)
53
+ self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript
54
+ self.padding = to_2tuple(padding_val)
55
+ self.dilation = to_2tuple(dilation)
56
+ self.groups = groups
57
+ self.num_experts = num_experts
58
+
59
+ self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size
60
+ weight_num_param = 1
61
+ for wd in self.weight_shape:
62
+ weight_num_param *= wd
63
+ self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param))
64
+
65
+ if bias:
66
+ self.bias_shape = (self.out_channels,)
67
+ self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels))
68
+ else:
69
+ self.register_parameter('bias', None)
70
+
71
+ self.reset_parameters()
72
+
73
+ def reset_parameters(self):
74
+ init_weight = get_condconv_initializer(
75
+ partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape)
76
+ init_weight(self.weight)
77
+ if self.bias is not None:
78
+ fan_in = np.prod(self.weight_shape[1:])
79
+ bound = 1 / math.sqrt(fan_in)
80
+ init_bias = get_condconv_initializer(
81
+ partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape)
82
+ init_bias(self.bias)
83
+
84
+ def forward(self, x, routing_weights):
85
+ B, C, H, W = x.shape
86
+ weight = torch.matmul(routing_weights, self.weight)
87
+ new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size
88
+ weight = weight.view(new_weight_shape)
89
+ bias = None
90
+ if self.bias is not None:
91
+ bias = torch.matmul(routing_weights, self.bias)
92
+ bias = bias.view(B * self.out_channels)
93
+ # move batch elements with channels so each batch element can be efficiently convolved with separate kernel
94
+ # reshape instead of view to work with channels_last input
95
+ x = x.reshape(1, B * C, H, W)
96
+ if self.dynamic_padding:
97
+ out = conv2d_same(
98
+ x, weight, bias, stride=self.stride, padding=self.padding,
99
+ dilation=self.dilation, groups=self.groups * B)
100
+ else:
101
+ out = F.conv2d(
102
+ x, weight, bias, stride=self.stride, padding=self.padding,
103
+ dilation=self.dilation, groups=self.groups * B)
104
+ out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1])
105
+
106
+ # Literal port (from TF definition)
107
+ # x = torch.split(x, 1, 0)
108
+ # weight = torch.split(weight, 1, 0)
109
+ # if self.bias is not None:
110
+ # bias = torch.matmul(routing_weights, self.bias)
111
+ # bias = torch.split(bias, 1, 0)
112
+ # else:
113
+ # bias = [None] * B
114
+ # out = []
115
+ # for xi, wi, bi in zip(x, weight, bias):
116
+ # wi = wi.view(*self.weight_shape)
117
+ # if bi is not None:
118
+ # bi = bi.view(*self.bias_shape)
119
+ # out.append(self.conv_fn(
120
+ # xi, wi, bi, stride=self.stride, padding=self.padding,
121
+ # dilation=self.dilation, groups=self.groups))
122
+ # out = torch.cat(out, 0)
123
+ return out
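
A minimal usage sketch for CondConv2d above. Routing weights come from an external gating branch (e.g. pooled features -> FC -> sigmoid); the import path and shapes are illustrative assumptions.

import torch
from timm.layers import CondConv2d  # assumed re-export path

x = torch.randn(2, 32, 16, 16)
conv = CondConv2d(32, 64, kernel_size=3, num_experts=4)
routing = torch.sigmoid(torch.randn(2, 4))   # one weight per expert, per sample (stand-in for a learned gate)
print(conv(x, routing).shape)                # torch.Size([2, 64, 16, 16])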
pytorch-image-models/timm/layers/config.py ADDED
@@ -0,0 +1,149 @@
1
+ """ Model / Layer Config singleton state
2
+ """
3
+ import os
4
+ import warnings
5
+ from typing import Any, Optional
6
+
7
+ import torch
8
+
9
+ __all__ = [
10
+ 'is_exportable', 'is_scriptable', 'is_no_jit', 'use_fused_attn',
11
+ 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config', 'set_fused_attn'
12
+ ]
13
+
14
+ # Set to True if prefer to have layers with no jit optimization (includes activations)
15
+ _NO_JIT = False
16
+
17
+ # Set to True if prefer to have activation layers with no jit optimization
18
+ # NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying
19
+ # the jit flags so far are activations. This will change as more layers are updated and/or added.
20
+ _NO_ACTIVATION_JIT = False
21
+
22
+ # Set to True if exporting a model with Same padding via ONNX
23
+ _EXPORTABLE = False
24
+
25
+ # Set to True if wanting to use torch.jit.script on a model
26
+ _SCRIPTABLE = False
27
+
28
+
29
+ # use torch.scaled_dot_product_attention where possible
30
+ _HAS_FUSED_ATTN = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
31
+ if 'TIMM_FUSED_ATTN' in os.environ:
32
+ _USE_FUSED_ATTN = int(os.environ['TIMM_FUSED_ATTN'])
33
+ else:
34
+ _USE_FUSED_ATTN = 1 # 0 == off, 1 == on (for tested use), 2 == on (for experimental use)
35
+
36
+
37
+ def is_no_jit():
38
+ return _NO_JIT
39
+
40
+
41
+ class set_no_jit:
42
+ def __init__(self, mode: bool) -> None:
43
+ global _NO_JIT
44
+ self.prev = _NO_JIT
45
+ _NO_JIT = mode
46
+
47
+ def __enter__(self) -> None:
48
+ pass
49
+
50
+ def __exit__(self, *args: Any) -> bool:
51
+ global _NO_JIT
52
+ _NO_JIT = self.prev
53
+ return False
54
+
55
+
56
+ def is_exportable():
57
+ return _EXPORTABLE
58
+
59
+
60
+ class set_exportable:
61
+ def __init__(self, mode: bool) -> None:
62
+ global _EXPORTABLE
63
+ self.prev = _EXPORTABLE
64
+ _EXPORTABLE = mode
65
+
66
+ def __enter__(self) -> None:
67
+ pass
68
+
69
+ def __exit__(self, *args: Any) -> bool:
70
+ global _EXPORTABLE
71
+ _EXPORTABLE = self.prev
72
+ return False
73
+
74
+
75
+ def is_scriptable():
76
+ return _SCRIPTABLE
77
+
78
+
79
+ class set_scriptable:
80
+ def __init__(self, mode: bool) -> None:
81
+ global _SCRIPTABLE
82
+ self.prev = _SCRIPTABLE
83
+ _SCRIPTABLE = mode
84
+
85
+ def __enter__(self) -> None:
86
+ pass
87
+
88
+ def __exit__(self, *args: Any) -> bool:
89
+ global _SCRIPTABLE
90
+ _SCRIPTABLE = self.prev
91
+ return False
92
+
93
+
94
+ class set_layer_config:
95
+ """ Layer config context manager that allows setting all layer config flags at once.
96
+ If a flag arg is None, it will not change the current value.
97
+ """
98
+ def __init__(
99
+ self,
100
+ scriptable: Optional[bool] = None,
101
+ exportable: Optional[bool] = None,
102
+ no_jit: Optional[bool] = None,
103
+ no_activation_jit: Optional[bool] = None):
104
+ global _SCRIPTABLE
105
+ global _EXPORTABLE
106
+ global _NO_JIT
107
+ global _NO_ACTIVATION_JIT
108
+ self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT
109
+ if scriptable is not None:
110
+ _SCRIPTABLE = scriptable
111
+ if exportable is not None:
112
+ _EXPORTABLE = exportable
113
+ if no_jit is not None:
114
+ _NO_JIT = no_jit
115
+ if no_activation_jit is not None:
116
+ _NO_ACTIVATION_JIT = no_activation_jit
117
+
118
+ def __enter__(self) -> None:
119
+ pass
120
+
121
+ def __exit__(self, *args: Any) -> bool:
122
+ global _SCRIPTABLE
123
+ global _EXPORTABLE
124
+ global _NO_JIT
125
+ global _NO_ACTIVATION_JIT
126
+ _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev
127
+ return False
128
+
129
+
130
+ def use_fused_attn(experimental: bool = False) -> bool:
131
+ # NOTE: ONNX export cannot handle F.scaled_dot_product_attention as of pytorch 2.0
132
+ if not _HAS_FUSED_ATTN or _EXPORTABLE:
133
+ return False
134
+ if experimental:
135
+ return _USE_FUSED_ATTN > 1
136
+ return _USE_FUSED_ATTN > 0
137
+
138
+
139
+ def set_fused_attn(enable: bool = True, experimental: bool = False):
140
+ global _USE_FUSED_ATTN
141
+ if not _HAS_FUSED_ATTN:
142
+ warnings.warn('This version of pytorch does not have F.scaled_dot_product_attention, fused_attn flag ignored.')
143
+ return
144
+ if experimental and enable:
145
+ _USE_FUSED_ATTN = 2
146
+ elif enable:
147
+ _USE_FUSED_ATTN = 1
148
+ else:
149
+ _USE_FUSED_ATTN = 0
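
A minimal sketch of the layer-config flags above in use, assuming the functions are re-exported from timm.layers as in upstream timm.

from timm.layers import set_layer_config, set_fused_attn, use_fused_attn  # assumed re-export path

# build layers in export-friendly mode, e.g. just before an ONNX export
with set_layer_config(exportable=True):
    print(use_fused_attn())  # False while the exportable flag is set

set_fused_attn(False)        # globally fall back to the unfused attention path
print(use_fused_attn())      # False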
pytorch-image-models/timm/layers/conv2d_same.py ADDED
@@ -0,0 +1,110 @@
1
+ """ Conv2d w/ Same Padding
2
+
3
+ Hacked together by / Copyright 2020 Ross Wightman
4
+ """
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+ from typing import Tuple, Optional
9
+
10
+ from .config import is_exportable, is_scriptable
11
+ from .padding import pad_same, pad_same_arg, get_padding_value
12
+
13
+
14
+ _USE_EXPORT_CONV = False
15
+
16
+
17
+ def conv2d_same(
18
+ x,
19
+ weight: torch.Tensor,
20
+ bias: Optional[torch.Tensor] = None,
21
+ stride: Tuple[int, int] = (1, 1),
22
+ padding: Tuple[int, int] = (0, 0),
23
+ dilation: Tuple[int, int] = (1, 1),
24
+ groups: int = 1,
25
+ ):
26
+ x = pad_same(x, weight.shape[-2:], stride, dilation)
27
+ return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
28
+
29
+
30
+ class Conv2dSame(nn.Conv2d):
31
+ """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions
32
+ """
33
+
34
+ def __init__(
35
+ self,
36
+ in_channels,
37
+ out_channels,
38
+ kernel_size,
39
+ stride=1,
40
+ padding=0,
41
+ dilation=1,
42
+ groups=1,
43
+ bias=True,
44
+ ):
45
+ super(Conv2dSame, self).__init__(
46
+ in_channels, out_channels, kernel_size,
47
+ stride, 0, dilation, groups, bias,
48
+ )
49
+
50
+ def forward(self, x):
51
+ return conv2d_same(
52
+ x, self.weight, self.bias,
53
+ self.stride, self.padding, self.dilation, self.groups,
54
+ )
55
+
56
+
57
+ class Conv2dSameExport(nn.Conv2d):
58
+ """ ONNX export friendly Tensorflow like 'SAME' convolution wrapper for 2D convolutions
59
+
60
+ NOTE: This does not currently work with torch.jit.script
61
+ """
62
+
63
+ # pylint: disable=unused-argument
64
+ def __init__(
65
+ self,
66
+ in_channels,
67
+ out_channels,
68
+ kernel_size,
69
+ stride=1,
70
+ padding=0,
71
+ dilation=1,
72
+ groups=1,
73
+ bias=True,
74
+ ):
75
+ super(Conv2dSameExport, self).__init__(
76
+ in_channels, out_channels, kernel_size,
77
+ stride, 0, dilation, groups, bias,
78
+ )
79
+ self.pad = None
80
+ self.pad_input_size = (0, 0)
81
+
82
+ def forward(self, x):
83
+ input_size = x.size()[-2:]
84
+ if self.pad is None:
85
+ pad_arg = pad_same_arg(input_size, self.weight.size()[-2:], self.stride, self.dilation)
86
+ self.pad = nn.ZeroPad2d(pad_arg)
87
+ self.pad_input_size = input_size
88
+
89
+ x = self.pad(x)
90
+ return F.conv2d(
91
+ x, self.weight, self.bias,
92
+ self.stride, self.padding, self.dilation, self.groups,
93
+ )
94
+
95
+
96
+ def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
97
+ padding = kwargs.pop('padding', '')
98
+ kwargs.setdefault('bias', False)
99
+ padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
100
+ if is_dynamic:
101
+ if _USE_EXPORT_CONV and is_exportable():
102
+ # older PyTorch ver needed this to export same padding reasonably
103
+ assert not is_scriptable() # Conv2DSameExport does not work with jit
104
+ return Conv2dSameExport(in_chs, out_chs, kernel_size, **kwargs)
105
+ else:
106
+ return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs)
107
+ else:
108
+ return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
109
+
110
+
pytorch-image-models/timm/layers/conv_bn_act.py ADDED
@@ -0,0 +1,92 @@
1
+ """ Conv2d + BN + Act
2
+
3
+ Hacked together by / Copyright 2020 Ross Wightman
4
+ """
5
+ from typing import Any, Dict, Optional, Type
6
+
7
+ from torch import nn as nn
8
+
9
+ from .typing import LayerType, PadType
10
+ from .blur_pool import create_aa
11
+ from .create_conv2d import create_conv2d
12
+ from .create_norm_act import get_norm_act_layer
13
+
14
+
15
+ class ConvNormAct(nn.Module):
16
+ def __init__(
17
+ self,
18
+ in_channels: int,
19
+ out_channels: int,
20
+ kernel_size: int = 1,
21
+ stride: int = 1,
22
+ padding: PadType = '',
23
+ dilation: int = 1,
24
+ groups: int = 1,
25
+ bias: bool = False,
26
+ apply_norm: bool = True,
27
+ apply_act: bool = True,
28
+ norm_layer: LayerType = nn.BatchNorm2d,
29
+ act_layer: Optional[LayerType] = nn.ReLU,
30
+ aa_layer: Optional[LayerType] = None,
31
+ drop_layer: Optional[Type[nn.Module]] = None,
32
+ conv_kwargs: Optional[Dict[str, Any]] = None,
33
+ norm_kwargs: Optional[Dict[str, Any]] = None,
34
+ act_kwargs: Optional[Dict[str, Any]] = None,
35
+ ):
36
+ super(ConvNormAct, self).__init__()
37
+ conv_kwargs = conv_kwargs or {}
38
+ norm_kwargs = norm_kwargs or {}
39
+ act_kwargs = act_kwargs or {}
40
+ use_aa = aa_layer is not None and stride > 1
41
+
42
+ self.conv = create_conv2d(
43
+ in_channels,
44
+ out_channels,
45
+ kernel_size,
46
+ stride=1 if use_aa else stride,
47
+ padding=padding,
48
+ dilation=dilation,
49
+ groups=groups,
50
+ bias=bias,
51
+ **conv_kwargs,
52
+ )
53
+
54
+ if apply_norm:
55
+ # NOTE for backwards compatibility with models that use separate norm and act layer definitions
56
+ norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
57
+ # NOTE for backwards (weight) compatibility, norm layer name remains `.bn`
58
+ if drop_layer:
59
+ norm_kwargs['drop_layer'] = drop_layer
60
+ self.bn = norm_act_layer(
61
+ out_channels,
62
+ apply_act=apply_act,
63
+ act_kwargs=act_kwargs,
64
+ **norm_kwargs,
65
+ )
66
+ else:
67
+ self.bn = nn.Sequential()
68
+ if drop_layer:
69
+ norm_kwargs['drop_layer'] = drop_layer
70
+ self.bn.add_module('drop', drop_layer())
71
+
72
+ self.aa = create_aa(aa_layer, out_channels, stride=stride, enable=use_aa, noop=None)
73
+
74
+ @property
75
+ def in_channels(self):
76
+ return self.conv.in_channels
77
+
78
+ @property
79
+ def out_channels(self):
80
+ return self.conv.out_channels
81
+
82
+ def forward(self, x):
83
+ x = self.conv(x)
84
+ x = self.bn(x)
85
+ aa = getattr(self, 'aa', None)
86
+ if aa is not None:
87
+ x = self.aa(x)
88
+ return x
89
+
90
+
91
+ ConvBnAct = ConvNormAct
92
+ ConvNormActAa = ConvNormAct # backwards compat, when they were separate
pytorch-image-models/timm/layers/create_act.py ADDED
@@ -0,0 +1,138 @@
1
+ """ Activation Factory
2
+ Hacked together by / Copyright 2020 Ross Wightman
3
+ """
4
+ from typing import Union, Callable, Type
5
+
6
+ from .activations import *
7
+ from .activations_me import *
8
+ from .config import is_exportable, is_scriptable
9
+
10
+ # PyTorch has an optimized, native 'silu' (aka 'swish') operator as of PyTorch 1.7.
11
+ # Also hardsigmoid, hardswish, and soon mish. This code will use native version if present.
12
+ # Eventually, the custom SiLU, Mish, Hard*, layers will be removed and only native variants will be used.
13
+ _has_silu = 'silu' in dir(torch.nn.functional)
14
+ _has_hardswish = 'hardswish' in dir(torch.nn.functional)
15
+ _has_hardsigmoid = 'hardsigmoid' in dir(torch.nn.functional)
16
+ _has_mish = 'mish' in dir(torch.nn.functional)
17
+
18
+
19
+ _ACT_FN_DEFAULT = dict(
20
+ silu=F.silu if _has_silu else swish,
21
+ swish=F.silu if _has_silu else swish,
22
+ mish=F.mish if _has_mish else mish,
23
+ relu=F.relu,
24
+ relu6=F.relu6,
25
+ leaky_relu=F.leaky_relu,
26
+ elu=F.elu,
27
+ celu=F.celu,
28
+ selu=F.selu,
29
+ gelu=gelu,
30
+ gelu_tanh=gelu_tanh,
31
+ quick_gelu=quick_gelu,
32
+ sigmoid=sigmoid,
33
+ tanh=tanh,
34
+ hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid,
35
+ hard_swish=F.hardswish if _has_hardswish else hard_swish,
36
+ hard_mish=hard_mish,
37
+ )
38
+
39
+ _ACT_FN_ME = dict(
40
+ silu=F.silu if _has_silu else swish_me,
41
+ swish=F.silu if _has_silu else swish_me,
42
+ mish=F.mish if _has_mish else mish_me,
43
+ hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_me,
44
+ hard_swish=F.hardswish if _has_hardswish else hard_swish_me,
45
+ hard_mish=hard_mish_me,
46
+ )
47
+
48
+ _ACT_FNS = (_ACT_FN_ME, _ACT_FN_DEFAULT)
49
+ for a in _ACT_FNS:
50
+ a.setdefault('hardsigmoid', a.get('hard_sigmoid'))
51
+ a.setdefault('hardswish', a.get('hard_swish'))
52
+
53
+
54
+ _ACT_LAYER_DEFAULT = dict(
55
+ silu=nn.SiLU if _has_silu else Swish,
56
+ swish=nn.SiLU if _has_silu else Swish,
57
+ mish=nn.Mish if _has_mish else Mish,
58
+ relu=nn.ReLU,
59
+ relu6=nn.ReLU6,
60
+ leaky_relu=nn.LeakyReLU,
61
+ elu=nn.ELU,
62
+ prelu=PReLU,
63
+ celu=nn.CELU,
64
+ selu=nn.SELU,
65
+ gelu=GELU,
66
+ gelu_tanh=GELUTanh,
67
+ quick_gelu=QuickGELU,
68
+ sigmoid=Sigmoid,
69
+ tanh=Tanh,
70
+ hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoid,
71
+ hard_swish=nn.Hardswish if _has_hardswish else HardSwish,
72
+ hard_mish=HardMish,
73
+ identity=nn.Identity,
74
+ )
75
+
76
+ _ACT_LAYER_ME = dict(
77
+ silu=nn.SiLU if _has_silu else SwishMe,
78
+ swish=nn.SiLU if _has_silu else SwishMe,
79
+ mish=nn.Mish if _has_mish else MishMe,
80
+ hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidMe,
81
+ hard_swish=nn.Hardswish if _has_hardswish else HardSwishMe,
82
+ hard_mish=HardMishMe,
83
+ )
84
+
85
+ _ACT_LAYERS = (_ACT_LAYER_ME, _ACT_LAYER_DEFAULT)
86
+ for a in _ACT_LAYERS:
87
+ a.setdefault('hardsigmoid', a.get('hard_sigmoid'))
88
+ a.setdefault('hardswish', a.get('hard_swish'))
89
+
90
+
91
+ def get_act_fn(name: Union[Callable, str] = 'relu'):
92
+ """ Activation Function Factory
93
+ Fetching activation fns by name with this function allows export or torch script friendly
94
+ functions to be returned dynamically based on current config.
95
+ """
96
+ if not name:
97
+ return None
98
+ if isinstance(name, Callable):
99
+ return name
100
+ name = name.lower()
101
+ if not (is_exportable() or is_scriptable()):
102
+ # If not exporting or scripting the model, first look for a memory-efficient version with
103
+ # custom autograd, then fallback
104
+ if name in _ACT_FN_ME:
105
+ return _ACT_FN_ME[name]
106
+ return _ACT_FN_DEFAULT[name]
107
+
108
+
109
+ def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'):
110
+ """ Activation Layer Factory
111
+ Fetching activation layers by name with this function allows export or torch script friendly
112
+ functions to be returned dynamically based on current config.
113
+ """
114
+ if name is None:
115
+ return None
116
+ if not isinstance(name, str):
117
+ # callable, module, etc
118
+ return name
119
+ if not name:
120
+ return None
121
+ name = name.lower()
122
+ if not (is_exportable() or is_scriptable()):
123
+ if name in _ACT_LAYER_ME:
124
+ return _ACT_LAYER_ME[name]
125
+ return _ACT_LAYER_DEFAULT[name]
126
+
127
+
128
+ def create_act_layer(name: Union[Type[nn.Module], str], inplace=None, **kwargs):
129
+ act_layer = get_act_layer(name)
130
+ if act_layer is None:
131
+ return None
132
+ if inplace is None:
133
+ return act_layer(**kwargs)
134
+ try:
135
+ return act_layer(inplace=inplace, **kwargs)
136
+ except TypeError:
137
+ # recover if act layer doesn't have inplace arg
138
+ return act_layer(**kwargs)
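
A minimal usage sketch for the activation factory above, assuming the functions are re-exported from timm.layers as in upstream timm.

from timm.layers import get_act_fn, get_act_layer, create_act_layer  # assumed re-export path

fn = get_act_fn('silu')                  # native F.silu on recent PyTorch versions
layer_cls = get_act_layer('hard_swish')  # nn.Hardswish, or the fallback impl on older PyTorch
act = create_act_layer('leaky_relu', inplace=True, negative_slope=0.2)  # extra kwargs pass through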
pytorch-image-models/timm/layers/create_attn.py ADDED
@@ -0,0 +1,89 @@
1
+ """ Attention Factory
2
+
3
+ Hacked together by / Copyright 2021 Ross Wightman
4
+ """
5
+ import torch
6
+ from functools import partial
7
+
8
+ from .bottleneck_attn import BottleneckAttn
9
+ from .cbam import CbamModule, LightCbamModule
10
+ from .eca import EcaModule, CecaModule
11
+ from .gather_excite import GatherExcite
12
+ from .global_context import GlobalContext
13
+ from .halo_attn import HaloAttn
14
+ from .lambda_layer import LambdaLayer
15
+ from .non_local_attn import NonLocalAttn, BatNonLocalAttn
16
+ from .selective_kernel import SelectiveKernel
17
+ from .split_attn import SplitAttn
18
+ from .squeeze_excite import SEModule, EffectiveSEModule
19
+
20
+
21
+ def get_attn(attn_type):
22
+ if isinstance(attn_type, torch.nn.Module):
23
+ return attn_type
24
+ module_cls = None
25
+ if attn_type:
26
+ if isinstance(attn_type, str):
27
+ attn_type = attn_type.lower()
28
+ # Lightweight attention modules (channel and/or coarse spatial).
29
+ # Typically added to existing network architecture blocks in addition to existing convolutions.
30
+ if attn_type == 'se':
31
+ module_cls = SEModule
32
+ elif attn_type == 'ese':
33
+ module_cls = EffectiveSEModule
34
+ elif attn_type == 'eca':
35
+ module_cls = EcaModule
36
+ elif attn_type == 'ecam':
37
+ module_cls = partial(EcaModule, use_mlp=True)
38
+ elif attn_type == 'ceca':
39
+ module_cls = CecaModule
40
+ elif attn_type == 'ge':
41
+ module_cls = GatherExcite
42
+ elif attn_type == 'gc':
43
+ module_cls = GlobalContext
44
+ elif attn_type == 'gca':
45
+ module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False)
46
+ elif attn_type == 'cbam':
47
+ module_cls = CbamModule
48
+ elif attn_type == 'lcbam':
49
+ module_cls = LightCbamModule
50
+
51
+ # Attention / attention-like modules w/ significant params
52
+ # Typically replace some of the existing workhorse convs in a network architecture.
53
+ # All of these accept a stride argument and can spatially downsample the input.
54
+ elif attn_type == 'sk':
55
+ module_cls = SelectiveKernel
56
+ elif attn_type == 'splat':
57
+ module_cls = SplitAttn
58
+
59
+ # Self-attention / attention-like modules w/ significant compute and/or params
60
+ # Typically replace some of the existing workhorse convs in a network architecture.
61
+ # All of these accept a stride argument and can spatially downsample the input.
62
+ elif attn_type == 'lambda':
63
+ return LambdaLayer
64
+ elif attn_type == 'bottleneck':
65
+ return BottleneckAttn
66
+ elif attn_type == 'halo':
67
+ return HaloAttn
68
+ elif attn_type == 'nl':
69
+ module_cls = NonLocalAttn
70
+ elif attn_type == 'bat':
71
+ module_cls = BatNonLocalAttn
72
+
73
+ # Woops!
74
+ else:
75
+ assert False, "Invalid attn module (%s)" % attn_type
76
+ elif isinstance(attn_type, bool):
77
+ if attn_type:
78
+ module_cls = SEModule
79
+ else:
80
+ module_cls = attn_type
81
+ return module_cls
82
+
83
+
84
+ def create_attn(attn_type, channels, **kwargs):
85
+ module_cls = get_attn(attn_type)
86
+ if module_cls is not None:
87
+ # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels
88
+ return module_cls(channels, **kwargs)
89
+ return None
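
A minimal usage sketch for the attention factory above, assuming create_attn is re-exported from timm.layers as in upstream timm.

from timm.layers import create_attn  # assumed re-export path

se = create_attn('se', 64)     # SEModule over 64 channels
eca = create_attn('eca', 64)   # efficient channel attention
none = create_attn(None, 64)   # falsy attn_type returns None, so callers can skip the block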
pytorch-image-models/timm/layers/create_conv2d.py ADDED
@@ -0,0 +1,36 @@
+ """ Create Conv2d Factory Method
+
+ Hacked together by / Copyright 2020 Ross Wightman
+ """
+
+ from .mixed_conv2d import MixedConv2d
+ from .cond_conv2d import CondConv2d
+ from .conv2d_same import create_conv2d_pad
+
+
+ def create_conv2d(in_channels, out_channels, kernel_size, **kwargs):
+     """ Select a 2d convolution implementation based on arguments
+     Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
+
+     Used extensively by EfficientNet, MobileNetv3 and related networks.
+     """
+     if isinstance(kernel_size, list):
+         assert 'num_experts' not in kwargs  # MixNet + CondConv combo not supported currently
+         if 'groups' in kwargs:
+             groups = kwargs.pop('groups')
+             if groups == in_channels:
+                 kwargs['depthwise'] = True
+             else:
+                 assert groups == 1
+         # We're going to use only lists for defining the MixedConv2d kernel groups,
+         # ints, tuples, other iterables will continue to pass to normal conv and specify h, w.
+         m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs)
+     else:
+         depthwise = kwargs.pop('depthwise', False)
+         # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0
+         groups = in_channels if depthwise else kwargs.pop('groups', 1)
+         if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
+             m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
+         else:
+             m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
+     return m
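
A minimal usage sketch for the conv factory above, assuming create_conv2d is re-exported from timm.layers as in upstream timm.

from timm.layers import create_conv2d  # assumed re-export path

dw = create_conv2d(32, 32, 3, stride=2, padding='same', depthwise=True)  # Conv2dSame, groups == in_channels
mixed = create_conv2d(32, 64, [3, 5, 7], padding='same')                 # MixedConv2d over 3 kernel groups
cond = create_conv2d(32, 64, 3, num_experts=4)                           # CondConv2d with 4 experts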
pytorch-image-models/timm/layers/evo_norm.py ADDED
@@ -0,0 +1,352 @@
1
+ """ EvoNorm in PyTorch
2
+
3
+ Based on `Evolving Normalization-Activation Layers` - https://arxiv.org/abs/2004.02967
4
+ @inproceedings{NEURIPS2020,
5
+ author = {Liu, Hanxiao and Brock, Andy and Simonyan, Karen and Le, Quoc},
6
+ booktitle = {Advances in Neural Information Processing Systems},
7
+ editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin},
8
+ pages = {13539--13550},
9
+ publisher = {Curran Associates, Inc.},
10
+ title = {Evolving Normalization-Activation Layers},
11
+ url = {https://proceedings.neurips.cc/paper/2020/file/9d4c03631b8b0c85ae08bf05eda37d0f-Paper.pdf},
12
+ volume = {33},
13
+ year = {2020}
14
+ }
15
+
16
+ An attempt at getting decent performing EvoNorms running in PyTorch.
17
+ While faster than other PyTorch impl, still quite a ways off the built-in BatchNorm
18
+ in terms of memory usage and throughput on GPUs.
19
+
20
+ I'm testing these modules on TPU w/ PyTorch XLA. Promising start but
21
+ currently working around some issues with builtin torch/tensor.var/std. Unlike
22
+ GPU, similar train speeds for EvoNormS variants and BatchNorm.
23
+
24
+ Hacked together by / Copyright 2020 Ross Wightman
25
+ """
26
+ from typing import Sequence, Union
27
+
28
+ import torch
29
+ import torch.nn as nn
30
+ import torch.nn.functional as F
31
+
32
+ from .create_act import create_act_layer
33
+ from .trace_utils import _assert
34
+
35
+
36
+ def instance_std(x, eps: float = 1e-5):
37
+ std = x.float().var(dim=(2, 3), unbiased=False, keepdim=True).add(eps).sqrt().to(x.dtype)
38
+ return std.expand(x.shape)
39
+
40
+
41
+ def instance_std_tpu(x, eps: float = 1e-5):
42
+ std = manual_var(x, dim=(2, 3)).add(eps).sqrt()
43
+ return std.expand(x.shape)
44
+ # instance_std = instance_std_tpu
45
+
46
+
47
+ def instance_rms(x, eps: float = 1e-5):
48
+ rms = x.float().square().mean(dim=(2, 3), keepdim=True).add(eps).sqrt().to(x.dtype)
49
+ return rms.expand(x.shape)
50
+
51
+
52
+ def manual_var(x, dim: Union[int, Sequence[int]], diff_sqm: bool = False):
53
+ xm = x.mean(dim=dim, keepdim=True)
54
+ if diff_sqm:
55
+ # difference of squared mean and mean squared, faster on TPU can be less stable
56
+ var = ((x * x).mean(dim=dim, keepdim=True) - (xm * xm)).clamp(0)
57
+ else:
58
+ var = ((x - xm) * (x - xm)).mean(dim=dim, keepdim=True)
59
+ return var
60
+
61
+
62
+ def group_std(x, groups: int = 32, eps: float = 1e-5, flatten: bool = False):
63
+ B, C, H, W = x.shape
64
+ x_dtype = x.dtype
65
+ _assert(C % groups == 0, '')
66
+ if flatten:
67
+ x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues
68
+ std = x.float().var(dim=2, unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype)
69
+ else:
70
+ x = x.reshape(B, groups, C // groups, H, W)
71
+ std = x.float().var(dim=(2, 3, 4), unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype)
72
+ return std.expand(x.shape).reshape(B, C, H, W)
73
+
74
+
75
+ def group_std_tpu(x, groups: int = 32, eps: float = 1e-5, diff_sqm: bool = False, flatten: bool = False):
76
+ # This is a workaround for some stability / odd behaviour of .var and .std
77
+ # running on PyTorch XLA w/ TPUs. These manual var impl are producing much better results
78
+ B, C, H, W = x.shape
79
+ _assert(C % groups == 0, '')
80
+ if flatten:
81
+ x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues
82
+ var = manual_var(x, dim=-1, diff_sqm=diff_sqm)
83
+ else:
84
+ x = x.reshape(B, groups, C // groups, H, W)
85
+ var = manual_var(x, dim=(2, 3, 4), diff_sqm=diff_sqm)
86
+ return var.add(eps).sqrt().expand(x.shape).reshape(B, C, H, W)
87
+ #group_std = group_std_tpu # FIXME TPU temporary
88
+
89
+
90
+ def group_rms(x, groups: int = 32, eps: float = 1e-5):
91
+ B, C, H, W = x.shape
92
+ _assert(C % groups == 0, '')
93
+ x_dtype = x.dtype
94
+ x = x.reshape(B, groups, C // groups, H, W)
95
+ rms = x.float().square().mean(dim=(2, 3, 4), keepdim=True).add(eps).sqrt_().to(x_dtype)
96
+ return rms.expand(x.shape).reshape(B, C, H, W)
97
+
98
+
99
+ class EvoNorm2dB0(nn.Module):
100
+ def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-3, **_):
101
+ super().__init__()
102
+ self.apply_act = apply_act # apply activation (non-linearity)
103
+ self.momentum = momentum
104
+ self.eps = eps
105
+ self.weight = nn.Parameter(torch.ones(num_features))
106
+ self.bias = nn.Parameter(torch.zeros(num_features))
107
+ self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None
108
+ self.register_buffer('running_var', torch.ones(num_features))
109
+ self.reset_parameters()
110
+
111
+ def reset_parameters(self):
112
+ nn.init.ones_(self.weight)
113
+ nn.init.zeros_(self.bias)
114
+ if self.v is not None:
115
+ nn.init.ones_(self.v)
116
+
117
+ def forward(self, x):
118
+ _assert(x.dim() == 4, 'expected 4D input')
119
+ x_dtype = x.dtype
120
+ v_shape = (1, -1, 1, 1)
121
+ if self.v is not None:
122
+ if self.training:
123
+ var = x.float().var(dim=(0, 2, 3), unbiased=False)
124
+ # var = manual_var(x, dim=(0, 2, 3)).squeeze()
125
+ n = x.numel() / x.shape[1]
126
+ self.running_var.copy_(
127
+ self.running_var * (1 - self.momentum) +
128
+ var.detach() * self.momentum * (n / (n - 1)))
129
+ else:
130
+ var = self.running_var
131
+ left = var.add(self.eps).sqrt_().to(x_dtype).view(v_shape).expand_as(x)
132
+ v = self.v.to(x_dtype).view(v_shape)
133
+ right = x * v + instance_std(x, self.eps)
134
+ x = x / left.max(right)
135
+ return x * self.weight.to(x_dtype).view(v_shape) + self.bias.to(x_dtype).view(v_shape)
136
+
137
+
138
+ class EvoNorm2dB1(nn.Module):
139
+ def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_):
140
+ super().__init__()
141
+ self.apply_act = apply_act # apply activation (non-linearity)
142
+ self.momentum = momentum
143
+ self.eps = eps
144
+ self.weight = nn.Parameter(torch.ones(num_features))
145
+ self.bias = nn.Parameter(torch.zeros(num_features))
146
+ self.register_buffer('running_var', torch.ones(num_features))
147
+ self.reset_parameters()
148
+
149
+ def reset_parameters(self):
150
+ nn.init.ones_(self.weight)
151
+ nn.init.zeros_(self.bias)
152
+
153
+ def forward(self, x):
154
+ _assert(x.dim() == 4, 'expected 4D input')
155
+ x_dtype = x.dtype
156
+ v_shape = (1, -1, 1, 1)
157
+ if self.apply_act:
158
+ if self.training:
159
+ var = x.float().var(dim=(0, 2, 3), unbiased=False)
160
+ n = x.numel() / x.shape[1]
161
+ self.running_var.copy_(
162
+ self.running_var * (1 - self.momentum) +
163
+ var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1)))
164
+ else:
165
+ var = self.running_var
166
+ var = var.to(x_dtype).view(v_shape)
167
+ left = var.add(self.eps).sqrt_()
168
+ right = (x + 1) * instance_rms(x, self.eps)
169
+ x = x / left.max(right)
170
+ return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
171
+
172
+
173
+ class EvoNorm2dB2(nn.Module):
174
+ def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_):
175
+ super().__init__()
176
+ self.apply_act = apply_act # apply activation (non-linearity)
177
+ self.momentum = momentum
178
+ self.eps = eps
179
+ self.weight = nn.Parameter(torch.ones(num_features))
180
+ self.bias = nn.Parameter(torch.zeros(num_features))
181
+ self.register_buffer('running_var', torch.ones(num_features))
182
+ self.reset_parameters()
183
+
184
+ def reset_parameters(self):
185
+ nn.init.ones_(self.weight)
186
+ nn.init.zeros_(self.bias)
187
+
188
+ def forward(self, x):
189
+ _assert(x.dim() == 4, 'expected 4D input')
190
+ x_dtype = x.dtype
191
+ v_shape = (1, -1, 1, 1)
192
+ if self.apply_act:
193
+ if self.training:
194
+ var = x.float().var(dim=(0, 2, 3), unbiased=False)
195
+ n = x.numel() / x.shape[1]
196
+ self.running_var.copy_(
197
+ self.running_var * (1 - self.momentum) +
198
+ var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1)))
199
+ else:
200
+ var = self.running_var
201
+ var = var.to(x_dtype).view(v_shape)
202
+ left = var.add(self.eps).sqrt_()
203
+ right = instance_rms(x, self.eps) - x
204
+ x = x / left.max(right)
205
+ return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
206
+
207
+
208
+ class EvoNorm2dS0(nn.Module):
209
+ def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-5, **_):
210
+ super().__init__()
211
+ self.apply_act = apply_act # apply activation (non-linearity)
212
+ if group_size:
213
+ assert num_features % group_size == 0
214
+ self.groups = num_features // group_size
215
+ else:
216
+ self.groups = groups
217
+ self.eps = eps
218
+ self.weight = nn.Parameter(torch.ones(num_features))
219
+ self.bias = nn.Parameter(torch.zeros(num_features))
220
+ self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None
221
+ self.reset_parameters()
222
+
223
+ def reset_parameters(self):
224
+ nn.init.ones_(self.weight)
225
+ nn.init.zeros_(self.bias)
226
+ if self.v is not None:
227
+ nn.init.ones_(self.v)
228
+
229
+ def forward(self, x):
230
+ _assert(x.dim() == 4, 'expected 4D input')
231
+ x_dtype = x.dtype
232
+ v_shape = (1, -1, 1, 1)
233
+ if self.v is not None:
234
+ v = self.v.view(v_shape).to(x_dtype)
235
+ x = x * (x * v).sigmoid() / group_std(x, self.groups, self.eps)
236
+ return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
237
+
238
+
239
+ class EvoNorm2dS0a(EvoNorm2dS0):
240
+ def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-3, **_):
241
+ super().__init__(
242
+ num_features, groups=groups, group_size=group_size, apply_act=apply_act, eps=eps)
243
+
244
+ def forward(self, x):
245
+ _assert(x.dim() == 4, 'expected 4D input')
246
+ x_dtype = x.dtype
247
+ v_shape = (1, -1, 1, 1)
248
+ d = group_std(x, self.groups, self.eps)
249
+ if self.v is not None:
250
+ v = self.v.view(v_shape).to(x_dtype)
251
+ x = x * (x * v).sigmoid()
252
+ x = x / d
253
+ return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
254
+
255
+
256
+ class EvoNorm2dS1(nn.Module):
257
+ def __init__(
258
+ self, num_features, groups=32, group_size=None,
259
+ apply_act=True, act_layer=None, eps=1e-5, **_):
260
+ super().__init__()
261
+ act_layer = act_layer or nn.SiLU
262
+ self.apply_act = apply_act # apply activation (non-linearity)
263
+ if act_layer is not None and apply_act:
264
+ self.act = create_act_layer(act_layer)
265
+ else:
266
+ self.act = nn.Identity()
267
+ if group_size:
268
+ assert num_features % group_size == 0
269
+ self.groups = num_features // group_size
270
+ else:
271
+ self.groups = groups
272
+ self.eps = eps
273
+ self.pre_act_norm = False
274
+ self.weight = nn.Parameter(torch.ones(num_features))
275
+ self.bias = nn.Parameter(torch.zeros(num_features))
276
+ self.reset_parameters()
277
+
278
+ def reset_parameters(self):
279
+ nn.init.ones_(self.weight)
280
+ nn.init.zeros_(self.bias)
281
+
282
+ def forward(self, x):
283
+ _assert(x.dim() == 4, 'expected 4D input')
284
+ x_dtype = x.dtype
285
+ v_shape = (1, -1, 1, 1)
286
+ if self.apply_act:
287
+ x = self.act(x) / group_std(x, self.groups, self.eps)
288
+ return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
289
+
290
+
291
+ class EvoNorm2dS1a(EvoNorm2dS1):
292
+ def __init__(
293
+ self, num_features, groups=32, group_size=None,
294
+ apply_act=True, act_layer=None, eps=1e-3, **_):
295
+ super().__init__(
296
+ num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps)
297
+
298
+ def forward(self, x):
299
+ _assert(x.dim() == 4, 'expected 4D input')
300
+ x_dtype = x.dtype
301
+ v_shape = (1, -1, 1, 1)
302
+ x = self.act(x) / group_std(x, self.groups, self.eps)
303
+ return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
304
+
305
+
306
+ class EvoNorm2dS2(nn.Module):
307
+ def __init__(
308
+ self, num_features, groups=32, group_size=None,
309
+ apply_act=True, act_layer=None, eps=1e-5, **_):
310
+ super().__init__()
311
+ act_layer = act_layer or nn.SiLU
312
+ self.apply_act = apply_act # apply activation (non-linearity)
313
+ if act_layer is not None and apply_act:
314
+ self.act = create_act_layer(act_layer)
315
+ else:
316
+ self.act = nn.Identity()
317
+ if group_size:
318
+ assert num_features % group_size == 0
319
+ self.groups = num_features // group_size
320
+ else:
321
+ self.groups = groups
322
+ self.eps = eps
323
+ self.weight = nn.Parameter(torch.ones(num_features))
324
+ self.bias = nn.Parameter(torch.zeros(num_features))
325
+ self.reset_parameters()
326
+
327
+ def reset_parameters(self):
328
+ nn.init.ones_(self.weight)
329
+ nn.init.zeros_(self.bias)
330
+
331
+ def forward(self, x):
332
+ _assert(x.dim() == 4, 'expected 4D input')
333
+ x_dtype = x.dtype
334
+ v_shape = (1, -1, 1, 1)
335
+ if self.apply_act:
336
+ x = self.act(x) / group_rms(x, self.groups, self.eps)
337
+ return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
338
+
339
+
340
+ class EvoNorm2dS2a(EvoNorm2dS2):
341
+ def __init__(
342
+ self, num_features, groups=32, group_size=None,
343
+ apply_act=True, act_layer=None, eps=1e-3, **_):
344
+ super().__init__(
345
+ num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps)
346
+
347
+ def forward(self, x):
348
+ _assert(x.dim() == 4, 'expected 4D input')
349
+ x_dtype = x.dtype
350
+ v_shape = (1, -1, 1, 1)
351
+ x = self.act(x) / group_rms(x, self.groups, self.eps)
352
+ return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
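The EvoNorm variants above act as drop-in replacements for a BatchNorm2d + activation pair. A minimal usage sketch (assuming these classes are importable from timm.layers, per the file layout above):

import torch
from timm.layers import EvoNorm2dB0, EvoNorm2dS0  # assumed import path

# B0 keeps a running_var buffer (batch statistics); S0 uses group statistics only
norm_act_b = EvoNorm2dB0(num_features=64)
norm_act_s = EvoNorm2dS0(num_features=64, group_size=16)  # 64 channels -> 4 groups of 16

x = torch.randn(2, 64, 32, 32)
y_b = norm_act_b(x)  # (2, 64, 32, 32)
y_s = norm_act_s(x)  # (2, 64, 32, 32)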
pytorch-image-models/timm/layers/fast_norm.py ADDED
@@ -0,0 +1,150 @@
1
+ """ 'Fast' Normalization Functions
2
+
3
+ For GroupNorm and LayerNorm these functions bypass typical AMP upcast to float32.
4
+
5
+ Additionally, for LayerNorm, the APEX fused LN is used if available (which also does not upcast)
6
+
7
+ Hacked together by / Copyright 2022 Ross Wightman
8
+ """
9
+ from typing import List, Optional
10
+
11
+ import torch
12
+ from torch.nn import functional as F
13
+
14
+ try:
15
+ from apex.normalization.fused_layer_norm import fused_layer_norm_affine
16
+ has_apex = True
17
+ except ImportError:
18
+ has_apex = False
19
+
20
+ try:
21
+ from apex.normalization.fused_layer_norm import fused_rms_norm_affine, fused_rms_norm
22
+ has_apex_rmsnorm = True
23
+ except ImportError:
24
+ has_apex_rmsnorm = False
25
+
26
+
27
+ # fast (ie lower precision LN) can be disabled with this flag if issues crop up
28
+ _USE_FAST_NORM = False # defaulting to False for now
29
+
30
+
31
+ def get_autocast_dtype(device: str = 'cuda'):
32
+ try:
33
+ return torch.get_autocast_dtype(device)
34
+ except (AttributeError, TypeError):
35
+ # dispatch to older device specific fns, only covering cuda/cpu devices here
36
+ if device == 'cpu':
37
+ return torch.get_autocast_cpu_dtype()
38
+ else:
39
+ assert device == 'cuda'
40
+ return torch.get_autocast_gpu_dtype()
41
+
42
+
43
+ def is_autocast_enabled(device: str = 'cuda'):
44
+ try:
45
+ return torch.is_autocast_enabled(device)
46
+ except TypeError:
47
+ # dispatch to older device specific fns, only covering cuda/cpu devices here
48
+ if device == 'cpu':
49
+ return torch.is_autocast_cpu_enabled()
50
+ else:
51
+ assert device == 'cuda'
52
+ return torch.is_autocast_enabled() # defaults cuda (only cuda on older pytorch)
53
+
54
+
55
+ def is_fast_norm():
56
+ return _USE_FAST_NORM
57
+
58
+
59
+ def set_fast_norm(enable=True):
60
+ global _USE_FAST_NORM
61
+ _USE_FAST_NORM = enable
62
+
63
+
64
+ def fast_group_norm(
65
+ x: torch.Tensor,
66
+ num_groups: int,
67
+ weight: Optional[torch.Tensor] = None,
68
+ bias: Optional[torch.Tensor] = None,
69
+ eps: float = 1e-5
70
+ ) -> torch.Tensor:
71
+ if torch.jit.is_scripting():
72
+ # currently cannot use is_autocast_enabled within torchscript
73
+ return F.group_norm(x, num_groups, weight, bias, eps)
74
+
75
+ if is_autocast_enabled(x.device.type):
76
+ # normally native AMP casts GN inputs to float32
77
+ # here we use the low precision autocast dtype
78
+ # FIXME what to do re CPU autocast?
79
+ dt = get_autocast_dtype(x.device.type)
80
+ x, weight, bias = x.to(dt), weight.to(dt), bias.to(dt) if bias is not None else None
81
+
82
+ with torch.amp.autocast(device_type=x.device.type, enabled=False):
83
+ return F.group_norm(x, num_groups, weight, bias, eps)
84
+
85
+
86
+ def fast_layer_norm(
87
+ x: torch.Tensor,
88
+ normalized_shape: List[int],
89
+ weight: Optional[torch.Tensor] = None,
90
+ bias: Optional[torch.Tensor] = None,
91
+ eps: float = 1e-5
92
+ ) -> torch.Tensor:
93
+ if torch.jit.is_scripting():
94
+ # currently cannot use is_autocast_enabled within torchscript
95
+ return F.layer_norm(x, normalized_shape, weight, bias, eps)
96
+
97
+ if has_apex:
98
+ return fused_layer_norm_affine(x, weight, bias, normalized_shape, eps)
99
+
100
+ if is_autocast_enabled(x.device.type):
101
+ # normally native AMP casts LN inputs to float32
102
+ # apex LN does not, this is behaving like Apex
103
+ dt = get_autocast_dtype(x.device.type)
104
+ # FIXME what to do re CPU autocast?
105
+ x, weight, bias = x.to(dt), weight.to(dt), bias.to(dt) if bias is not None else None
106
+
107
+ with torch.amp.autocast(device_type=x.device.type, enabled=False):
108
+ return F.layer_norm(x, normalized_shape, weight, bias, eps)
109
+
110
+
111
+ def rms_norm(
112
+ x: torch.Tensor,
113
+ normalized_shape: List[int],
114
+ weight: Optional[torch.Tensor] = None,
115
+ eps: float = 1e-5,
116
+ ):
117
+ norm_ndim = len(normalized_shape)
118
+ if torch.jit.is_scripting():
119
+ # ndim = len(x.shape)
120
+ # dims = list(range(ndim - norm_ndim, ndim)) # this doesn't work on pytorch <= 1.13.x
121
+ # NOTE -ve dims cause torchscript to crash in some cases, out of options to work around
122
+ assert norm_ndim == 1
123
+ v = torch.var(x, dim=-1).unsqueeze(-1) # ts crashes with -ve dim + keepdim=True
124
+ else:
125
+ dims = tuple(range(-1, -norm_ndim - 1, -1))
126
+ v = torch.var(x, dim=dims, keepdim=True)
127
+ x = x * torch.rsqrt(v + eps)
128
+ if weight is not None:
129
+ x = x * weight
130
+ return x
131
+
132
+
133
+ def fast_rms_norm(
134
+ x: torch.Tensor,
135
+ normalized_shape: List[int],
136
+ weight: Optional[torch.Tensor] = None,
137
+ eps: float = 1e-5,
138
+ ) -> torch.Tensor:
139
+ if torch.jit.is_scripting():
140
+ # this must be by itself, cannot merge with has_apex_rmsnorm
141
+ return rms_norm(x, normalized_shape, weight, eps)
142
+
143
+ if has_apex_rmsnorm:
144
+ if weight is None:
145
+ return fused_rms_norm(x, normalized_shape, eps)
146
+ else:
147
+ return fused_rms_norm_affine(x, weight, normalized_shape, eps)
148
+
149
+ # fallback
150
+ return rms_norm(x, normalized_shape, weight, eps)
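A quick sketch of how these helpers are meant to be used (import path assumed from the file layout above): fast norm is an opt-in global toggle, and the fast_* functions fall back to the standard functional ops when scripting or when APEX is absent:

import torch
from timm.layers.fast_norm import set_fast_norm, is_fast_norm, fast_layer_norm

set_fast_norm(True)                       # opt in to low-precision LN under autocast
x = torch.randn(8, 197, 768)
weight, bias = torch.ones(768), torch.zeros(768)
y = fast_layer_norm(x, [768], weight, bias, eps=1e-6)
assert is_fast_norm()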
pytorch-image-models/timm/layers/filter_response_norm.py ADDED
@@ -0,0 +1,68 @@
1
+ """ Filter Response Norm in PyTorch
2
+
3
+ Based on `Filter Response Normalization Layer` - https://arxiv.org/abs/1911.09737
4
+
5
+ Hacked together by / Copyright 2021 Ross Wightman
6
+ """
7
+ import torch
8
+ import torch.nn as nn
9
+
10
+ from .create_act import create_act_layer
11
+ from .trace_utils import _assert
12
+
13
+
14
+ def inv_instance_rms(x, eps: float = 1e-5):
15
+ rms = x.square().float().mean(dim=(2, 3), keepdim=True).add(eps).rsqrt().to(x.dtype)
16
+ return rms.expand(x.shape)
17
+
18
+
19
+ class FilterResponseNormTlu2d(nn.Module):
20
+ def __init__(self, num_features, apply_act=True, eps=1e-5, rms=True, **_):
21
+ super(FilterResponseNormTlu2d, self).__init__()
22
+ self.apply_act = apply_act # apply activation (non-linearity)
23
+ self.rms = rms
24
+ self.eps = eps
25
+ self.weight = nn.Parameter(torch.ones(num_features))
26
+ self.bias = nn.Parameter(torch.zeros(num_features))
27
+ self.tau = nn.Parameter(torch.zeros(num_features)) if apply_act else None
28
+ self.reset_parameters()
29
+
30
+ def reset_parameters(self):
31
+ nn.init.ones_(self.weight)
32
+ nn.init.zeros_(self.bias)
33
+ if self.tau is not None:
34
+ nn.init.zeros_(self.tau)
35
+
36
+ def forward(self, x):
37
+ _assert(x.dim() == 4, 'expected 4D input')
38
+ x_dtype = x.dtype
39
+ v_shape = (1, -1, 1, 1)
40
+ x = x * inv_instance_rms(x, self.eps)
41
+ x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype)
42
+ return torch.maximum(x, self.tau.reshape(v_shape).to(dtype=x_dtype)) if self.tau is not None else x
43
+
44
+
45
+ class FilterResponseNormAct2d(nn.Module):
46
+ def __init__(self, num_features, apply_act=True, act_layer=nn.ReLU, inplace=None, rms=True, eps=1e-5, **_):
47
+ super(FilterResponseNormAct2d, self).__init__()
48
+ if act_layer is not None and apply_act:
49
+ self.act = create_act_layer(act_layer, inplace=inplace)
50
+ else:
51
+ self.act = nn.Identity()
52
+ self.rms = rms
53
+ self.eps = eps
54
+ self.weight = nn.Parameter(torch.ones(num_features))
55
+ self.bias = nn.Parameter(torch.zeros(num_features))
56
+ self.reset_parameters()
57
+
58
+ def reset_parameters(self):
59
+ nn.init.ones_(self.weight)
60
+ nn.init.zeros_(self.bias)
61
+
62
+ def forward(self, x):
63
+ _assert(x.dim() == 4, 'expected 4D input')
64
+ x_dtype = x.dtype
65
+ v_shape = (1, -1, 1, 1)
66
+ x = x * inv_instance_rms(x, self.eps)
67
+ x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype)
68
+ return self.act(x)
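A minimal sketch of the two FRN modules above (assumed importable from the module path shown): FilterResponseNormTlu2d pairs FRN with a learned-threshold TLU, while FilterResponseNormAct2d pairs it with a standard activation:

import torch
import torch.nn as nn
from timm.layers.filter_response_norm import FilterResponseNormAct2d, FilterResponseNormTlu2d

frn_relu = FilterResponseNormAct2d(32, act_layer=nn.ReLU)
frn_tlu = FilterResponseNormTlu2d(32)
x = torch.randn(4, 32, 56, 56)
y1, y2 = frn_relu(x), frn_tlu(x)          # both keep the (4, 32, 56, 56) shape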
pytorch-image-models/timm/layers/format.py ADDED
@@ -0,0 +1,58 @@
1
+ from enum import Enum
2
+ from typing import Union
3
+
4
+ import torch
5
+
6
+
7
+ class Format(str, Enum):
8
+ NCHW = 'NCHW'
9
+ NHWC = 'NHWC'
10
+ NCL = 'NCL'
11
+ NLC = 'NLC'
12
+
13
+
14
+ FormatT = Union[str, Format]
15
+
16
+
17
+ def get_spatial_dim(fmt: FormatT):
18
+ fmt = Format(fmt)
19
+ if fmt is Format.NLC:
20
+ dim = (1,)
21
+ elif fmt is Format.NCL:
22
+ dim = (2,)
23
+ elif fmt is Format.NHWC:
24
+ dim = (1, 2)
25
+ else:
26
+ dim = (2, 3)
27
+ return dim
28
+
29
+
30
+ def get_channel_dim(fmt: FormatT):
31
+ fmt = Format(fmt)
32
+ if fmt is Format.NHWC:
33
+ dim = 3
34
+ elif fmt is Format.NLC:
35
+ dim = 2
36
+ else:
37
+ dim = 1
38
+ return dim
39
+
40
+
41
+ def nchw_to(x: torch.Tensor, fmt: Format):
42
+ if fmt == Format.NHWC:
43
+ x = x.permute(0, 2, 3, 1)
44
+ elif fmt == Format.NLC:
45
+ x = x.flatten(2).transpose(1, 2)
46
+ elif fmt == Format.NCL:
47
+ x = x.flatten(2)
48
+ return x
49
+
50
+
51
+ def nhwc_to(x: torch.Tensor, fmt: Format):
52
+ if fmt == Format.NCHW:
53
+ x = x.permute(0, 3, 1, 2)
54
+ elif fmt == Format.NLC:
55
+ x = x.flatten(1, 2)
56
+ elif fmt == Format.NCL:
57
+ x = x.flatten(1, 2).transpose(1, 2)
58
+ return x
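The helpers above convert between tensor layout enums; a short sketch (import path assumed):

import torch
from timm.layers.format import Format, nchw_to, get_channel_dim, get_spatial_dim

x = torch.randn(2, 64, 14, 14)            # NCHW feature map
tokens = nchw_to(x, Format.NLC)           # (2, 196, 64): flatten spatial dims, channels last
assert get_channel_dim(Format.NLC) == 2
assert get_spatial_dim(Format.NHWC) == (1, 2)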
pytorch-image-models/timm/layers/gather_excite.py ADDED
@@ -0,0 +1,90 @@
1
+ """ Gather-Excite Attention Block
2
+
3
+ Paper: `Gather-Excite: Exploiting Feature Context in CNNs` - https://arxiv.org/abs/1810.12348
4
+
5
+ Official code here, but it's only partial impl in Caffe: https://github.com/hujie-frank/GENet
6
+
7
+ I've tried to support all of the extent settings both w/ and w/o params. I don't believe I've seen another
8
+ impl that covers all of the cases.
9
+
10
+ NOTE: extent=0 + extra_params=False is equivalent to Squeeze-and-Excitation
11
+
12
+ Hacked together by / Copyright 2021 Ross Wightman
13
+ """
14
+ import math
15
+
16
+ from torch import nn as nn
17
+ import torch.nn.functional as F
18
+
19
+ from .create_act import create_act_layer, get_act_layer
20
+ from .create_conv2d import create_conv2d
21
+ from .helpers import make_divisible
22
+ from .mlp import ConvMlp
23
+
24
+
25
+ class GatherExcite(nn.Module):
26
+ """ Gather-Excite Attention Module
27
+ """
28
+ def __init__(
29
+ self, channels, feat_size=None, extra_params=False, extent=0, use_mlp=True,
30
+ rd_ratio=1./16, rd_channels=None, rd_divisor=1, add_maxpool=False,
31
+ act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, gate_layer='sigmoid'):
32
+ super(GatherExcite, self).__init__()
33
+ self.add_maxpool = add_maxpool
34
+ act_layer = get_act_layer(act_layer)
35
+ self.extent = extent
36
+ if extra_params:
37
+ self.gather = nn.Sequential()
38
+ if extent == 0:
39
+ assert feat_size is not None, 'spatial feature size must be specified for global extent w/ params'
40
+ self.gather.add_module(
41
+ 'conv1', create_conv2d(channels, channels, kernel_size=feat_size, stride=1, depthwise=True))
42
+ if norm_layer:
43
+ self.gather.add_module(f'norm1', nn.BatchNorm2d(channels))
44
+ else:
45
+ assert extent % 2 == 0
46
+ num_conv = int(math.log2(extent))
47
+ for i in range(num_conv):
48
+ self.gather.add_module(
49
+ f'conv{i + 1}',
50
+ create_conv2d(channels, channels, kernel_size=3, stride=2, depthwise=True))
51
+ if norm_layer:
52
+ self.gather.add_module(f'norm{i + 1}', nn.BatchNorm2d(channels))
53
+ if i != num_conv - 1:
54
+ self.gather.add_module(f'act{i + 1}', act_layer(inplace=True))
55
+ else:
56
+ self.gather = None
57
+ if self.extent == 0:
58
+ self.gk = 0
59
+ self.gs = 0
60
+ else:
61
+ assert extent % 2 == 0
62
+ self.gk = self.extent * 2 - 1
63
+ self.gs = self.extent
64
+
65
+ if not rd_channels:
66
+ rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
67
+ self.mlp = ConvMlp(channels, rd_channels, act_layer=act_layer) if use_mlp else nn.Identity()
68
+ self.gate = create_act_layer(gate_layer)
69
+
70
+ def forward(self, x):
71
+ size = x.shape[-2:]
72
+ if self.gather is not None:
73
+ x_ge = self.gather(x)
74
+ else:
75
+ if self.extent == 0:
76
+ # global extent
77
+ x_ge = x.mean(dim=(2, 3), keepdims=True)
78
+ if self.add_maxpool:
79
+ # experimental codepath, may remove or change
80
+ x_ge = 0.5 * x_ge + 0.5 * x.amax((2, 3), keepdim=True)
81
+ else:
82
+ x_ge = F.avg_pool2d(
83
+ x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2, count_include_pad=False)
84
+ if self.add_maxpool:
85
+ # experimental codepath, may remove or change
86
+ x_ge = 0.5 * x_ge + 0.5 * F.max_pool2d(x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2)
87
+ x_ge = self.mlp(x_ge)
88
+ if x_ge.shape[-1] != 1 or x_ge.shape[-2] != 1:
89
+ x_ge = F.interpolate(x_ge, size=size)
90
+ return x * self.gate(x_ge)
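A usage sketch for GatherExcite (import path assumed). With extent=0 and extra_params=False the gather step is a parameter-free global pool, so the block behaves like squeeze-and-excitation gating:

import torch
from timm.layers.gather_excite import GatherExcite

ge = GatherExcite(channels=128, extent=0, extra_params=False)
x = torch.randn(2, 128, 28, 28)
y = ge(x)                                 # gated output, same shape as x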
pytorch-image-models/timm/layers/global_context.py ADDED
@@ -0,0 +1,67 @@
1
+ """ Global Context Attention Block
2
+
3
+ Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond`
4
+ - https://arxiv.org/abs/1904.11492
5
+
6
+ Official code consulted as reference: https://github.com/xvjiarui/GCNet
7
+
8
+ Hacked together by / Copyright 2021 Ross Wightman
9
+ """
10
+ from torch import nn as nn
11
+ import torch.nn.functional as F
12
+
13
+ from .create_act import create_act_layer, get_act_layer
14
+ from .helpers import make_divisible
15
+ from .mlp import ConvMlp
16
+ from .norm import LayerNorm2d
17
+
18
+
19
+ class GlobalContext(nn.Module):
20
+
21
+ def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False,
22
+ rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'):
23
+ super(GlobalContext, self).__init__()
24
+ act_layer = get_act_layer(act_layer)
25
+
26
+ self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None
27
+
28
+ if rd_channels is None:
29
+ rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
30
+ if fuse_add:
31
+ self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d)
32
+ else:
33
+ self.mlp_add = None
34
+ if fuse_scale:
35
+ self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d)
36
+ else:
37
+ self.mlp_scale = None
38
+
39
+ self.gate = create_act_layer(gate_layer)
40
+ self.init_last_zero = init_last_zero
41
+ self.reset_parameters()
42
+
43
+ def reset_parameters(self):
44
+ if self.conv_attn is not None:
45
+ nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu')
46
+ if self.mlp_add is not None:
47
+ nn.init.zeros_(self.mlp_add.fc2.weight)
48
+
49
+ def forward(self, x):
50
+ B, C, H, W = x.shape
51
+
52
+ if self.conv_attn is not None:
53
+ attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W)
54
+ attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1)
55
+ context = x.reshape(B, C, H * W).unsqueeze(1) @ attn
56
+ context = context.view(B, C, 1, 1)
57
+ else:
58
+ context = x.mean(dim=(2, 3), keepdim=True)
59
+
60
+ if self.mlp_scale is not None:
61
+ mlp_x = self.mlp_scale(context)
62
+ x = x * self.gate(mlp_x)
63
+ if self.mlp_add is not None:
64
+ mlp_x = self.mlp_add(context)
65
+ x = x + mlp_x
66
+
67
+ return x
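A usage sketch for GlobalContext (import path assumed); by default it uses attention pooling for the context and scale (multiplicative) fusion:

import torch
from timm.layers.global_context import GlobalContext

gc_block = GlobalContext(channels=128)    # fuse_scale=True, fuse_add=False by default
x = torch.randn(2, 128, 28, 28)
y = gc_block(x)                           # same shape as x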
pytorch-image-models/timm/layers/halo_attn.py ADDED
@@ -0,0 +1,233 @@
1
+ """ Halo Self Attention
2
+
3
+ Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
4
+ - https://arxiv.org/abs/2103.12731
5
+
6
+ @misc{2103.12731,
7
+ Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and
8
+ Jonathon Shlens},
9
+ Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones},
10
+ Year = {2021},
11
+ }
12
+
13
+ Status:
14
+ This impl is a WIP, there is no official ref impl and some details in the paper weren't clear to me.
15
+ The attention mechanism works but it's slow as implemented.
16
+
17
+ Hacked together by / Copyright 2021 Ross Wightman
18
+ """
19
+ from typing import List
20
+
21
+ import torch
22
+ from torch import nn
23
+ import torch.nn.functional as F
24
+
25
+ from .helpers import make_divisible
26
+ from .weight_init import trunc_normal_
27
+ from .trace_utils import _assert
28
+
29
+
30
+ def rel_logits_1d(q, rel_k, permute_mask: List[int]):
31
+ """ Compute relative logits along one dimension
32
+
33
+ As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
34
+ Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
35
+
36
+ Args:
37
+ q: (batch, height, width, dim)
38
+ rel_k: (2 * window - 1, dim)
39
+ permute_mask: permute output dim according to this
40
+ """
41
+ B, H, W, dim = q.shape
42
+ rel_size = rel_k.shape[0]
43
+ win_size = (rel_size + 1) // 2
44
+
45
+ x = (q @ rel_k.transpose(-1, -2))
46
+ x = x.reshape(-1, W, rel_size)
47
+
48
+ # pad to shift from relative to absolute indexing
49
+ x_pad = F.pad(x, [0, 1]).flatten(1)
50
+ x_pad = F.pad(x_pad, [0, rel_size - W])
51
+
52
+ # reshape and slice out the padded elements
53
+ x_pad = x_pad.reshape(-1, W + 1, rel_size)
54
+ x = x_pad[:, :W, win_size - 1:]
55
+
56
+ # reshape and tile
57
+ x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1)
58
+ return x.permute(permute_mask)
59
+
60
+
61
+ class PosEmbedRel(nn.Module):
62
+ """ Relative Position Embedding
63
+ As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
64
+ Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
65
+
66
+ """
67
+ def __init__(self, block_size, win_size, dim_head, scale):
68
+ """
69
+ Args:
70
+ block_size (int): block size
71
+ win_size (int): neighbourhood window size
72
+ dim_head (int): attention head dim
73
+ scale (float): scale factor (for init)
74
+ """
75
+ super().__init__()
76
+ self.block_size = block_size
77
+ self.dim_head = dim_head
78
+ self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale)
79
+ self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale)
80
+
81
+ def forward(self, q):
82
+ B, BB, HW, _ = q.shape
83
+
84
+ # relative logits in width dimension.
85
+ q = q.reshape(-1, self.block_size, self.block_size, self.dim_head)
86
+ rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4))
87
+
88
+ # relative logits in height dimension.
89
+ q = q.transpose(1, 2)
90
+ rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2))
91
+
92
+ rel_logits = rel_logits_h + rel_logits_w
93
+ rel_logits = rel_logits.reshape(B, BB, HW, -1)
94
+ return rel_logits
95
+
96
+
97
+ class HaloAttn(nn.Module):
98
+ """ Halo Attention
99
+
100
+ Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
101
+ - https://arxiv.org/abs/2103.12731
102
+
103
+ The internal dimensions of the attention module are controlled by the interaction of several arguments.
104
+ * the output dimension of the module is specified by dim_out, which falls back to input dim if not set
105
+ * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
106
+ * the query and key (qk) dimensions are determined by
107
+ * num_heads * dim_head if dim_head is not None
108
+ * num_heads * (dim_out * qk_ratio // num_heads) if dim_head is None
109
+ * as seen above, qk_ratio determines the ratio of q and k relative to the output if dim_head not used
110
+
111
+ Args:
112
+ dim (int): input dimension to the module
113
+ dim_out (int): output dimension of the module, same as dim if not set
114
+ feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda)
115
+ stride: output stride of the module, query downscaled if > 1 (default: 1).
116
+ num_heads: parallel attention heads (default: 8).
117
+ dim_head: dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set
118
+ block_size (int): size of blocks. (default: 8)
119
+ halo_size (int): size of halo overlap. (default: 3)
120
+ qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0)
121
+ qkv_bias (bool) : add bias to q, k, and v projections
122
+ avg_down (bool): use average pool downsample instead of strided query blocks
123
+ scale_pos_embed (bool): scale the position embedding as well as Q @ K
124
+ """
125
+ def __init__(
126
+ self, dim, dim_out=None, feat_size=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3,
127
+ qk_ratio=1.0, qkv_bias=False, avg_down=False, scale_pos_embed=False):
128
+ super().__init__()
129
+ dim_out = dim_out or dim
130
+ assert dim_out % num_heads == 0
131
+ assert stride in (1, 2)
132
+ self.num_heads = num_heads
133
+ self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads
134
+ self.dim_head_v = dim_out // self.num_heads
135
+ self.dim_out_qk = num_heads * self.dim_head_qk
136
+ self.dim_out_v = num_heads * self.dim_head_v
137
+ self.scale = self.dim_head_qk ** -0.5
138
+ self.scale_pos_embed = scale_pos_embed
139
+ self.block_size = self.block_size_ds = block_size
140
+ self.halo_size = halo_size
141
+ self.win_size = block_size + halo_size * 2 # neighbourhood window size
142
+ self.block_stride = 1
143
+ use_avg_pool = False
144
+ if stride > 1:
145
+ use_avg_pool = avg_down or block_size % stride != 0
146
+ self.block_stride = 1 if use_avg_pool else stride
147
+ self.block_size_ds = self.block_size // self.block_stride
148
+
149
+ # FIXME not clear if this stride behaviour is what the paper intended
150
+ # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving
151
+ # data in unfolded block form. I haven't wrapped my head around how that'd look.
152
+ self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.block_stride, bias=qkv_bias)
153
+ self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias)
154
+
155
+ self.pos_embed = PosEmbedRel(
156
+ block_size=self.block_size_ds, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale)
157
+
158
+ self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity()
159
+
160
+ self.reset_parameters()
161
+
162
+ def reset_parameters(self):
163
+ std = self.q.weight.shape[1] ** -0.5 # fan-in
164
+ trunc_normal_(self.q.weight, std=std)
165
+ trunc_normal_(self.kv.weight, std=std)
166
+ trunc_normal_(self.pos_embed.height_rel, std=self.scale)
167
+ trunc_normal_(self.pos_embed.width_rel, std=self.scale)
168
+
169
+ def forward(self, x):
170
+ B, C, H, W = x.shape
171
+ _assert(H % self.block_size == 0, '')
172
+ _assert(W % self.block_size == 0, '')
173
+ num_h_blocks = H // self.block_size
174
+ num_w_blocks = W // self.block_size
175
+ num_blocks = num_h_blocks * num_w_blocks
176
+
177
+ q = self.q(x)
178
+ # unfold
179
+ q = q.reshape(
180
+ -1, self.dim_head_qk,
181
+ num_h_blocks, self.block_size_ds, num_w_blocks, self.block_size_ds).permute(0, 1, 3, 5, 2, 4)
182
+ # B, num_heads * dim_head * block_size ** 2, num_blocks
183
+ q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3)
184
+ # B * num_heads, num_blocks, block_size ** 2, dim_head
185
+
186
+ kv = self.kv(x)
187
+ # Generate overlapping windows for kv. This approach is good for GPU and CPU. However, unfold() is not
188
+ # lowered for PyTorch XLA so it will be very slow. See code at bottom of file for XLA friendly approach.
189
+ # FIXME figure out how to switch impl between this and conv2d if XLA being used.
190
+ kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size])
191
+ kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape(
192
+ B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1)
193
+ k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1)
194
+ # B * num_heads, num_blocks, win_size ** 2, dim_head_qk or dim_head_v
195
+
196
+ if self.scale_pos_embed:
197
+ attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale
198
+ else:
199
+ attn = (q @ k.transpose(-1, -2)) * self.scale + self.pos_embed(q)
200
+ # B * num_heads, num_blocks, block_size ** 2, win_size ** 2
201
+ attn = attn.softmax(dim=-1)
202
+
203
+ out = (attn @ v).transpose(1, 3) # B * num_heads, dim_head_v, block_size ** 2, num_blocks
204
+ # fold
205
+ out = out.reshape(-1, self.block_size_ds, self.block_size_ds, num_h_blocks, num_w_blocks)
206
+ out = out.permute(0, 3, 1, 4, 2).contiguous().view(
207
+ B, self.dim_out_v, H // self.block_stride, W // self.block_stride)
208
+ # B, dim_out, H // block_stride, W // block_stride
209
+ out = self.pool(out)
210
+ return out
211
+
212
+
213
+ """ Three alternatives for overlapping windows.
214
+
215
+ `.unfold().unfold()` is same speed as stride tricks with similar clarity as F.unfold()
216
+
217
+ if is_xla:
218
+ # This code achieves haloing on PyTorch XLA with reasonable runtime trade-off, it is
219
+ # EXTREMELY slow for backward on a GPU though so I need a way of selecting based on environment.
220
+ WW = self.win_size ** 2
221
+ pw = torch.eye(WW, dtype=x.dtype, device=x.device).reshape(WW, 1, self.win_size, self.win_size)
222
+ kv = F.conv2d(kv.reshape(-1, 1, H, W), pw, stride=self.block_size, padding=self.halo_size)
223
+ elif self.stride_tricks:
224
+ kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous()
225
+ kv = kv.as_strided((
226
+ B, self.dim_out_qk + self.dim_out_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks),
227
+ stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size))
228
+ else:
229
+ kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size)
230
+
231
+ kv = kv.reshape(
232
+ B * self.num_heads, self.dim_head_qk + self.dim_head_v, -1, num_blocks).transpose(1, 3)
233
+ """
pytorch-image-models/timm/layers/hybrid_embed.py ADDED
@@ -0,0 +1,253 @@
1
+ """ Image to Patch Hybird Embedding Layer
2
+
3
+ Hacked together by / Copyright 2020 Ross Wightman
4
+ """
5
+ import logging
6
+ import math
7
+ from typing import List, Optional, Tuple, Union
8
+
9
+ import torch
10
+ from torch import nn as nn
11
+ import torch.nn.functional as F
12
+
13
+ from .format import Format, nchw_to
14
+ from .helpers import to_2tuple
15
+ from .patch_embed import resample_patch_embed
16
+
17
+
18
+ _logger = logging.getLogger(__name__)
19
+
20
+
21
+ class HybridEmbed(nn.Module):
22
+ """ CNN Feature Map Embedding
23
+ Extract feature map from CNN, flatten, project to embedding dim.
24
+ """
25
+ output_fmt: Format
26
+ dynamic_img_pad: torch.jit.Final[bool]
27
+
28
+ def __init__(
29
+ self,
30
+ backbone: nn.Module,
31
+ img_size: Union[int, Tuple[int, int]] = 224,
32
+ patch_size: Union[int, Tuple[int, int]] = 1,
33
+ feature_size: Optional[Union[int, Tuple[int, int]]] = None,
34
+ feature_ratio: Optional[Union[int, Tuple[int, int]]] = None,
35
+ in_chans: int = 3,
36
+ embed_dim: int = 768,
37
+ bias: bool = True,
38
+ proj: bool = True,
39
+ flatten: bool = True,
40
+ output_fmt: Optional[str] = None,
41
+ strict_img_size: bool = True,
42
+ dynamic_img_pad: bool = False,
43
+ ):
44
+ super().__init__()
45
+ assert isinstance(backbone, nn.Module)
46
+ self.backbone = backbone
47
+ self.in_chans = in_chans
48
+ (
49
+ self.img_size,
50
+ self.patch_size,
51
+ self.feature_size,
52
+ self.feature_ratio,
53
+ self.feature_dim,
54
+ self.grid_size,
55
+ self.num_patches,
56
+ ) = self._init_backbone(
57
+ img_size=img_size,
58
+ patch_size=patch_size,
59
+ feature_size=feature_size,
60
+ feature_ratio=feature_ratio,
61
+ )
62
+
63
+ if output_fmt is not None:
64
+ self.flatten = False
65
+ self.output_fmt = Format(output_fmt)
66
+ else:
67
+ # flatten spatial dim and transpose to channels last, kept for bwd compat
68
+ self.flatten = flatten
69
+ self.output_fmt = Format.NCHW
70
+ self.strict_img_size = strict_img_size
71
+ self.dynamic_img_pad = dynamic_img_pad
72
+ if not dynamic_img_pad:
73
+ assert self.feature_size[0] % self.patch_size[0] == 0 and self.feature_size[1] % self.patch_size[1] == 0
74
+
75
+ if proj:
76
+ self.proj = nn.Conv2d(
77
+ self.feature_dim,
78
+ embed_dim,
79
+ kernel_size=patch_size,
80
+ stride=patch_size,
81
+ bias=bias,
82
+ )
83
+ else:
84
+ assert self.feature_dim == embed_dim, \
85
+ f'The feature dim ({self.feature_dim}) must match embed dim ({embed_dim}) when projection disabled.'
86
+ self.proj = nn.Identity()
87
+
88
+ def _init_backbone(
89
+ self,
90
+ img_size: Union[int, Tuple[int, int]] = 224,
91
+ patch_size: Union[int, Tuple[int, int]] = 1,
92
+ feature_size: Optional[Union[int, Tuple[int, int]]] = None,
93
+ feature_ratio: Optional[Union[int, Tuple[int, int]]] = None,
94
+ feature_dim: Optional[int] = None,
95
+ ):
96
+ img_size = to_2tuple(img_size)
97
+ patch_size = to_2tuple(patch_size)
98
+ if feature_size is None:
99
+ with torch.no_grad():
100
+ # NOTE Most reliable way of determining output dims is to run forward pass
101
+ training = self.backbone.training
102
+ if training:
103
+ self.backbone.eval()
104
+ o = self.backbone(torch.zeros(1, self.in_chans, img_size[0], img_size[1]))
105
+ if isinstance(o, (list, tuple)):
106
+ o = o[-1] # last feature if backbone outputs list/tuple of features
107
+ feature_size = o.shape[-2:]
108
+ feature_dim = o.shape[1]
109
+ self.backbone.train(training)
110
+ feature_ratio = tuple([s // f for s, f in zip(img_size, feature_size)])
111
+ else:
112
+ feature_size = to_2tuple(feature_size)
113
+ feature_ratio = to_2tuple(feature_ratio or 16)
114
+ if feature_dim is None:
115
+ if hasattr(self.backbone, 'feature_info'):
116
+ feature_dim = self.backbone.feature_info.channels()[-1]
117
+ else:
118
+ feature_dim = self.backbone.num_features
119
+ grid_size = tuple([f // p for f, p in zip(feature_size, patch_size)])
120
+ num_patches = grid_size[0] * grid_size[1]
121
+ return img_size, patch_size, feature_size, feature_ratio, feature_dim, grid_size, num_patches
122
+
123
+ def set_input_size(
124
+ self,
125
+ img_size: Optional[Union[int, Tuple[int, int]]] = None,
126
+ patch_size: Optional[Union[int, Tuple[int, int]]] = None,
127
+ feature_size: Optional[Union[int, Tuple[int, int]]] = None,
128
+ feature_ratio: Optional[Union[int, Tuple[int, int]]] = None,
129
+ feature_dim: Optional[int] = None,
130
+ ):
131
+ assert img_size is not None or patch_size is not None
132
+ img_size = img_size or self.img_size
133
+ new_patch_size = None
134
+ if patch_size is not None:
135
+ new_patch_size = to_2tuple(patch_size)
136
+ if new_patch_size is not None and new_patch_size != self.patch_size:
137
+ assert isinstance(self.proj, nn.Conv2d), 'HybridEmbed must have a projection layer to change patch size.'
138
+ with torch.no_grad():
139
+ new_proj = nn.Conv2d(
140
+ self.proj.in_channels,
141
+ self.proj.out_channels,
142
+ kernel_size=new_patch_size,
143
+ stride=new_patch_size,
144
+ bias=self.proj.bias is not None,
145
+ )
146
+ new_proj.weight.copy_(resample_patch_embed(self.proj.weight, new_patch_size, verbose=True))
147
+ if self.proj.bias is not None:
148
+ new_proj.bias.copy_(self.proj.bias)
149
+ self.proj = new_proj
150
+ patch_size = new_patch_size
151
+ patch_size = patch_size or self.patch_size
152
+
153
+ if img_size != self.img_size or patch_size != self.patch_size:
154
+ (
155
+ self.img_size,
156
+ self.patch_size,
157
+ self.feature_size,
158
+ self.feature_ratio,
159
+ self.feature_dim,
160
+ self.grid_size,
161
+ self.num_patches,
162
+ ) = self._init_backbone(
163
+ img_size=img_size,
164
+ patch_size=patch_size,
165
+ feature_size=feature_size,
166
+ feature_ratio=feature_ratio,
167
+ feature_dim=feature_dim,
168
+ )
169
+
170
+ def feat_ratio(self, as_scalar=True) -> Union[Tuple[int, int], int]:
171
+ total_reduction = (
172
+ self.feature_ratio[0] * self.patch_size[0],
173
+ self.feature_ratio[1] * self.patch_size[1]
174
+ )
175
+ if as_scalar:
176
+ return max(total_reduction)
177
+ else:
178
+ return total_reduction
179
+
180
+ def dynamic_feat_size(self, img_size: Tuple[int, int]) -> Tuple[int, int]:
181
+ """ Get feature grid size taking account dynamic padding and backbone network feat reduction
182
+ """
183
+ feat_size = (img_size[0] // self.feature_ratio[0], img_size[1] // self.feature_ratio[1])
184
+ if self.dynamic_img_pad:
185
+ return math.ceil(feat_size[0] / self.patch_size[0]), math.ceil(feat_size[1] / self.patch_size[1])
186
+ else:
187
+ return feat_size[0] // self.patch_size[0], feat_size[1] // self.patch_size[1]
188
+
189
+ @torch.jit.ignore
190
+ def set_grad_checkpointing(self, enable: bool = True):
191
+ if hasattr(self.backbone, 'set_grad_checkpointing'):
192
+ self.backbone.set_grad_checkpointing(enable=enable)
193
+ elif hasattr(self.backbone, 'grad_checkpointing'):
194
+ self.backbone.grad_checkpointing = enable
195
+
196
+ def forward(self, x):
197
+ x = self.backbone(x)
198
+ if isinstance(x, (list, tuple)):
199
+ x = x[-1] # last feature if backbone outputs list/tuple of features
200
+ _, _, H, W = x.shape
201
+ if self.dynamic_img_pad:
202
+ pad_h = (self.patch_size[0] - H % self.patch_size[0]) % self.patch_size[0]
203
+ pad_w = (self.patch_size[1] - W % self.patch_size[1]) % self.patch_size[1]
204
+ x = F.pad(x, (0, pad_w, 0, pad_h))
205
+ x = self.proj(x)
206
+ if self.flatten:
207
+ x = x.flatten(2).transpose(1, 2) # NCHW -> NLC
208
+ elif self.output_fmt != Format.NCHW:
209
+ x = nchw_to(x, self.output_fmt)
210
+ return x
211
+
212
+
213
+ class HybridEmbedWithSize(HybridEmbed):
214
+ """ CNN Feature Map Embedding
215
+ Extract feature map from CNN, flatten, project to embedding dim.
216
+ """
217
+ def __init__(
218
+ self,
219
+ backbone: nn.Module,
220
+ img_size: Union[int, Tuple[int, int]] = 224,
221
+ patch_size: Union[int, Tuple[int, int]] = 1,
222
+ feature_size: Optional[Union[int, Tuple[int, int]]] = None,
223
+ feature_ratio: Optional[Union[int, Tuple[int, int]]] = None,
224
+ in_chans: int = 3,
225
+ embed_dim: int = 768,
226
+ bias=True,
227
+ proj=True,
228
+ ):
229
+ super().__init__(
230
+ backbone=backbone,
231
+ img_size=img_size,
232
+ patch_size=patch_size,
233
+ feature_size=feature_size,
234
+ feature_ratio=feature_ratio,
235
+ in_chans=in_chans,
236
+ embed_dim=embed_dim,
237
+ bias=bias,
238
+ proj=proj,
239
+ )
240
+
241
+ @torch.jit.ignore
242
+ def set_grad_checkpointing(self, enable: bool = True):
243
+ if hasattr(self.backbone, 'set_grad_checkpointing'):
244
+ self.backbone.set_grad_checkpointing(enable=enable)
245
+ elif hasattr(self.backbone, 'grad_checkpointing'):
246
+ self.backbone.grad_checkpointing = enable
247
+
248
+ def forward(self, x) -> Tuple[torch.Tensor, List[int]]:
249
+ x = self.backbone(x)
250
+ if isinstance(x, (list, tuple)):
251
+ x = x[-1] # last feature if backbone outputs list/tuple of features
252
+ x = self.proj(x)
253
+ return x.flatten(2).transpose(1, 2), x.shape[-2:]
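A usage sketch for HybridEmbed (the backbone below is an assumed example; any timm CNN created with features_only=True should work, since the module picks the last feature map in a list/tuple output):

import torch
import timm
from timm.layers.hybrid_embed import HybridEmbed

backbone = timm.create_model('resnet26d', features_only=True, out_indices=(3,), pretrained=False)
embed = HybridEmbed(backbone, img_size=224, patch_size=1, embed_dim=768)
x = torch.randn(1, 3, 224, 224)
tokens = embed(x)                         # (1, num_patches, 768) token sequence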
pytorch-image-models/timm/layers/inplace_abn.py ADDED
@@ -0,0 +1,87 @@
1
+ import torch
2
+ from torch import nn as nn
3
+
4
+ try:
5
+ from inplace_abn.functions import inplace_abn, inplace_abn_sync
6
+ has_iabn = True
7
+ except ImportError:
8
+ has_iabn = False
9
+
10
+ def inplace_abn(x, weight, bias, running_mean, running_var,
11
+ training=True, momentum=0.1, eps=1e-05, activation="leaky_relu", activation_param=0.01):
12
+ raise ImportError(
13
+ "Please install InplaceABN:'pip install git+https://github.com/mapillary/[email protected]'")
14
+
15
+ def inplace_abn_sync(**kwargs):
16
+ inplace_abn(**kwargs)
17
+
18
+
19
+ class InplaceAbn(nn.Module):
20
+ """Activated Batch Normalization
21
+
22
+ This gathers a BatchNorm and an activation function in a single module
23
+
24
+ Parameters
25
+ ----------
26
+ num_features : int
27
+ Number of feature channels in the input and output.
28
+ eps : float
29
+ Small constant to prevent numerical issues.
30
+ momentum : float
31
+ Momentum factor applied to compute running statistics.
32
+ affine : bool
33
+ If `True` apply learned scale and shift transformation after normalization.
34
+ act_layer : str or nn.Module type
35
+ Name or type of the activation functions, one of: `leaky_relu`, `elu`
36
+ act_param : float
37
+ Negative slope for the `leaky_relu` activation.
38
+ """
39
+
40
+ def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, apply_act=True,
41
+ act_layer="leaky_relu", act_param=0.01, drop_layer=None):
42
+ super(InplaceAbn, self).__init__()
43
+ self.num_features = num_features
44
+ self.affine = affine
45
+ self.eps = eps
46
+ self.momentum = momentum
47
+ if apply_act:
48
+ if isinstance(act_layer, str):
49
+ assert act_layer in ('leaky_relu', 'elu', 'identity', '')
50
+ self.act_name = act_layer if act_layer else 'identity'
51
+ else:
52
+ # convert act layer passed as type to string
53
+ if act_layer == nn.ELU:
54
+ self.act_name = 'elu'
55
+ elif act_layer == nn.LeakyReLU:
56
+ self.act_name = 'leaky_relu'
57
+ elif act_layer is None or act_layer == nn.Identity:
58
+ self.act_name = 'identity'
59
+ else:
60
+ assert False, f'Invalid act layer {act_layer.__name__} for IABN'
61
+ else:
62
+ self.act_name = 'identity'
63
+ self.act_param = act_param
64
+ if self.affine:
65
+ self.weight = nn.Parameter(torch.ones(num_features))
66
+ self.bias = nn.Parameter(torch.zeros(num_features))
67
+ else:
68
+ self.register_parameter('weight', None)
69
+ self.register_parameter('bias', None)
70
+ self.register_buffer('running_mean', torch.zeros(num_features))
71
+ self.register_buffer('running_var', torch.ones(num_features))
72
+ self.reset_parameters()
73
+
74
+ def reset_parameters(self):
75
+ nn.init.constant_(self.running_mean, 0)
76
+ nn.init.constant_(self.running_var, 1)
77
+ if self.affine:
78
+ nn.init.constant_(self.weight, 1)
79
+ nn.init.constant_(self.bias, 0)
80
+
81
+ def forward(self, x):
82
+ output = inplace_abn(
83
+ x, self.weight, self.bias, self.running_mean, self.running_var,
84
+ self.training, self.momentum, self.eps, self.act_name, self.act_param)
85
+ if isinstance(output, tuple):
86
+ output = output[0]
87
+ return output
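A guarded usage sketch for InplaceAbn; it only runs when the external inplace_abn package is installed, per the import fallback above:

import torch
from timm.layers.inplace_abn import InplaceAbn, has_iabn

if has_iabn:
    iabn = InplaceAbn(64, act_layer='leaky_relu', act_param=0.01)
    y = iabn(torch.randn(2, 64, 28, 28))  # fused BN + leaky_relu, same shape as input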
pytorch-image-models/timm/layers/lambda_layer.py ADDED
@@ -0,0 +1,134 @@
1
+ """ Lambda Layer
2
+
3
+ Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention`
4
+ - https://arxiv.org/abs/2102.08602
5
+
6
+ @misc{2102.08602,
7
+ Author = {Irwan Bello},
8
+ Title = {LambdaNetworks: Modeling Long-Range Interactions Without Attention},
9
+ Year = {2021},
10
+ }
11
+
12
+ Status:
13
+ This impl is a WIP. Code snippets in the paper were used as reference but
14
+ good chance some details are missing/wrong.
15
+
16
+ I've only implemented local lambda conv based pos embeddings.
17
+
18
+ For a PyTorch impl that includes other embedding options checkout
19
+ https://github.com/lucidrains/lambda-networks
20
+
21
+ Hacked together by / Copyright 2021 Ross Wightman
22
+ """
23
+ import torch
24
+ from torch import nn
25
+ import torch.nn.functional as F
26
+
27
+ from .grid import ndgrid
28
+ from .helpers import to_2tuple, make_divisible
29
+ from .weight_init import trunc_normal_
30
+
31
+
32
+ def rel_pos_indices(size):
33
+ size = to_2tuple(size)
34
+ pos = torch.stack(ndgrid(torch.arange(size[0]), torch.arange(size[1]))).flatten(1)
35
+ rel_pos = pos[:, None, :] - pos[:, :, None]
36
+ rel_pos[0] += size[0] - 1
37
+ rel_pos[1] += size[1] - 1
38
+ return rel_pos # 2, H * W, H * W
39
+
40
+
41
+ class LambdaLayer(nn.Module):
42
+ """Lambda Layer
43
+
44
+ Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention`
45
+ - https://arxiv.org/abs/2102.08602
46
+
47
+ NOTE: intra-depth parameter 'u' is fixed at 1. It did not appear worth the complexity to add.
48
+
49
+ The internal dimensions of the lambda module are controlled via the interaction of several arguments.
50
+ * the output dimension of the module is specified by dim_out, which falls back to input dim if not set
51
+ * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
52
+ * the query (q) and key (k) dimension are determined by
53
+ * dim_head = (dim_out * attn_ratio // num_heads) if dim_head is None
54
+ * q = num_heads * dim_head, k = dim_head
55
+ * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not set
56
+
57
+ Args:
58
+ dim (int): input dimension to the module
59
+ dim_out (int): output dimension of the module, same as dim if not set
60
+ feat_size (Tuple[int, int]): size of input feature_map for relative pos variant H, W
61
+ stride (int): output stride of the module, avg pool used if stride == 2
62
+ num_heads (int): parallel attention heads.
63
+ dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set
64
+ r (int): local lambda convolution radius. Use lambda conv if set, else relative pos if not. (default: 9)
65
+ qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0)
66
+ qkv_bias (bool): add bias to q, k, and v projections
67
+ """
68
+ def __init__(
69
+ self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=16, r=9,
70
+ qk_ratio=1.0, qkv_bias=False):
71
+ super().__init__()
72
+ dim_out = dim_out or dim
73
+ assert dim_out % num_heads == 0, 'dim_out should be divisible by num_heads'
74
+ self.dim_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads
75
+ self.num_heads = num_heads
76
+ self.dim_v = dim_out // num_heads
77
+
78
+ self.qkv = nn.Conv2d(
79
+ dim,
80
+ num_heads * self.dim_qk + self.dim_qk + self.dim_v,
81
+ kernel_size=1, bias=qkv_bias)
82
+ self.norm_q = nn.BatchNorm2d(num_heads * self.dim_qk)
83
+ self.norm_v = nn.BatchNorm2d(self.dim_v)
84
+
85
+ if r is not None:
86
+ # local lambda convolution for pos
87
+ self.conv_lambda = nn.Conv3d(1, self.dim_qk, (r, r, 1), padding=(r // 2, r // 2, 0))
88
+ self.pos_emb = None
89
+ self.rel_pos_indices = None
90
+ else:
91
+ # relative pos embedding
92
+ assert feat_size is not None
93
+ feat_size = to_2tuple(feat_size)
94
+ rel_size = [2 * s - 1 for s in feat_size]
95
+ self.conv_lambda = None
96
+ self.pos_emb = nn.Parameter(torch.zeros(rel_size[0], rel_size[1], self.dim_qk))
97
+ self.register_buffer('rel_pos_indices', rel_pos_indices(feat_size), persistent=False)
98
+
99
+ self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity()
100
+
101
+ self.reset_parameters()
102
+
103
+ def reset_parameters(self):
104
+ trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in
105
+ if self.conv_lambda is not None:
106
+ trunc_normal_(self.conv_lambda.weight, std=self.dim_qk ** -0.5)
107
+ if self.pos_emb is not None:
108
+ trunc_normal_(self.pos_emb, std=.02)
109
+
110
+ def forward(self, x):
111
+ B, C, H, W = x.shape
112
+ M = H * W
113
+ qkv = self.qkv(x)
114
+ q, k, v = torch.split(qkv, [
115
+ self.num_heads * self.dim_qk, self.dim_qk, self.dim_v], dim=1)
116
+ q = self.norm_q(q).reshape(B, self.num_heads, self.dim_qk, M).transpose(-1, -2) # B, num_heads, M, K
117
+ v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2) # B, M, V
118
+ k = F.softmax(k.reshape(B, self.dim_qk, M), dim=-1) # B, K, M
119
+
120
+ content_lam = k @ v # B, K, V
121
+ content_out = q @ content_lam.unsqueeze(1) # B, num_heads, M, V
122
+
123
+ if self.pos_emb is None:
124
+ position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v)) # B, H, W, V, K
125
+ position_lam = position_lam.reshape(B, 1, self.dim_qk, H * W, self.dim_v).transpose(2, 3) # B, 1, M, K, V
126
+ else:
127
+ # FIXME relative pos embedding path not fully verified
128
+ pos_emb = self.pos_emb[self.rel_pos_indices[0], self.rel_pos_indices[1]].expand(B, -1, -1, -1)
129
+ position_lam = (pos_emb.transpose(-1, -2) @ v.unsqueeze(1)).unsqueeze(1) # B, 1, M, K, V
130
+ position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) # B, num_heads, M, V
131
+
132
+ out = (content_out + position_out).transpose(-1, -2).reshape(B, C, H, W) # B, C (num_heads * V), H, W
133
+ out = self.pool(out)
134
+ return out
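A usage sketch for LambdaLayer with the default local lambda convolution position term (import path assumed; dim and dim_out are kept equal here so the final reshape matches the input channels):

import torch
from timm.layers.lambda_layer import LambdaLayer

lam = LambdaLayer(dim=128, dim_out=128, num_heads=4, dim_head=16, r=9)
x = torch.randn(2, 128, 32, 32)
y = lam(x)                                # (2, 128, 32, 32)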