diff --git a/pytorch-image-models/timm/loss/asymmetric_loss.py b/pytorch-image-models/timm/loss/asymmetric_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..6c1962b7249b8b87795162814ef383d5fa7c1b66 --- /dev/null +++ b/pytorch-image-models/timm/loss/asymmetric_loss.py @@ -0,0 +1,97 @@ +import torch +import torch.nn as nn + + +class AsymmetricLossMultiLabel(nn.Module): + def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False): + super(AsymmetricLossMultiLabel, self).__init__() + + self.gamma_neg = gamma_neg + self.gamma_pos = gamma_pos + self.clip = clip + self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss + self.eps = eps + + def forward(self, x, y): + """ + Parameters + ---------- + x: input logits + y: targets (multi-label binarized vector) + """ + + # Calculating Probabilities + x_sigmoid = torch.sigmoid(x) + xs_pos = x_sigmoid + xs_neg = 1 - x_sigmoid + + # Asymmetric Clipping + if self.clip is not None and self.clip > 0: + xs_neg = (xs_neg + self.clip).clamp(max=1) + + # Basic CE calculation + los_pos = y * torch.log(xs_pos.clamp(min=self.eps)) + los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps)) + loss = los_pos + los_neg + + # Asymmetric Focusing + if self.gamma_neg > 0 or self.gamma_pos > 0: + if self.disable_torch_grad_focal_loss: + torch.set_grad_enabled(False) + pt0 = xs_pos * y + pt1 = xs_neg * (1 - y) # pt = p if t > 0 else 1-p + pt = pt0 + pt1 + one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y) + one_sided_w = torch.pow(1 - pt, one_sided_gamma) + if self.disable_torch_grad_focal_loss: + torch.set_grad_enabled(True) + loss *= one_sided_w + + return -loss.sum() + + +class AsymmetricLossSingleLabel(nn.Module): + def __init__(self, gamma_pos=1, gamma_neg=4, eps: float = 0.1, reduction='mean'): + super(AsymmetricLossSingleLabel, self).__init__() + + self.eps = eps + self.logsoftmax = nn.LogSoftmax(dim=-1) + self.targets_classes = [] # prevent gpu repeated memory allocation + self.gamma_pos = gamma_pos + self.gamma_neg = gamma_neg + self.reduction = reduction + + def forward(self, inputs, target, reduction=None): + """ + Parameters + ---------- + inputs: input logits + target: targets (integer class indices) + """ + + num_classes = inputs.size()[-1] + log_preds = self.logsoftmax(inputs) + self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1) + + # ASL weights + targets = self.targets_classes + anti_targets = 1 - targets + xs_pos = torch.exp(log_preds) + xs_neg = 1 - xs_pos + xs_pos = xs_pos * targets + xs_neg = xs_neg * anti_targets + asymmetric_w = torch.pow(1 - xs_pos - xs_neg, + self.gamma_pos * targets + self.gamma_neg * anti_targets) + log_preds = log_preds * asymmetric_w + + if self.eps > 0: # label smoothing + self.targets_classes = self.targets_classes.mul(1 - self.eps).add(self.eps / num_classes) + + # loss calculation + loss = - self.targets_classes.mul(log_preds) + + loss = loss.sum(dim=-1) + if self.reduction == 'mean': + loss = loss.mean() + + return loss
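A minimal usage sketch for the two losses added above (illustrative only; the batch sizes, label counts, and tensors below are made up and not part of the diff): AsymmetricLossMultiLabel takes raw logits and a multi-hot target of the same (batch, num_classes) shape and returns a sum-reduced scalar, while AsymmetricLossSingleLabel takes (batch, num_classes) logits plus integer class indices and applies label smoothing via eps.

import torch
from timm.loss.asymmetric_loss import AsymmetricLossMultiLabel, AsymmetricLossSingleLabel

# multi-label case: logits and multi-hot targets share the same shape
multi_criterion = AsymmetricLossMultiLabel(gamma_neg=4, gamma_pos=1, clip=0.05)
logits = torch.randn(8, 20)                     # hypothetical batch of 8 samples, 20 labels
targets = torch.randint(0, 2, (8, 20)).float()  # multi-hot targets
multi_loss = multi_criterion(logits, targets)   # scalar, summed over batch and classes

# single-label case: integer class indices, eps controls label smoothing
single_criterion = AsymmetricLossSingleLabel(gamma_pos=1, gamma_neg=4, eps=0.1)
logits = torch.randn(8, 1000)
target = torch.randint(0, 1000, (8,))           # one class index per sample
single_loss = single_criterion(logits, target)  # mean-reduced by default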
diff --git a/pytorch-image-models/timm/models/_efficientnet_builder.py b/pytorch-image-models/timm/models/_efficientnet_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..24e47e847741a5258ee4674d7fa1798c60c62cc7 --- /dev/null +++ b/pytorch-image-models/timm/models/_efficientnet_builder.py @@ -0,0 +1,576 @@ +""" EfficientNet, MobileNetV3, etc Builder + +Assembles EfficientNet and related network feature blocks from string definitions. +Handles stride, dilation calculations, and selects feature extraction points.
+ +Hacked together by / Copyright 2019, Ross Wightman +""" +from typing import Callable, Optional + +import logging +import math +import re +from copy import deepcopy +from functools import partial +from typing import Any, Dict, List + +import torch.nn as nn + +from timm.layers import CondConv2d, get_condconv_initializer, get_act_layer, get_attn, make_divisible, LayerType +from ._efficientnet_blocks import * +from ._manipulate import named_modules + +__all__ = ["EfficientNetBuilder", "BlockArgs", "decode_arch_def", "efficientnet_init_weights", + 'resolve_bn_args', 'resolve_act_layer', 'round_channels', 'BN_MOMENTUM_TF_DEFAULT', 'BN_EPS_TF_DEFAULT'] + +_logger = logging.getLogger(__name__) + + +_DEBUG_BUILDER = False + +# Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per +# papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay) +# NOTE: momentum varies btw .99 and .9997 depending on source +# .99 in official TF TPU impl +# .9997 (/w .999 in search space) for paper +BN_MOMENTUM_TF_DEFAULT = 1 - 0.99 +BN_EPS_TF_DEFAULT = 1e-3 +_BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT) + +BlockArgs = List[List[Dict[str, Any]]] + + +def get_bn_args_tf(): + return _BN_ARGS_TF.copy() + + +def resolve_bn_args(kwargs): + bn_args = {} + bn_momentum = kwargs.pop('bn_momentum', None) + if bn_momentum is not None: + bn_args['momentum'] = bn_momentum + bn_eps = kwargs.pop('bn_eps', None) + if bn_eps is not None: + bn_args['eps'] = bn_eps + return bn_args + + +def resolve_act_layer(kwargs, default='relu'): + return get_act_layer(kwargs.pop('act_layer', default)) + + +def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None, round_limit=0.9): + """Round number of filters based on depth multiplier.""" + if not multiplier: + return channels + return make_divisible(channels * multiplier, divisor, channel_min, round_limit=round_limit) + + +def _log_info_if(msg, condition): + if condition: + _logger.info(msg) + + +def _parse_ksize(ss): + if ss.isdigit(): + return int(ss) + else: + return [int(k) for k in ss.split('.')] + + +def _decode_block_str(block_str): + """ Decode block definition string + + Gets a list of block arg (dicts) through a string notation of arguments. + E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip + + All args can exist in any order with the exception of the leading string which + is assumed to indicate the block type. + + leading string - block type ( + ir = InvertedResidual, ds = DepthwiseSep, dsa = DepthwiseSep with pw act, cn = ConvBnAct) + r - number of repeat blocks, + k - kernel size, + s - strides (1-9), + e - expansion ratio, + c - output channels, + se - squeeze/excitation ratio, + n - activation fn ('re', 'r6', 'hs', or 'sw') + Args: + block_str: a string representation of block arguments.
+ Returns: + A tuple of (block args dict, number of repeats) + Raises: + ValueError: if the string def is not properly specified (TODO) + """ + assert isinstance(block_str, str) + ops = block_str.split('_') + block_type = ops[0] # take the block type off the front + ops = ops[1:] + options = {} + skip = None + for op in ops: + # string options being checked on individual basis, combine if they grow + if op == 'noskip': + skip = False # force no skip connection + elif op == 'skip': + skip = True # force a skip connection + elif op.startswith('n'): + # activation fn + key = op[0] + v = op[1:] + if v == 're': + value = get_act_layer('relu') + elif v == 'r6': + value = get_act_layer('relu6') + elif v == 'hs': + value = get_act_layer('hard_swish') + elif v == 'sw': + value = get_act_layer('swish') # aka SiLU + elif v == 'mi': + value = get_act_layer('mish') + else: + continue + options[key] = value + else: + # all numeric options + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # if act_layer is None, the model default (passed to model init) will be used + act_layer = options['n'] if 'n' in options else None + start_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 + end_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 + force_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def + num_repeat = int(options['r']) + + # each type of block has different valid arguments, fill accordingly + block_args = dict( + block_type=block_type, + out_chs=int(options['c']), + stride=int(options['s']), + act_layer=act_layer, + ) + if block_type == 'ir': + block_args.update(dict( + dw_kernel_size=_parse_ksize(options['k']), + exp_kernel_size=start_kernel_size, + pw_kernel_size=end_kernel_size, + exp_ratio=float(options['e']), + se_ratio=float(options.get('se', 0.)), + noskip=skip is False, + s2d=int(options.get('d', 0)) > 0, + )) + if 'cc' in options: + block_args['num_experts'] = int(options['cc']) + elif block_type == 'ds' or block_type == 'dsa': + block_args.update(dict( + dw_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=end_kernel_size, + se_ratio=float(options.get('se', 0.)), + pw_act=block_type == 'dsa', + noskip=block_type == 'dsa' or skip is False, + s2d=int(options.get('d', 0)) > 0, + )) + elif block_type == 'er': + block_args.update(dict( + exp_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=end_kernel_size, + exp_ratio=float(options['e']), + force_in_chs=force_in_chs, + se_ratio=float(options.get('se', 0.)), + noskip=skip is False, + )) + elif block_type == 'cn': + block_args.update(dict( + kernel_size=int(options['k']), + skip=skip is True, + )) + elif block_type == 'uir': + # override exp / proj kernels for start/end in uir block + start_kernel_size = _parse_ksize(options['a']) if 'a' in options else 0 + end_kernel_size = _parse_ksize(options['p']) if 'p' in options else 0 + block_args.update(dict( + dw_kernel_size_start=start_kernel_size, # overload exp ks arg for dw start + dw_kernel_size_mid=_parse_ksize(options['k']), + dw_kernel_size_end=end_kernel_size, # overload pw ks arg for dw end + exp_ratio=float(options['e']), + se_ratio=float(options.get('se', 0.)), + noskip=skip is False, + )) + elif block_type == 'mha': + kv_dim = int(options['d']) + block_args.update(dict( + dw_kernel_size=_parse_ksize(options['k']), + num_heads=int(options['h']), + key_dim=kv_dim, + value_dim=kv_dim, + kv_stride=int(options.get('v', 1)), + noskip=skip is False, + )) + elif
block_type == 'mqa': + kv_dim = int(options['d']) + block_args.update(dict( + dw_kernel_size=_parse_ksize(options['k']), + num_heads=int(options['h']), + key_dim=kv_dim, + value_dim=kv_dim, + kv_stride=int(options.get('v', 1)), + noskip=skip is False, + )) + else: + assert False, 'Unknown block type (%s)' % block_type + + if 'gs' in options: + block_args['group_size'] = int(options['gs']) + + return block_args, num_repeat + + +def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): + """ Per-stage depth scaling + Scales the block repeats in each stage. This depth scaling impl maintains + compatibility with the EfficientNet scaling method, while allowing sensible + scaling for other models that may have multiple block arg definitions in each stage. + """ + + # We scale the total repeat count for each stage, there may be multiple + # block arg defs per stage so we need to sum. + num_repeat = sum(repeats) + if depth_trunc == 'round': + # Truncating to int by rounding allows stages with few repeats to remain + # proportionally smaller for longer. This is a good choice when stage definitions + # include single repeat stages that we'd prefer to keep that way as long as possible + num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) + else: + # The default for EfficientNet truncates repeats to int via 'ceil'. + # Any multiplier > 1.0 will result in an increased depth for every stage. + num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) + + # Proportionally distribute repeat count scaling to each block definition in the stage. + # Allocation is done in reverse as it results in the first block being less likely to be scaled. + # The first block makes less sense to repeat in most of the arch definitions. 
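+ # For example (illustrative numbers only, not from any model cfg): repeats=[1, 2] with depth_multiplier=1.2 and 'ceil' trunc gives num_repeat_scaled=ceil(3 * 1.2)=4; allocating in reverse yields rs=3 then rs=1, so repeats_scaled=[1, 3].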
+ repeats_scaled = [] + for r in repeats[::-1]: + rs = max(1, round((r / num_repeat * num_repeat_scaled))) + repeats_scaled.append(rs) + num_repeat -= r + num_repeat_scaled -= rs + repeats_scaled = repeats_scaled[::-1] + + # Apply the calculated scaling to each block arg in the stage + sa_scaled = [] + for ba, rep in zip(stack_args, repeats_scaled): + sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) + return sa_scaled + + +def decode_arch_def( + arch_def, + depth_multiplier=1.0, + depth_trunc='ceil', + experts_multiplier=1, + fix_first_last=False, + group_size=None, +): + """ Decode block architecture definition strings -> block kwargs + + Args: + arch_def: architecture definition strings, list of list of strings + depth_multiplier: network depth multiplier + depth_trunc: network depth truncation mode when applying multiplier + experts_multiplier: CondConv experts multiplier + fix_first_last: fix first and last block depths when multiplier is applied + group_size: group size override for all blocks that weren't explicitly set in arch string + + Returns: + list of list of block kwargs + """ + arch_args = [] + if isinstance(depth_multiplier, tuple): + assert len(depth_multiplier) == len(arch_def) + else: + depth_multiplier = (depth_multiplier,) * len(arch_def) + for stack_idx, (block_strings, multiplier) in enumerate(zip(arch_def, depth_multiplier)): + assert isinstance(block_strings, list) + stack_args = [] + repeats = [] + for block_str in block_strings: + assert isinstance(block_str, str) + ba, rep = _decode_block_str(block_str) + if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: + ba['num_experts'] *= experts_multiplier + if group_size is not None: + ba.setdefault('group_size', group_size) + stack_args.append(ba) + repeats.append(rep) + if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1): + arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc)) + else: + arch_args.append(_scale_stage_depth(stack_args, repeats, multiplier, depth_trunc)) + return arch_args + + +class EfficientNetBuilder: + """ Build Trunk Blocks + + This ended up being somewhat of a cross between + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py + and + https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py + + """ + def __init__( + self, + output_stride: int = 32, + pad_type: str = '', + round_chs_fn: Callable = round_channels, + se_from_exp: bool = False, + act_layer: Optional[LayerType] = None, + norm_layer: Optional[LayerType] = None, + aa_layer: Optional[LayerType] = None, + se_layer: Optional[LayerType] = None, + drop_path_rate: float = 0., + layer_scale_init_value: Optional[float] = None, + feature_location: str = '', + ): + self.output_stride = output_stride + self.pad_type = pad_type + self.round_chs_fn = round_chs_fn + self.se_from_exp = se_from_exp # calculate se channel reduction from expanded (mid) chs + self.act_layer = act_layer + self.norm_layer = norm_layer + self.aa_layer = aa_layer + self.se_layer = get_attn(se_layer) + try: + self.se_layer(8, rd_ratio=1.0) # test if attn layer accepts rd_ratio arg + self.se_has_ratio = True + except TypeError: + self.se_has_ratio = False + self.drop_path_rate = drop_path_rate + self.layer_scale_init_value = layer_scale_init_value + if feature_location == 'depthwise': + # old 'depthwise' mode renamed 'expansion' to match TF impl, old expansion mode didn't make sense +
_logger.warning("feature_location=='depthwise' is deprecated, using 'expansion'") + feature_location = 'expansion' + self.feature_location = feature_location + assert feature_location in ('bottleneck', 'expansion', '') + self.verbose = _DEBUG_BUILDER + + # state updated during build, consumed by model + self.in_chs = None + self.features = [] + + def _make_block(self, ba, block_idx, block_count): + drop_path_rate = self.drop_path_rate * block_idx / block_count + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self.round_chs_fn(ba['out_chs']) + s2d = ba.get('s2d', 0) + if s2d > 0: + # adjust while space2depth active + ba['out_chs'] *= 4 + if 'force_in_chs' in ba and ba['force_in_chs']: + # NOTE this is a hack to work around mismatch in TF EdgeEffNet impl + ba['force_in_chs'] = self.round_chs_fn(ba['force_in_chs']) + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer + assert ba['act_layer'] is not None + ba['norm_layer'] = self.norm_layer + ba['drop_path_rate'] = drop_path_rate + + if self.aa_layer is not None: + ba['aa_layer'] = self.aa_layer + + se_ratio = ba.pop('se_ratio', None) + if se_ratio and self.se_layer is not None: + if not self.se_from_exp: + # adjust se_ratio by expansion ratio if calculating se channels from block input + se_ratio /= ba.get('exp_ratio', 1.0) + if s2d == 1: + # adjust for start of space2depth + se_ratio /= 4 + if self.se_has_ratio: + ba['se_layer'] = partial(self.se_layer, rd_ratio=se_ratio) + else: + ba['se_layer'] = self.se_layer + + if bt == 'ir': + _log_info_if(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = CondConvResidual(**ba) if ba.get('num_experts', 0) else InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + _log_info_if(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = DepthwiseSeparableConv(**ba) + elif bt == 'er': + _log_info_if(' EdgeResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = EdgeResidual(**ba) + elif bt == 'cn': + _log_info_if(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = ConvBnAct(**ba) + elif bt == 'uir': + _log_info_if(' UniversalInvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = UniversalInvertedResidual(**ba, layer_scale_init_value=self.layer_scale_init_value) + elif bt == 'mqa': + _log_info_if(' MobileMultiQueryAttention {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = MobileAttention(**ba, use_multi_query=True, layer_scale_init_value=self.layer_scale_init_value) + elif bt == 'mha': + _log_info_if(' MobileMultiHeadAttention {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = MobileAttention(**ba, layer_scale_init_value=self.layer_scale_init_value) + else: + assert False, 'Unknown block type (%s) while building model.' % bt + + self.in_chs = ba['out_chs'] # update in_chs for arg of next block + return block + + def __call__(self, in_chs, model_block_args): + """ Build the blocks + Args: + in_chs: Number of input-channels passed to first block + model_block_args: A list of lists, outer list defines stages, inner + list contains strings defining block configuration(s) + Return: + List of block stacks (each stack wrapped in nn.Sequential) + """ + _log_info_if('Building model trunk with %d stages...' 
% len(model_block_args), self.verbose) + self.in_chs = in_chs + total_block_count = sum([len(x) for x in model_block_args]) + total_block_idx = 0 + current_stride = 2 + current_dilation = 1 + stages = [] + if model_block_args[0][0]['stride'] > 1: + # if the first block starts with a stride, we need to extract first level feat from stem + feature_info = dict(module='bn1', num_chs=in_chs, stage=0, reduction=current_stride) + self.features.append(feature_info) + + # outer list of block_args defines the stacks + space2depth = 0 + for stack_idx, stack_args in enumerate(model_block_args): + last_stack = stack_idx + 1 == len(model_block_args) + _log_info_if('Stack: {}'.format(stack_idx), self.verbose) + assert isinstance(stack_args, list) + + blocks = [] + # each stack (stage of blocks) contains a list of block arguments + for block_idx, block_args in enumerate(stack_args): + last_block = block_idx + 1 == len(stack_args) + _log_info_if(' Block: {}'.format(block_idx), self.verbose) + + assert block_args['stride'] in (1, 2) + if block_idx >= 1: # only the first block in any stack can have a stride > 1 + block_args['stride'] = 1 + + if not space2depth and block_args.pop('s2d', False): + assert block_args['stride'] == 1 + space2depth = 1 + + if space2depth > 0: + # FIXME s2d is a WIP + if space2depth == 2 and block_args['stride'] == 2: + block_args['stride'] = 1 + # to end s2d region, need to correct expansion and se ratio relative to input + block_args['exp_ratio'] /= 4 + space2depth = 0 + else: + block_args['s2d'] = space2depth + + extract_features = False + if last_block: + next_stack_idx = stack_idx + 1 + extract_features = next_stack_idx >= len(model_block_args) or \ + model_block_args[next_stack_idx][0]['stride'] > 1 + + next_dilation = current_dilation + if block_args['stride'] > 1: + next_output_stride = current_stride * block_args['stride'] + if next_output_stride > self.output_stride: + next_dilation = current_dilation * block_args['stride'] + block_args['stride'] = 1 + _log_info_if(' Converting stride to dilation to maintain output_stride=={}'.format( + self.output_stride), self.verbose) + else: + current_stride = next_output_stride + block_args['dilation'] = current_dilation + if next_dilation != current_dilation: + current_dilation = next_dilation + + # create the block + block = self._make_block(block_args, total_block_idx, total_block_count) + blocks.append(block) + + if space2depth == 1: + space2depth = 2 + + # stash feature module name and channel info for model feature extraction + if extract_features: + feature_info = dict( + stage=stack_idx + 1, + reduction=current_stride, + **block.feature_info(self.feature_location), + ) + leaf_name = feature_info.get('module', '') + if leaf_name: + feature_info['module'] = '.'.join([f'blocks.{stack_idx}.{block_idx}', leaf_name]) + else: + assert last_block + feature_info['module'] = f'blocks.{stack_idx}' + self.features.append(feature_info) + + total_block_idx += 1 # incr global block idx (across all stacks) + stages.append(nn.Sequential(*blocks)) + return stages + + +def _init_weight_goog(m, n='', fix_group_fanout=True): + """ Weight initialization as per Tensorflow official implementations. 
+ + Args: + m (nn.Module): module to init + n (str): module name + fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs + + Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc: + * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py + * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + """ + if isinstance(m, CondConv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + init_weight_fn = get_condconv_initializer( + lambda w: nn.init.normal_(w, 0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) + init_weight_fn(m.weight) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + nn.init.normal_(m.weight, 0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + fan_out = m.weight.size(0) # fan-out + fan_in = 0 + if 'routing_fn' in n: + fan_in = m.weight.size(1) + init_range = 1.0 / math.sqrt(fan_in + fan_out) + nn.init.uniform_(m.weight, -init_range, init_range) + nn.init.zeros_(m.bias) + + +def efficientnet_init_weights(model: nn.Module, init_fn=None): + init_fn = init_fn or _init_weight_goog + for n, m in model.named_modules(): + init_fn(m, n) + + # iterate and call any module.init_weights() fn, children first + for n, m in named_modules(model): + if hasattr(m, 'init_weights'): + m.init_weights() diff --git a/pytorch-image-models/timm/models/_helpers.py b/pytorch-image-models/timm/models/_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..ca5dc2445e0249e1c3f2893264cf53f573e4d0b0 --- /dev/null +++ b/pytorch-image-models/timm/models/_helpers.py @@ -0,0 +1,166 @@ +""" Model creation / weight loading / state_dict helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +import os +from typing import Any, Callable, Dict, Optional, Union + +import torch +try: + import safetensors.torch + _has_safetensors = True +except ImportError: + _has_safetensors = False + +_logger = logging.getLogger(__name__) + +__all__ = ['clean_state_dict', 'load_state_dict', 'load_checkpoint', 'remap_state_dict', 'resume_checkpoint'] + + +def _remove_prefix(text, prefix): + # FIXME replace with 3.9 stdlib fn when min at 3.9 + if text.startswith(prefix): + return text[len(prefix):] + return text + + +def clean_state_dict(state_dict: Dict[str, Any]) -> Dict[str, Any]: + # 'clean' checkpoint by removing .module prefix from state dict if it exists from parallel training + cleaned_state_dict = {} + to_remove = ( + 'module.', # DDP wrapper + '_orig_mod.', # torchcompile dynamo wrapper + ) + for k, v in state_dict.items(): + for r in to_remove: + k = _remove_prefix(k, r) + cleaned_state_dict[k] = v + return cleaned_state_dict + + +def load_state_dict( + checkpoint_path: str, + use_ema: bool = True, + device: Union[str, torch.device] = 'cpu', + weights_only: bool = False, +) -> Dict[str, Any]: + if checkpoint_path and os.path.isfile(checkpoint_path): + # Check if safetensors or not and load weights accordingly + if str(checkpoint_path).endswith(".safetensors"): + assert _has_safetensors, "`pip install safetensors` to use .safetensors" + checkpoint = 
safetensors.torch.load_file(checkpoint_path, device=device) + else: + try: + checkpoint = torch.load(checkpoint_path, map_location=device, weights_only=weights_only) + except TypeError: + checkpoint = torch.load(checkpoint_path, map_location=device) + + state_dict_key = '' + if isinstance(checkpoint, dict): + if use_ema and checkpoint.get('state_dict_ema', None) is not None: + state_dict_key = 'state_dict_ema' + elif use_ema and checkpoint.get('model_ema', None) is not None: + state_dict_key = 'model_ema' + elif 'state_dict' in checkpoint: + state_dict_key = 'state_dict' + elif 'model' in checkpoint: + state_dict_key = 'model' + state_dict = clean_state_dict(checkpoint[state_dict_key] if state_dict_key else checkpoint) + _logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path)) + return state_dict + else: + _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) + raise FileNotFoundError() + + +def load_checkpoint( + model: torch.nn.Module, + checkpoint_path: str, + use_ema: bool = True, + device: Union[str, torch.device] = 'cpu', + strict: bool = True, + remap: bool = False, + filter_fn: Optional[Callable] = None, + weights_only: bool = False, +): + if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'): + # numpy checkpoint, try to load via model specific load_pretrained fn + if hasattr(model, 'load_pretrained'): + model.load_pretrained(checkpoint_path) + else: + raise NotImplementedError('Model cannot load numpy checkpoint') + return + + state_dict = load_state_dict(checkpoint_path, use_ema, device=device, weights_only=weights_only) + if remap: + state_dict = remap_state_dict(state_dict, model) + elif filter_fn: + state_dict = filter_fn(state_dict, model) + incompatible_keys = model.load_state_dict(state_dict, strict=strict) + return incompatible_keys + + +def remap_state_dict( + state_dict: Dict[str, Any], + model: torch.nn.Module, + allow_reshape: bool = True +): + """ remap checkpoint by iterating over state dicts in order (ignoring original keys). + This assumes models (and originating state dict) were created with params registered in same order. + """ + out_dict = {} + for (ka, va), (kb, vb) in zip(model.state_dict().items(), state_dict.items()): + assert va.numel() == vb.numel(), f'Tensor size mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.' + if va.shape != vb.shape: + if allow_reshape: + vb = vb.reshape(va.shape) + else: + assert False, f'Tensor shape mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.' 
+ out_dict[ka] = vb + return out_dict + + +def resume_checkpoint( + model: torch.nn.Module, + checkpoint_path: str, + optimizer: torch.optim.Optimizer = None, + loss_scaler: Any = None, + log_info: bool = True, +): + resume_epoch = None + if os.path.isfile(checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=False) + if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: + if log_info: + _logger.info('Restoring model state from checkpoint...') + state_dict = clean_state_dict(checkpoint['state_dict']) + model.load_state_dict(state_dict) + + if optimizer is not None and 'optimizer' in checkpoint: + if log_info: + _logger.info('Restoring optimizer state from checkpoint...') + optimizer.load_state_dict(checkpoint['optimizer']) + + if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint: + if log_info: + _logger.info('Restoring AMP loss scaler state from checkpoint...') + loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key]) + + if 'epoch' in checkpoint: + resume_epoch = checkpoint['epoch'] + if 'version' in checkpoint and checkpoint['version'] > 1: + resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save + + if log_info: + _logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch'])) + else: + model.load_state_dict(checkpoint) + if log_info: + _logger.info("Loaded checkpoint '{}'".format(checkpoint_path)) + return resume_epoch + else: + _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) + raise FileNotFoundError() + + diff --git a/pytorch-image-models/timm/models/_manipulate.py b/pytorch-image-models/timm/models/_manipulate.py new file mode 100644 index 0000000000000000000000000000000000000000..e689b39276650b3de9d6e8e2395a6057f6873074 --- /dev/null +++ b/pytorch-image-models/timm/models/_manipulate.py @@ -0,0 +1,278 @@ +import collections.abc +import math +import re +from collections import defaultdict +from itertools import chain +from typing import Any, Callable, Dict, Iterator, Tuple, Type, Union + +import torch +from torch import nn as nn +from torch.utils.checkpoint import checkpoint + +__all__ = ['model_parameters', 'named_apply', 'named_modules', 'named_modules_with_params', 'adapt_input_conv', + 'group_with_matcher', 'group_modules', 'group_parameters', 'flatten_modules', 'checkpoint_seq'] + + +def model_parameters(model: nn.Module, exclude_head: bool = False): + if exclude_head: + # FIXME this a bit of a quick and dirty hack to skip classifier head params based on ordering + return [p for p in model.parameters()][:-2] + else: + return model.parameters() + + +def named_apply( + fn: Callable, + module: nn.Module, name='', + depth_first: bool = True, + include_root: bool = False, +) -> nn.Module: + if not depth_first and include_root: + fn(module=module, name=name) + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + fn(module=module, name=name) + return module + + +def named_modules( + module: nn.Module, + name: str = '', + depth_first: bool = True, + include_root: bool = False, +): + if not depth_first and include_root: + yield name, module + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + yield from named_modules( + module=child_module, 
name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + yield name, module + + +def named_modules_with_params( + module: nn.Module, + name: str = '', + depth_first: bool = True, + include_root: bool = False, +): + if module._parameters and not depth_first and include_root: + yield name, module + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + yield from named_modules_with_params( + module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if module._parameters and depth_first and include_root: + yield name, module + + +MATCH_PREV_GROUP = (99999,) + + +def group_with_matcher( + named_objects: Iterator[Tuple[str, Any]], + group_matcher: Union[Dict, Callable], + return_values: bool = False, + reverse: bool = False +): + if isinstance(group_matcher, dict): + # dictionary matcher contains a dict of raw-string regex expr that must be compiled + compiled = [] + for group_ordinal, (group_name, mspec) in enumerate(group_matcher.items()): + if mspec is None: + continue + # map all matching specifications into 3-tuple (compiled re, prefix, suffix) + if isinstance(mspec, (tuple, list)): + # multi-entry match specifications require each sub-spec to be a 2-tuple (re, suffix) + for sspec in mspec: + compiled += [(re.compile(sspec[0]), (group_ordinal,), sspec[1])] + else: + compiled += [(re.compile(mspec), (group_ordinal,), None)] + group_matcher = compiled + + def _get_grouping(name): + if isinstance(group_matcher, (list, tuple)): + for match_fn, prefix, suffix in group_matcher: + r = match_fn.match(name) + if r: + parts = (prefix, r.groups(), suffix) + # map all tuple elem to int for numeric sort, filter out None entries + return tuple(map(float, chain.from_iterable(filter(None, parts)))) + return float('inf'), # un-matched layers (neck, head) mapped to largest ordinal + else: + ord = group_matcher(name) + if not isinstance(ord, collections.abc.Iterable): + return ord, + return tuple(ord) + + # map layers into groups via ordinals (ints or tuples of ints) from matcher + grouping = defaultdict(list) + for k, v in named_objects: + grouping[_get_grouping(k)].append(v if return_values else k) + + # remap to integers + layer_id_to_param = defaultdict(list) + lid = -1 + for k in sorted(filter(lambda x: x is not None, grouping.keys())): + if lid < 0 or k[-1] != MATCH_PREV_GROUP[0]: + lid += 1 + layer_id_to_param[lid].extend(grouping[k]) + + if reverse: + assert not return_values, "reverse mapping only sensible for name output" + # output reverse mapping + param_to_layer_id = {} + for lid, lm in layer_id_to_param.items(): + for n in lm: + param_to_layer_id[n] = lid + return param_to_layer_id + + return layer_id_to_param + + +def group_parameters( + module: nn.Module, + group_matcher, + return_values: bool = False, + reverse: bool = False, +): + return group_with_matcher( + module.named_parameters(), group_matcher, return_values=return_values, reverse=reverse) + + +def group_modules( + module: nn.Module, + group_matcher, + return_values: bool = False, + reverse: bool = False, +): + return group_with_matcher( + named_modules_with_params(module), group_matcher, return_values=return_values, reverse=reverse) + + +def flatten_modules( + named_modules: Iterator[Tuple[str, nn.Module]], + depth: int = 1, + prefix: Union[str, Tuple[str, ...]] = '', + module_types: Union[str, Tuple[Type[nn.Module]]] = 'sequential', +): + prefix_is_tuple = isinstance(prefix, tuple) + if 
isinstance(module_types, str): + if module_types == 'container': + module_types = (nn.Sequential, nn.ModuleList, nn.ModuleDict) + else: + module_types = (nn.Sequential,) + for name, module in named_modules: + if depth and isinstance(module, module_types): + yield from flatten_modules( + module.named_children(), + depth - 1, + prefix=(name,) if prefix_is_tuple else name, + module_types=module_types, + ) + else: + if prefix_is_tuple: + name = prefix + (name,) + yield name, module + else: + if prefix: + name = '.'.join([prefix, name]) + yield name, module + + +def checkpoint_seq( + functions, + x, + every=1, + flatten=False, + skip_last=False, + preserve_rng_state=True +): + r"""A helper function for checkpointing sequential models. + + Sequential models execute a list of modules/functions in order + (sequentially). Therefore, we can divide such a sequence into segments + and checkpoint each segment. All checkpointed segments run in :func:`torch.no_grad` + manner, i.e., not storing the intermediate activations. The inputs of each + checkpointed segment will be saved for re-running the segment in the backward pass. + + See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works. + + .. warning:: + Checkpointing currently only supports :func:`torch.autograd.backward` + and only if its `inputs` argument is not passed. :func:`torch.autograd.grad` + is not supported. + + .. warning:: + At least one of the inputs needs to have :code:`requires_grad=True` if + grads are needed for model inputs, otherwise the checkpointed part of the + model won't have gradients. + + Args: + functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially. + x: A Tensor that is input to :attr:`functions` + every: checkpoint every-n functions (default: 1) + flatten (bool): flatten nn.Sequential of nn.Sequentials + skip_last (bool): skip checkpointing the last function in the sequence if True + preserve_rng_state (bool, optional, default=True): stash and restore + the RNG state during each checkpointed segment (set False to omit). + + Returns: + Output of running :attr:`functions` sequentially on :attr:`*inputs` + + Example: + >>> model = nn.Sequential(...)
+ >>> input_var = checkpoint_seq(model, input_var, every=2) + """ + def run_function(start, end, functions): + def forward(_x): + for j in range(start, end + 1): + _x = functions[j](_x) + return _x + return forward + + if isinstance(functions, torch.nn.Sequential): + functions = functions.children() + if flatten: + functions = chain.from_iterable(functions) + if not isinstance(functions, (tuple, list)): + functions = tuple(functions) + + num_checkpointed = len(functions) + if skip_last: + num_checkpointed -= 1 + end = -1 + for start in range(0, num_checkpointed, every): + end = min(start + every - 1, num_checkpointed - 1) + x = checkpoint(run_function(start, end, functions), x, preserve_rng_state=preserve_rng_state) + if skip_last: + return run_function(end + 1, len(functions) - 1, functions)(x) + return x + + +def adapt_input_conv(in_chans, conv_weight): + conv_type = conv_weight.dtype + conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU + O, I, J, K = conv_weight.shape + if in_chans == 1: + if I > 3: + assert conv_weight.shape[1] % 3 == 0 + # For models with space2depth stems + conv_weight = conv_weight.reshape(O, I // 3, 3, J, K) + conv_weight = conv_weight.sum(dim=2, keepdim=False) + else: + conv_weight = conv_weight.sum(dim=1, keepdim=True) + elif in_chans != 3: + if I != 3: + raise NotImplementedError('Weight format not supported by conversion.') + else: + # NOTE this strategy should be better than random init, but there could be other combinations of + # the original RGB input layer weights that'd work better for specific cases. + repeat = int(math.ceil(in_chans / 3)) + conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :] + conv_weight *= (3 / float(in_chans)) + conv_weight = conv_weight.to(conv_type) + return conv_weight diff --git a/pytorch-image-models/timm/models/_pretrained.py b/pytorch-image-models/timm/models/_pretrained.py new file mode 100644 index 0000000000000000000000000000000000000000..2938f8fe714d9bcfe5477b6f083defc31ee8e66a --- /dev/null +++ b/pytorch-image-models/timm/models/_pretrained.py @@ -0,0 +1,94 @@ +import copy +from collections import deque, defaultdict +from dataclasses import dataclass, field, replace, asdict +from typing import Any, Deque, Dict, Tuple, Optional, Union + + +__all__ = ['PretrainedCfg', 'filter_pretrained_cfg', 'DefaultCfg'] + + +@dataclass +class PretrainedCfg: + """ + """ + # weight source locations + url: Optional[Union[str, Tuple[str, str]]] = None # remote URL + file: Optional[str] = None # local / shared filesystem path + state_dict: Optional[Dict[str, Any]] = None # in-memory state dict + hf_hub_id: Optional[str] = None # Hugging Face Hub model id ('organization/model') + hf_hub_filename: Optional[str] = None # Hugging Face Hub filename (overrides default) + + source: Optional[str] = None # source of cfg / weight location used (url, file, hf-hub) + architecture: Optional[str] = None # architecture variant can be set when not implicit + tag: Optional[str] = None # pretrained tag of source + custom_load: bool = False # use custom model specific model.load_pretrained() (ie for npz files) + + # input / data config + input_size: Tuple[int, int, int] = (3, 224, 224) + test_input_size: Optional[Tuple[int, int, int]] = None + min_input_size: Optional[Tuple[int, int, int]] = None + fixed_input_size: bool = False + interpolation: str = 'bicubic' + crop_pct: float = 0.875 + test_crop_pct: Optional[float] = None + crop_mode: str = 'center' + mean: Tuple[float, ...] 
= (0.485, 0.456, 0.406) + std: Tuple[float, ...] = (0.229, 0.224, 0.225) + + # head / classifier config and meta-data + num_classes: int = 1000 + label_offset: Optional[int] = None + label_names: Optional[Tuple[str]] = None + label_descriptions: Optional[Dict[str, str]] = None + + # model attributes that vary with above or required for pretrained adaptation + pool_size: Optional[Tuple[int, ...]] = None + test_pool_size: Optional[Tuple[int, ...]] = None + first_conv: Optional[str] = None + classifier: Optional[str] = None + + license: Optional[str] = None + description: Optional[str] = None + origin_url: Optional[str] = None + paper_name: Optional[str] = None + paper_ids: Optional[Union[str, Tuple[str]]] = None + notes: Optional[Tuple[str]] = None + + @property + def has_weights(self): + return self.url or self.file or self.hf_hub_id + + def to_dict(self, remove_source=False, remove_null=True): + return filter_pretrained_cfg( + asdict(self), + remove_source=remove_source, + remove_null=remove_null + ) + + +def filter_pretrained_cfg(cfg, remove_source=False, remove_null=True): + filtered_cfg = {} + keep_null = {'pool_size', 'first_conv', 'classifier'} # always keep these keys, even if none + for k, v in cfg.items(): + if remove_source and k in {'url', 'file', 'hf_hub_id', 'hf_hub_id', 'hf_hub_filename', 'source'}: + continue + if remove_null and v is None and k not in keep_null: + continue + filtered_cfg[k] = v + return filtered_cfg + + +@dataclass +class DefaultCfg: + tags: Deque[str] = field(default_factory=deque) # priority queue of tags (first is default) + cfgs: Dict[str, PretrainedCfg] = field(default_factory=dict) # pretrained cfgs by tag + is_pretrained: bool = False # at least one of the configs has a pretrained source set + + @property + def default(self): + return self.cfgs[self.tags[0]] + + @property + def default_with_tag(self): + tag = self.tags[0] + return tag, self.cfgs[tag] diff --git a/pytorch-image-models/timm/models/_prune.py b/pytorch-image-models/timm/models/_prune.py new file mode 100644 index 0000000000000000000000000000000000000000..370b911f462470b38ad896887a91791c94944031 --- /dev/null +++ b/pytorch-image-models/timm/models/_prune.py @@ -0,0 +1,116 @@ +import os +import pkgutil +from copy import deepcopy + +from torch import nn as nn + +from timm.layers import Conv2dSame, BatchNormAct2d, Linear + +__all__ = ['extract_layer', 'set_layer', 'adapt_model_from_string', 'adapt_model_from_file'] + + +def extract_layer(model, layer): + layer = layer.split('.') + module = model + if hasattr(model, 'module') and layer[0] != 'module': + module = model.module + if not hasattr(model, 'module') and layer[0] == 'module': + layer = layer[1:] + for l in layer: + if hasattr(module, l): + if not l.isdigit(): + module = getattr(module, l) + else: + module = module[int(l)] + else: + return module + return module + + +def set_layer(model, layer, val): + layer = layer.split('.') + module = model + if hasattr(model, 'module') and layer[0] != 'module': + module = model.module + lst_index = 0 + module2 = module + for l in layer: + if hasattr(module2, l): + if not l.isdigit(): + module2 = getattr(module2, l) + else: + module2 = module2[int(l)] + lst_index += 1 + lst_index -= 1 + for l in layer[:lst_index]: + if not l.isdigit(): + module = getattr(module, l) + else: + module = module[int(l)] + l = layer[lst_index] + setattr(module, l, val) + + +def adapt_model_from_string(parent_module, model_string): + separator = '***' + state_dict = {} + lst_shape = model_string.split(separator) + for k in 
lst_shape: + k = k.split(':') + key = k[0] + shape = k[1][1:-1].split(',') + if shape[0] != '': + state_dict[key] = [int(i) for i in shape] + + new_module = deepcopy(parent_module) + for n, m in parent_module.named_modules(): + old_module = extract_layer(parent_module, n) + if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame): + if isinstance(old_module, Conv2dSame): + conv = Conv2dSame + else: + conv = nn.Conv2d + s = state_dict[n + '.weight'] + in_channels = s[1] + out_channels = s[0] + g = 1 + if old_module.groups > 1: + in_channels = out_channels + g = in_channels + new_conv = conv( + in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size, + bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation, + groups=g, stride=old_module.stride) + set_layer(new_module, n, new_conv) + elif isinstance(old_module, BatchNormAct2d): + new_bn = BatchNormAct2d( + state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum, + affine=old_module.affine, track_running_stats=True) + new_bn.drop = old_module.drop + new_bn.act = old_module.act + set_layer(new_module, n, new_bn) + elif isinstance(old_module, nn.BatchNorm2d): + new_bn = nn.BatchNorm2d( + num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum, + affine=old_module.affine, track_running_stats=True) + set_layer(new_module, n, new_bn) + elif isinstance(old_module, nn.Linear): + # FIXME extra checks to ensure this is actually the FC classifier layer and not a diff Linear layer? + num_features = state_dict[n + '.weight'][1] + new_fc = Linear( + in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None) + set_layer(new_module, n, new_fc) + if hasattr(new_module, 'num_features'): + if getattr(new_module, 'head_hidden_size', 0) == new_module.num_features: + new_module.head_hidden_size = num_features + new_module.num_features = num_features + + new_module.eval() + parent_module.eval() + + return new_module + + +def adapt_model_from_file(parent_module, model_variant): + adapt_data = pkgutil.get_data(__name__, os.path.join('_pruned', model_variant + '.txt')) + return adapt_model_from_string(parent_module, adapt_data.decode('utf-8').strip()) diff --git a/pytorch-image-models/timm/models/_pruned/efficientnet_b1_pruned.txt b/pytorch-image-models/timm/models/_pruned/efficientnet_b1_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..0972b527612b283fd242cc5eaeb6e767ea106c66 --- /dev/null +++ b/pytorch-image-models/timm/models/_pruned/efficientnet_b1_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 
3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[48, 16, 1, 1]***blocks.1.0.bn1.weight:[48]***blocks.1.0.bn1.bias:[48]***blocks.1.0.bn1.running_mean:[48]***blocks.1.0.bn1.running_var:[48]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[48, 1, 3, 3]***blocks.1.0.bn2.weight:[48]***blocks.1.0.bn2.bias:[48]***blocks.1.0.bn2.running_mean:[48]***blocks.1.0.bn2.running_var:[48]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 48, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[48, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[48]***blocks.1.0.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[62, 12, 1, 1]***blocks.1.1.bn1.weight:[62]***blocks.1.1.bn1.bias:[62]***blocks.1.1.bn1.running_mean:[62]***blocks.1.1.bn1.running_var:[62]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[62, 1, 3, 3]***blocks.1.1.bn2.weight:[62]***blocks.1.1.bn2.bias:[62]***blocks.1.1.bn2.running_mean:[62]***blocks.1.1.bn2.running_var:[62]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 62, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[62, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[62]***blocks.1.1.conv_pwl.weight:[12, 62, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[48, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[70, 12, 1, 1]***blocks.2.0.bn1.weight:[70]***blocks.2.0.bn1.bias:[70]***blocks.2.0.bn1.running_mean:[70]***blocks.2.0.bn1.running_var:[70]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[70, 1, 5, 5]***blocks.2.0.bn2.weight:[70]***blocks.2.0.bn2.bias:[70]***blocks.2.0.bn2.running_mean:[70]***blocks.2.0.bn2.running_var:[70]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 70, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[70, 6, 1, 
1]***blocks.2.0.se.conv_expand.bias:[70]***blocks.2.0.conv_pwl.weight:[35, 70, 1, 1]***blocks.2.0.bn3.weight:[35]***blocks.2.0.bn3.bias:[35]***blocks.2.0.bn3.running_mean:[35]***blocks.2.0.bn3.running_var:[35]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[61, 35, 1, 1]***blocks.2.1.bn1.weight:[61]***blocks.2.1.bn1.bias:[61]***blocks.2.1.bn1.running_mean:[61]***blocks.2.1.bn1.running_var:[61]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[61, 1, 5, 5]***blocks.2.1.bn2.weight:[61]***blocks.2.1.bn2.bias:[61]***blocks.2.1.bn2.running_mean:[61]***blocks.2.1.bn2.running_var:[61]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[10, 61, 1, 1]***blocks.2.1.se.conv_reduce.bias:[10]***blocks.2.1.se.conv_expand.weight:[61, 10, 1, 1]***blocks.2.1.se.conv_expand.bias:[61]***blocks.2.1.conv_pwl.weight:[35, 61, 1, 1]***blocks.2.1.bn3.weight:[35]***blocks.2.1.bn3.bias:[35]***blocks.2.1.bn3.running_mean:[35]***blocks.2.1.bn3.running_var:[35]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[51, 35, 1, 1]***blocks.2.2.bn1.weight:[51]***blocks.2.2.bn1.bias:[51]***blocks.2.2.bn1.running_mean:[51]***blocks.2.2.bn1.running_var:[51]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[51, 1, 5, 5]***blocks.2.2.bn2.weight:[51]***blocks.2.2.bn2.bias:[51]***blocks.2.2.bn2.running_mean:[51]***blocks.2.2.bn2.running_var:[51]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[10, 51, 1, 1]***blocks.2.2.se.conv_reduce.bias:[10]***blocks.2.2.se.conv_expand.weight:[51, 10, 1, 1]***blocks.2.2.se.conv_expand.bias:[51]***blocks.2.2.conv_pwl.weight:[35, 51, 1, 1]***blocks.2.2.bn3.weight:[35]***blocks.2.2.bn3.bias:[35]***blocks.2.2.bn3.running_mean:[35]***blocks.2.2.bn3.running_var:[35]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[175, 35, 1, 1]***blocks.3.0.bn1.weight:[175]***blocks.3.0.bn1.bias:[175]***blocks.3.0.bn1.running_mean:[175]***blocks.3.0.bn1.running_var:[175]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[175, 1, 3, 3]***blocks.3.0.bn2.weight:[175]***blocks.3.0.bn2.bias:[175]***blocks.3.0.bn2.running_mean:[175]***blocks.3.0.bn2.running_var:[175]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[10, 175, 1, 1]***blocks.3.0.se.conv_reduce.bias:[10]***blocks.3.0.se.conv_expand.weight:[175, 10, 1, 1]***blocks.3.0.se.conv_expand.bias:[175]***blocks.3.0.conv_pwl.weight:[74, 175, 1, 1]***blocks.3.0.bn3.weight:[74]***blocks.3.0.bn3.bias:[74]***blocks.3.0.bn3.running_mean:[74]***blocks.3.0.bn3.running_var:[74]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[188, 74, 1, 1]***blocks.3.1.bn1.weight:[188]***blocks.3.1.bn1.bias:[188]***blocks.3.1.bn1.running_mean:[188]***blocks.3.1.bn1.running_var:[188]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[188, 1, 3, 3]***blocks.3.1.bn2.weight:[188]***blocks.3.1.bn2.bias:[188]***blocks.3.1.bn2.running_mean:[188]***blocks.3.1.bn2.running_var:[188]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[20, 188, 1, 1]***blocks.3.1.se.conv_reduce.bias:[20]***blocks.3.1.se.conv_expand.weight:[188, 20, 1, 1]***blocks.3.1.se.conv_expand.bias:[188]***blocks.3.1.conv_pwl.weight:[74, 188, 1, 1]***blocks.3.1.bn3.weight:[74]***blocks.3.1.bn3.bias:[74]***blocks.3.1.bn3.running_mean:[74]***blocks.3.1.bn3.running_var:[74]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[137, 74, 1, 
1]***blocks.3.2.bn1.weight:[137]***blocks.3.2.bn1.bias:[137]***blocks.3.2.bn1.running_mean:[137]***blocks.3.2.bn1.running_var:[137]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[137, 1, 3, 3]***blocks.3.2.bn2.weight:[137]***blocks.3.2.bn2.bias:[137]***blocks.3.2.bn2.running_mean:[137]***blocks.3.2.bn2.running_var:[137]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[20, 137, 1, 1]***blocks.3.2.se.conv_reduce.bias:[20]***blocks.3.2.se.conv_expand.weight:[137, 20, 1, 1]***blocks.3.2.se.conv_expand.bias:[137]***blocks.3.2.conv_pwl.weight:[74, 137, 1, 1]***blocks.3.2.bn3.weight:[74]***blocks.3.2.bn3.bias:[74]***blocks.3.2.bn3.running_mean:[74]***blocks.3.2.bn3.running_var:[74]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[164, 74, 1, 1]***blocks.3.3.bn1.weight:[164]***blocks.3.3.bn1.bias:[164]***blocks.3.3.bn1.running_mean:[164]***blocks.3.3.bn1.running_var:[164]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[164, 1, 3, 3]***blocks.3.3.bn2.weight:[164]***blocks.3.3.bn2.bias:[164]***blocks.3.3.bn2.running_mean:[164]***blocks.3.3.bn2.running_var:[164]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[20, 164, 1, 1]***blocks.3.3.se.conv_reduce.bias:[20]***blocks.3.3.se.conv_expand.weight:[164, 20, 1, 1]***blocks.3.3.se.conv_expand.bias:[164]***blocks.3.3.conv_pwl.weight:[74, 164, 1, 1]***blocks.3.3.bn3.weight:[74]***blocks.3.3.bn3.bias:[74]***blocks.3.3.bn3.running_mean:[74]***blocks.3.3.bn3.running_var:[74]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[399, 74, 1, 1]***blocks.4.0.bn1.weight:[399]***blocks.4.0.bn1.bias:[399]***blocks.4.0.bn1.running_mean:[399]***blocks.4.0.bn1.running_var:[399]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[399, 1, 5, 5]***blocks.4.0.bn2.weight:[399]***blocks.4.0.bn2.bias:[399]***blocks.4.0.bn2.running_mean:[399]***blocks.4.0.bn2.running_var:[399]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[20, 399, 1, 1]***blocks.4.0.se.conv_reduce.bias:[20]***blocks.4.0.se.conv_expand.weight:[399, 20, 1, 1]***blocks.4.0.se.conv_expand.bias:[399]***blocks.4.0.conv_pwl.weight:[67, 399, 1, 1]***blocks.4.0.bn3.weight:[67]***blocks.4.0.bn3.bias:[67]***blocks.4.0.bn3.running_mean:[67]***blocks.4.0.bn3.running_var:[67]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[201, 67, 1, 1]***blocks.4.1.bn1.weight:[201]***blocks.4.1.bn1.bias:[201]***blocks.4.1.bn1.running_mean:[201]***blocks.4.1.bn1.running_var:[201]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[201, 1, 5, 5]***blocks.4.1.bn2.weight:[201]***blocks.4.1.bn2.bias:[201]***blocks.4.1.bn2.running_mean:[201]***blocks.4.1.bn2.running_var:[201]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[28, 201, 1, 1]***blocks.4.1.se.conv_reduce.bias:[28]***blocks.4.1.se.conv_expand.weight:[201, 28, 1, 1]***blocks.4.1.se.conv_expand.bias:[201]***blocks.4.1.conv_pwl.weight:[67, 201, 1, 1]***blocks.4.1.bn3.weight:[67]***blocks.4.1.bn3.bias:[67]***blocks.4.1.bn3.running_mean:[67]***blocks.4.1.bn3.running_var:[67]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[160, 67, 1, 1]***blocks.4.2.bn1.weight:[160]***blocks.4.2.bn1.bias:[160]***blocks.4.2.bn1.running_mean:[160]***blocks.4.2.bn1.running_var:[160]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[160, 1, 5, 
5]***blocks.4.2.bn2.weight:[160]***blocks.4.2.bn2.bias:[160]***blocks.4.2.bn2.running_mean:[160]***blocks.4.2.bn2.running_var:[160]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[28, 160, 1, 1]***blocks.4.2.se.conv_reduce.bias:[28]***blocks.4.2.se.conv_expand.weight:[160, 28, 1, 1]***blocks.4.2.se.conv_expand.bias:[160]***blocks.4.2.conv_pwl.weight:[67, 160, 1, 1]***blocks.4.2.bn3.weight:[67]***blocks.4.2.bn3.bias:[67]***blocks.4.2.bn3.running_mean:[67]***blocks.4.2.bn3.running_var:[67]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[213, 67, 1, 1]***blocks.4.3.bn1.weight:[213]***blocks.4.3.bn1.bias:[213]***blocks.4.3.bn1.running_mean:[213]***blocks.4.3.bn1.running_var:[213]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[213, 1, 5, 5]***blocks.4.3.bn2.weight:[213]***blocks.4.3.bn2.bias:[213]***blocks.4.3.bn2.running_mean:[213]***blocks.4.3.bn2.running_var:[213]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[28, 213, 1, 1]***blocks.4.3.se.conv_reduce.bias:[28]***blocks.4.3.se.conv_expand.weight:[213, 28, 1, 1]***blocks.4.3.se.conv_expand.bias:[213]***blocks.4.3.conv_pwl.weight:[67, 213, 1, 1]***blocks.4.3.bn3.weight:[67]***blocks.4.3.bn3.bias:[67]***blocks.4.3.bn3.running_mean:[67]***blocks.4.3.bn3.running_var:[67]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[637, 67, 1, 1]***blocks.5.0.bn1.weight:[637]***blocks.5.0.bn1.bias:[637]***blocks.5.0.bn1.running_mean:[637]***blocks.5.0.bn1.running_var:[637]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[637, 1, 5, 5]***blocks.5.0.bn2.weight:[637]***blocks.5.0.bn2.bias:[637]***blocks.5.0.bn2.running_mean:[637]***blocks.5.0.bn2.running_var:[637]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[27, 637, 1, 1]***blocks.5.0.se.conv_reduce.bias:[27]***blocks.5.0.se.conv_expand.weight:[637, 27, 1, 1]***blocks.5.0.se.conv_expand.bias:[637]***blocks.5.0.conv_pwl.weight:[192, 637, 1, 1]***blocks.5.0.bn3.weight:[192]***blocks.5.0.bn3.bias:[192]***blocks.5.0.bn3.running_mean:[192]***blocks.5.0.bn3.running_var:[192]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[806, 192, 1, 1]***blocks.5.1.bn1.weight:[806]***blocks.5.1.bn1.bias:[806]***blocks.5.1.bn1.running_mean:[806]***blocks.5.1.bn1.running_var:[806]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[806, 1, 5, 5]***blocks.5.1.bn2.weight:[806]***blocks.5.1.bn2.bias:[806]***blocks.5.1.bn2.running_mean:[806]***blocks.5.1.bn2.running_var:[806]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[48, 806, 1, 1]***blocks.5.1.se.conv_reduce.bias:[48]***blocks.5.1.se.conv_expand.weight:[806, 48, 1, 1]***blocks.5.1.se.conv_expand.bias:[806]***blocks.5.1.conv_pwl.weight:[192, 806, 1, 1]***blocks.5.1.bn3.weight:[192]***blocks.5.1.bn3.bias:[192]***blocks.5.1.bn3.running_mean:[192]***blocks.5.1.bn3.running_var:[192]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[798, 192, 1, 1]***blocks.5.2.bn1.weight:[798]***blocks.5.2.bn1.bias:[798]***blocks.5.2.bn1.running_mean:[798]***blocks.5.2.bn1.running_var:[798]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[798, 1, 5, 5]***blocks.5.2.bn2.weight:[798]***blocks.5.2.bn2.bias:[798]***blocks.5.2.bn2.running_mean:[798]***blocks.5.2.bn2.running_var:[798]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[48, 798, 1, 
1]***blocks.5.2.se.conv_reduce.bias:[48]***blocks.5.2.se.conv_expand.weight:[798, 48, 1, 1]***blocks.5.2.se.conv_expand.bias:[798]***blocks.5.2.conv_pwl.weight:[192, 798, 1, 1]***blocks.5.2.bn3.weight:[192]***blocks.5.2.bn3.bias:[192]***blocks.5.2.bn3.running_mean:[192]***blocks.5.2.bn3.running_var:[192]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[891, 192, 1, 1]***blocks.5.3.bn1.weight:[891]***blocks.5.3.bn1.bias:[891]***blocks.5.3.bn1.running_mean:[891]***blocks.5.3.bn1.running_var:[891]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[891, 1, 5, 5]***blocks.5.3.bn2.weight:[891]***blocks.5.3.bn2.bias:[891]***blocks.5.3.bn2.running_mean:[891]***blocks.5.3.bn2.running_var:[891]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[48, 891, 1, 1]***blocks.5.3.se.conv_reduce.bias:[48]***blocks.5.3.se.conv_expand.weight:[891, 48, 1, 1]***blocks.5.3.se.conv_expand.bias:[891]***blocks.5.3.conv_pwl.weight:[192, 891, 1, 1]***blocks.5.3.bn3.weight:[192]***blocks.5.3.bn3.bias:[192]***blocks.5.3.bn3.running_mean:[192]***blocks.5.3.bn3.running_var:[192]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[990, 192, 1, 1]***blocks.5.4.bn1.weight:[990]***blocks.5.4.bn1.bias:[990]***blocks.5.4.bn1.running_mean:[990]***blocks.5.4.bn1.running_var:[990]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[990, 1, 5, 5]***blocks.5.4.bn2.weight:[990]***blocks.5.4.bn2.bias:[990]***blocks.5.4.bn2.running_mean:[990]***blocks.5.4.bn2.running_var:[990]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[48, 990, 1, 1]***blocks.5.4.se.conv_reduce.bias:[48]***blocks.5.4.se.conv_expand.weight:[990, 48, 1, 1]***blocks.5.4.se.conv_expand.bias:[990]***blocks.5.4.conv_pwl.weight:[192, 990, 1, 1]***blocks.5.4.bn3.weight:[192]***blocks.5.4.bn3.bias:[192]***blocks.5.4.bn3.running_mean:[192]***blocks.5.4.bn3.running_var:[192]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1152, 192, 1, 1]***blocks.6.0.bn1.weight:[1152]***blocks.6.0.bn1.bias:[1152]***blocks.6.0.bn1.running_mean:[1152]***blocks.6.0.bn1.running_var:[1152]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1152, 1, 3, 3]***blocks.6.0.bn2.weight:[1152]***blocks.6.0.bn2.bias:[1152]***blocks.6.0.bn2.running_mean:[1152]***blocks.6.0.bn2.running_var:[1152]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[48, 1152, 1, 1]***blocks.6.0.se.conv_reduce.bias:[48]***blocks.6.0.se.conv_expand.weight:[1152, 48, 1, 1]***blocks.6.0.se.conv_expand.bias:[1152]***blocks.6.0.conv_pwl.weight:[320, 1152, 1, 1]***blocks.6.0.bn3.weight:[320]***blocks.6.0.bn3.bias:[320]***blocks.6.0.bn3.running_mean:[320]***blocks.6.0.bn3.running_var:[320]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[1912, 320, 1, 1]***blocks.6.1.bn1.weight:[1912]***blocks.6.1.bn1.bias:[1912]***blocks.6.1.bn1.running_mean:[1912]***blocks.6.1.bn1.running_var:[1912]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[1912, 1, 3, 3]***blocks.6.1.bn2.weight:[1912]***blocks.6.1.bn2.bias:[1912]***blocks.6.1.bn2.running_mean:[1912]***blocks.6.1.bn2.running_var:[1912]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[80, 1912, 1, 1]***blocks.6.1.se.conv_reduce.bias:[80]***blocks.6.1.se.conv_expand.weight:[1912, 80, 1, 1]***blocks.6.1.se.conv_expand.bias:[1912]***blocks.6.1.conv_pwl.weight:[320, 1912, 1, 
1]***blocks.6.1.bn3.weight:[320]***blocks.6.1.bn3.bias:[320]***blocks.6.1.bn3.running_mean:[320]***blocks.6.1.bn3.running_var:[320]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1280, 320, 1, 1]***bn2.weight:[1280]***bn2.bias:[1280]***bn2.running_mean:[1280]***bn2.running_var:[1280]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1280]***classifier.bias:[1000] \ No newline at end of file diff --git a/pytorch-image-models/timm/models/focalnet.py b/pytorch-image-models/timm/models/focalnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f747001c735b95927a441e880e22a66039b70803 --- /dev/null +++ b/pytorch-image-models/timm/models/focalnet.py @@ -0,0 +1,652 @@ +""" FocalNet + +As described in `Focal Modulation Networks` - https://arxiv.org/abs/2203.11926 + +Significant modifications and refactoring from the original impl at https://github.com/microsoft/FocalNet + +This impl is/has: +* fully convolutional, NCHW tensor layout throughout, seemed to have minimal performance impact but more flexible +* re-ordered downsample / layer so that striding always at beginning of layer (stage) +* no input size constraints or input resolution/H/W tracking through the model +* torchscript fixed and a number of quirks cleaned up +* feature extraction support via `features_only=True` +""" +# -------------------------------------------------------- +# FocalNets -- Focal Modulation Networks +# Copyright (c) 2022 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Jianwei Yang (jianwyan@microsoft.com) +# -------------------------------------------------------- +from functools import partial +from typing import Callable, Optional, Tuple + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import Mlp, DropPath, LayerNorm2d, trunc_normal_, ClassifierHead, NormMlpClassifierHead +from ._builder import build_model_with_cfg +from ._manipulate import named_apply +from ._registry import generate_default_cfgs, register_model + +__all__ = ['FocalNet'] + + +class FocalModulation(nn.Module): + def __init__( + self, + dim: int, + focal_window, + focal_level: int, + focal_factor: int = 2, + bias: bool = True, + use_post_norm: bool = False, + normalize_modulator: bool = False, + proj_drop: float = 0., + norm_layer: Callable = LayerNorm2d, + ): + super().__init__() + + self.dim = dim + self.focal_window = focal_window + self.focal_level = focal_level + self.focal_factor = focal_factor + self.use_post_norm = use_post_norm + self.normalize_modulator = normalize_modulator + self.input_split = [dim, dim, self.focal_level + 1] + + self.f = nn.Conv2d(dim, 2 * dim + (self.focal_level + 1), kernel_size=1, bias=bias) + self.h = nn.Conv2d(dim, dim, kernel_size=1, bias=bias) + + self.act = nn.GELU() + self.proj = nn.Conv2d(dim, dim, kernel_size=1) + self.proj_drop = nn.Dropout(proj_drop) + self.focal_layers = nn.ModuleList() + + self.kernel_sizes = [] + for k in range(self.focal_level): + kernel_size = self.focal_factor * k + self.focal_window + self.focal_layers.append(nn.Sequential( + nn.Conv2d(dim, dim, kernel_size=kernel_size, groups=dim, padding=kernel_size // 2, bias=False), + nn.GELU(), + )) + self.kernel_sizes.append(kernel_size) + self.norm = norm_layer(dim) if self.use_post_norm else nn.Identity() + + def forward(self, x): + # pre linear projection + x = self.f(x) + q, ctx, gates = torch.split(x, self.input_split, 1) + + # context 
aggreation + ctx_all = 0 + for l, focal_layer in enumerate(self.focal_layers): + ctx = focal_layer(ctx) + ctx_all = ctx_all + ctx * gates[:, l:l + 1] + ctx_global = self.act(ctx.mean((2, 3), keepdim=True)) + ctx_all = ctx_all + ctx_global * gates[:, self.focal_level:] + + # normalize context + if self.normalize_modulator: + ctx_all = ctx_all / (self.focal_level + 1) + + # focal modulation + x_out = q * self.h(ctx_all) + x_out = self.norm(x_out) + + # post linear projection + x_out = self.proj(x_out) + x_out = self.proj_drop(x_out) + return x_out + + +class LayerScale2d(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + gamma = self.gamma.view(1, -1, 1, 1) + return x.mul_(gamma) if self.inplace else x * gamma + + +class FocalNetBlock(nn.Module): + """ Focal Modulation Network Block. + """ + + def __init__( + self, + dim: int, + mlp_ratio: float = 4., + focal_level: int = 1, + focal_window: int = 3, + use_post_norm: bool = False, + use_post_norm_in_modulation: bool = False, + normalize_modulator: bool = False, + layerscale_value: float = 1e-4, + proj_drop: float = 0., + drop_path: float = 0., + act_layer: Callable = nn.GELU, + norm_layer: Callable = LayerNorm2d, + ): + """ + Args: + dim: Number of input channels. + mlp_ratio: Ratio of mlp hidden dim to embedding dim. + focal_level: Number of focal levels. + focal_window: Focal window size at first focal level. + use_post_norm: Whether to use layer norm after modulation. + use_post_norm_in_modulation: Whether to use layer norm in modulation. + layerscale_value: Initial layerscale value. + proj_drop: Dropout rate. + drop_path: Stochastic depth rate. + act_layer: Activation layer. + norm_layer: Normalization layer. + """ + super().__init__() + self.dim = dim + self.mlp_ratio = mlp_ratio + + self.focal_window = focal_window + self.focal_level = focal_level + self.use_post_norm = use_post_norm + + self.norm1 = norm_layer(dim) if not use_post_norm else nn.Identity() + self.modulation = FocalModulation( + dim, + focal_window=focal_window, + focal_level=self.focal_level, + use_post_norm=use_post_norm_in_modulation, + normalize_modulator=normalize_modulator, + proj_drop=proj_drop, + norm_layer=norm_layer, + ) + self.norm1_post = norm_layer(dim) if use_post_norm else nn.Identity() + self.ls1 = LayerScale2d(dim, layerscale_value) if layerscale_value is not None else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) if not use_post_norm else nn.Identity() + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + use_conv=True, + ) + self.norm2_post = norm_layer(dim) if use_post_norm else nn.Identity() + self.ls2 = LayerScale2d(dim, layerscale_value) if layerscale_value is not None else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + shortcut = x + + # Focal Modulation + x = self.norm1(x) + x = self.modulation(x) + x = self.norm1_post(x) + x = shortcut + self.drop_path1(self.ls1(x)) + + # FFN + x = x + self.drop_path2(self.ls2(self.norm2_post(self.mlp(self.norm2(x))))) + + return x + + +class FocalNetStage(nn.Module): + """ A basic Focal Transformer layer for one stage. 
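    Illustrative usage (a sketch added for clarity, not taken from the upstream FocalNet
    code; the channel counts and spatial size below are arbitrary assumptions):

        stage = FocalNetStage(dim=96, out_dim=192, depth=2, downsample=True,
                              focal_level=3, focal_window=3)
        x = torch.randn(1, 96, 56, 56)   # NCHW feature map from the previous stage
        y = stage(x)                     # stride-2 downsample runs first, so y is (1, 192, 28, 28)
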
+ """ + + def __init__( + self, + dim: int, + out_dim: int, + depth: int, + mlp_ratio: float = 4., + downsample: bool = True, + focal_level: int = 1, + focal_window: int = 1, + use_overlap_down: bool = False, + use_post_norm: bool = False, + use_post_norm_in_modulation: bool = False, + normalize_modulator: bool = False, + layerscale_value: float = 1e-4, + proj_drop: float = 0., + drop_path: float = 0., + norm_layer: Callable = LayerNorm2d, + ): + """ + Args: + dim: Number of input channels. + out_dim: Number of output channels. + depth: Number of blocks. + mlp_ratio: Ratio of mlp hidden dim to embedding dim. + downsample: Downsample layer at start of the layer. + focal_level: Number of focal levels + focal_window: Focal window size at first focal level + use_overlap_down: User overlapped convolution in downsample layer. + use_post_norm: Whether to use layer norm after modulation. + use_post_norm_in_modulation: Whether to use layer norm in modulation. + layerscale_value: Initial layerscale value + proj_drop: Dropout rate for projections. + drop_path: Stochastic depth rate. + norm_layer: Normalization layer. + """ + super().__init__() + self.dim = dim + self.depth = depth + self.grad_checkpointing = False + + if downsample: + self.downsample = Downsample( + in_chs=dim, + out_chs=out_dim, + stride=2, + overlap=use_overlap_down, + norm_layer=norm_layer, + ) + else: + self.downsample = nn.Identity() + + # build blocks + self.blocks = nn.ModuleList([ + FocalNetBlock( + dim=out_dim, + mlp_ratio=mlp_ratio, + focal_level=focal_level, + focal_window=focal_window, + use_post_norm=use_post_norm, + use_post_norm_in_modulation=use_post_norm_in_modulation, + normalize_modulator=normalize_modulator, + layerscale_value=layerscale_value, + proj_drop=proj_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + ) + for i in range(depth)]) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + def forward(self, x): + x = self.downsample(x) + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + return x + + +class Downsample(nn.Module): + + def __init__( + self, + in_chs: int, + out_chs: int, + stride: int = 4, + overlap: bool = False, + norm_layer: Optional[Callable] = None, + ): + """ + + Args: + in_chs: Number of input image channels. + out_chs: Number of linear projection output channels. + stride: Downsample stride. + overlap: Use overlapping convolutions if True. + norm_layer: Normalization layer. + """ + super().__init__() + self.stride = stride + padding = 0 + kernel_size = stride + if overlap: + assert stride in (2, 4) + if stride == 4: + kernel_size, padding = 7, 2 + elif stride == 2: + kernel_size, padding = 3, 1 + self.proj = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding) + self.norm = norm_layer(out_chs) if norm_layer is not None else nn.Identity() + + def forward(self, x): + x = self.proj(x) + x = self.norm(x) + return x + + +class FocalNet(nn.Module): + """" Focal Modulation Networks (FocalNets) + """ + + def __init__( + self, + in_chans: int = 3, + num_classes: int = 1000, + global_pool: str = 'avg', + embed_dim: int = 96, + depths: Tuple[int, ...] = (2, 2, 6, 2), + mlp_ratio: float = 4., + focal_levels: Tuple[int, ...] = (2, 2, 2, 2), + focal_windows: Tuple[int, ...] 
= (3, 3, 3, 3), + use_overlap_down: bool = False, + use_post_norm: bool = False, + use_post_norm_in_modulation: bool = False, + normalize_modulator: bool = False, + head_hidden_size: Optional[int] = None, + head_init_scale: float = 1.0, + layerscale_value: Optional[float] = None, + drop_rate: bool = 0., + proj_drop_rate: bool = 0., + drop_path_rate: bool = 0.1, + norm_layer: Callable = partial(LayerNorm2d, eps=1e-5), + ): + """ + Args: + in_chans: Number of input image channels. + num_classes: Number of classes for classification head. + embed_dim: Patch embedding dimension. + depths: Depth of each Focal Transformer layer. + mlp_ratio: Ratio of mlp hidden dim to embedding dim. + focal_levels: How many focal levels at all stages. Note that this excludes the finest-grain level. + focal_windows: The focal window size at all stages. + use_overlap_down: Whether to use convolutional embedding. + use_post_norm: Whether to use layernorm after modulation (it helps stablize training of large models) + layerscale_value: Value for layer scale. + drop_rate: Dropout rate. + drop_path_rate: Stochastic depth rate. + norm_layer: Normalization layer. + """ + super().__init__() + + self.num_layers = len(depths) + embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)] + + self.num_classes = num_classes + self.embed_dim = embed_dim + self.num_features = self.head_hidden_size = embed_dim[-1] + self.feature_info = [] + + self.stem = Downsample( + in_chs=in_chans, + out_chs=embed_dim[0], + overlap=use_overlap_down, + norm_layer=norm_layer, + ) + in_dim = embed_dim[0] + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + layers = [] + for i_layer in range(self.num_layers): + out_dim = embed_dim[i_layer] + layer = FocalNetStage( + dim=in_dim, + out_dim=out_dim, + depth=depths[i_layer], + mlp_ratio=mlp_ratio, + downsample=i_layer > 0, + focal_level=focal_levels[i_layer], + focal_window=focal_windows[i_layer], + use_overlap_down=use_overlap_down, + use_post_norm=use_post_norm, + use_post_norm_in_modulation=use_post_norm_in_modulation, + normalize_modulator=normalize_modulator, + layerscale_value=layerscale_value, + proj_drop=proj_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + ) + in_dim = out_dim + layers += [layer] + self.feature_info += [dict(num_chs=out_dim, reduction=4 * 2 ** i_layer, module=f'layers.{i_layer}')] + + self.layers = nn.Sequential(*layers) + + if head_hidden_size: + self.norm = nn.Identity() + self.head_hidden_size = head_hidden_size + self.head = NormMlpClassifierHead( + self.num_features, + num_classes, + hidden_size=head_hidden_size, + pool_type=global_pool, + drop_rate=drop_rate, + norm_layer=norm_layer, + ) + else: + self.norm = norm_layer(self.num_features) + self.head = ClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=drop_rate + ) + + named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) + + @torch.jit.ignore + def no_weight_decay(self): + return {''} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=[ + (r'^layers\.(\d+)', None), + (r'^norm', (99999,)) + ] if coarse else [ + (r'^layers\.(\d+).downsample', (0,)), + (r'^layers\.(\d+)\.\w+\.(\d+)', None), + (r'^norm', (99999,)), + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + for l in self.layers: + l.set_grad_checkpointing(enable=enable) + + 
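    # A rough usage sketch (illustrative only; the kwargs and shapes below are
    # assumptions, not taken from the upstream FocalNet code). The stem reduces
    # H/W by 4 and each later stage by a further 2, so a 224x224 input yields a
    # 7x7 feature map before pooling:
    #
    #     model = FocalNet(depths=(2, 2, 6, 2), embed_dim=96, num_classes=1000)
    #     model.set_grad_checkpointing(True)           # checkpoint each stage's blocks
    #     logits = model(torch.randn(2, 3, 224, 224))  # -> (2, 1000)
    #
    # set_grad_checkpointing() flips the flag on every FocalNetStage; each stage
    # then wraps its blocks in torch.utils.checkpoint when not scripting.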
@torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.head.reset(num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.stem(x) + x = self.layers(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _init_weights(module, name=None, head_init_scale=1.0): + if isinstance(module, nn.Conv2d): + trunc_normal_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Linear): + trunc_normal_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + if name and 'head.fc' in name: + module.weight.data.mul_(head_init_scale) + module.bias.data.mul_(head_init_scale) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': .9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.proj', 'classifier': 'head.fc', + 'license': 'mit', **kwargs + } + + +default_cfgs = generate_default_cfgs({ + "focalnet_tiny_srf.ms_in1k": _cfg( + hf_hub_id='timm/'), + "focalnet_small_srf.ms_in1k": _cfg( + hf_hub_id='timm/'), + "focalnet_base_srf.ms_in1k": _cfg( + hf_hub_id='timm/'), + "focalnet_tiny_lrf.ms_in1k": _cfg( + hf_hub_id='timm/'), + "focalnet_small_lrf.ms_in1k": _cfg( + hf_hub_id='timm/'), + "focalnet_base_lrf.ms_in1k": _cfg( + hf_hub_id='timm/'), + + "focalnet_large_fl3.ms_in22k": _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), + "focalnet_large_fl4.ms_in22k": _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), + "focalnet_xlarge_fl3.ms_in22k": _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), + "focalnet_xlarge_fl4.ms_in22k": _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), + "focalnet_huge_fl3.ms_in22k": _cfg( + hf_hub_id='timm/', + num_classes=21842), + "focalnet_huge_fl4.ms_in22k": _cfg( + hf_hub_id='timm/', + num_classes=0), +}) + + +def checkpoint_filter_fn(state_dict, model: FocalNet): + state_dict = state_dict.get('model', state_dict) + if 'stem.proj.weight' in state_dict: + return state_dict + import re + out_dict = {} + dest_dict = model.state_dict() + for k, v in state_dict.items(): + k = re.sub(r'gamma_([0-9])', r'ls\1.gamma', k) + k = k.replace('patch_embed', 'stem') + k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k) + if 'norm' in k and k not in dest_dict: + k = re.sub(r'norm([0-9])', r'norm\1_post', k) + k = k.replace('ln.', 'norm.') + k = k.replace('head', 'head.fc') + if k in dest_dict and dest_dict[k].numel() == v.numel() and dest_dict[k].shape != v.shape: + v = v.reshape(dest_dict[k].shape) + out_dict[k] = v + return out_dict + + +def _create_focalnet(variant, pretrained=False, **kwargs): + default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1)))) + out_indices = kwargs.pop('out_indices', default_out_indices) + + model = build_model_with_cfg( + FocalNet, variant, pretrained, + 
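        # checkpoint_filter_fn (above) remaps keys from the original Microsoft FocalNet
        # checkpoints (patch_embed -> stem, gamma_N -> lsN.gamma, shifted downsample
        # indices, head -> head.fc) so those weights load into this refactored,
        # fully-convolutional implementation.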
pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs) + return model + + +@register_model +def focalnet_tiny_srf(pretrained=False, **kwargs) -> FocalNet: + model_kwargs = dict(depths=[2, 2, 6, 2], embed_dim=96, **kwargs) + return _create_focalnet('focalnet_tiny_srf', pretrained=pretrained, **model_kwargs) + + +@register_model +def focalnet_small_srf(pretrained=False, **kwargs) -> FocalNet: + model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=96, **kwargs) + return _create_focalnet('focalnet_small_srf', pretrained=pretrained, **model_kwargs) + + +@register_model +def focalnet_base_srf(pretrained=False, **kwargs) -> FocalNet: + model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=128, **kwargs) + return _create_focalnet('focalnet_base_srf', pretrained=pretrained, **model_kwargs) + + +@register_model +def focalnet_tiny_lrf(pretrained=False, **kwargs) -> FocalNet: + model_kwargs = dict(depths=[2, 2, 6, 2], embed_dim=96, focal_levels=[3, 3, 3, 3], **kwargs) + return _create_focalnet('focalnet_tiny_lrf', pretrained=pretrained, **model_kwargs) + + +@register_model +def focalnet_small_lrf(pretrained=False, **kwargs) -> FocalNet: + model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=96, focal_levels=[3, 3, 3, 3], **kwargs) + return _create_focalnet('focalnet_small_lrf', pretrained=pretrained, **model_kwargs) + + +@register_model +def focalnet_base_lrf(pretrained=False, **kwargs) -> FocalNet: + model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=128, focal_levels=[3, 3, 3, 3], **kwargs) + return _create_focalnet('focalnet_base_lrf', pretrained=pretrained, **model_kwargs) + + +# FocalNet large+ models +@register_model +def focalnet_large_fl3(pretrained=False, **kwargs) -> FocalNet: + model_kwargs = dict( + depths=[2, 2, 18, 2], embed_dim=192, focal_levels=[3, 3, 3, 3], focal_windows=[5] * 4, + use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) + return _create_focalnet('focalnet_large_fl3', pretrained=pretrained, **model_kwargs) + + +@register_model +def focalnet_large_fl4(pretrained=False, **kwargs) -> FocalNet: + model_kwargs = dict( + depths=[2, 2, 18, 2], embed_dim=192, focal_levels=[4, 4, 4, 4], + use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) + return _create_focalnet('focalnet_large_fl4', pretrained=pretrained, **model_kwargs) + + +@register_model +def focalnet_xlarge_fl3(pretrained=False, **kwargs) -> FocalNet: + model_kwargs = dict( + depths=[2, 2, 18, 2], embed_dim=256, focal_levels=[3, 3, 3, 3], focal_windows=[5] * 4, + use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) + return _create_focalnet('focalnet_xlarge_fl3', pretrained=pretrained, **model_kwargs) + + +@register_model +def focalnet_xlarge_fl4(pretrained=False, **kwargs) -> FocalNet: + model_kwargs = dict( + depths=[2, 2, 18, 2], embed_dim=256, focal_levels=[4, 4, 4, 4], + use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) + return _create_focalnet('focalnet_xlarge_fl4', pretrained=pretrained, **model_kwargs) + + +@register_model +def focalnet_huge_fl3(pretrained=False, **kwargs) -> FocalNet: + model_kwargs = dict( + depths=[2, 2, 18, 2], embed_dim=352, focal_levels=[3, 3, 3, 3], focal_windows=[3] * 4, + use_post_norm=True, use_post_norm_in_modulation=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) + return _create_focalnet('focalnet_huge_fl3', pretrained=pretrained, **model_kwargs) + + +@register_model +def focalnet_huge_fl4(pretrained=False, 
**kwargs) -> FocalNet: + model_kwargs = dict( + depths=[2, 2, 18, 2], embed_dim=352, focal_levels=[4, 4, 4, 4], + use_post_norm=True, use_post_norm_in_modulation=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) + return _create_focalnet('focalnet_huge_fl4', pretrained=pretrained, **model_kwargs) + diff --git a/pytorch-image-models/timm/models/fx_features.py b/pytorch-image-models/timm/models/fx_features.py new file mode 100644 index 0000000000000000000000000000000000000000..ae6848f7d59e85678888f301b8717c9df9aa16db --- /dev/null +++ b/pytorch-image-models/timm/models/fx_features.py @@ -0,0 +1,4 @@ +from ._features_fx import * + +import warnings +warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", FutureWarning) diff --git a/pytorch-image-models/timm/models/gcvit.py b/pytorch-image-models/timm/models/gcvit.py new file mode 100644 index 0000000000000000000000000000000000000000..44660a3f6c08d1d4c288e17f47ecc341778f69e9 --- /dev/null +++ b/pytorch-image-models/timm/models/gcvit.py @@ -0,0 +1,592 @@ +""" Global Context ViT + +From scratch implementation of GCViT in the style of timm swin_transformer_v2_cr.py + +Global Context Vision Transformers -https://arxiv.org/abs/2206.09959 + +@article{hatamizadeh2022global, + title={Global Context Vision Transformers}, + author={Hatamizadeh, Ali and Yin, Hongxu and Kautz, Jan and Molchanov, Pavlo}, + journal={arXiv preprint arXiv:2206.09959}, + year={2022} +} + +Free of any code related to NVIDIA GCVit impl at https://github.com/NVlabs/GCVit. +The license for this code release is Apache 2.0 with no commercial restrictions. + +However, weight files adapted from NVIDIA GCVit impl ARE under a non-commercial share-alike license +(https://creativecommons.org/licenses/by-nc-sa/4.0/) until I have a chance to train new ones... + +Hacked together by / Copyright 2022, Ross Wightman +""" +import math +from functools import partial +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import DropPath, to_2tuple, to_ntuple, Mlp, ClassifierHead, LayerNorm2d, \ + get_attn, get_act_layer, get_norm_layer, RelPosBias, _assert +from ._builder import build_model_with_cfg +from ._features_fx import register_notrace_function +from ._manipulate import named_apply +from ._registry import register_model, generate_default_cfgs + +__all__ = ['GlobalContextVit'] + + +class MbConvBlock(nn.Module): + """ A depthwise separable / fused mbconv style residual block with SE, `no norm. 
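    Illustrative usage (a sketch for clarity, not from the upstream GCViT code; the
    channel count and spatial size are arbitrary assumptions). With the defaults
    (out_chs=None, expand_ratio=1.0) the channel count is preserved end to end,
    which the residual connection relies on:

        block = MbConvBlock(96)           # depthwise 3x3 -> GELU -> SE -> pointwise 1x1, + residual
        x = torch.randn(1, 96, 56, 56)    # NCHW
        y = block(x)                      # same shape: (1, 96, 56, 56)
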
+ """ + def __init__( + self, + in_chs, + out_chs=None, + expand_ratio=1.0, + attn_layer='se', + bias=False, + act_layer=nn.GELU, + ): + super().__init__() + attn_kwargs = dict(act_layer=act_layer) + if isinstance(attn_layer, str) and attn_layer == 'se' or attn_layer == 'eca': + attn_kwargs['rd_ratio'] = 0.25 + attn_kwargs['bias'] = False + attn_layer = get_attn(attn_layer) + out_chs = out_chs or in_chs + mid_chs = int(expand_ratio * in_chs) + + self.conv_dw = nn.Conv2d(in_chs, mid_chs, 3, 1, 1, groups=in_chs, bias=bias) + self.act = act_layer() + self.se = attn_layer(mid_chs, **attn_kwargs) + self.conv_pw = nn.Conv2d(mid_chs, out_chs, 1, 1, 0, bias=bias) + + def forward(self, x): + shortcut = x + x = self.conv_dw(x) + x = self.act(x) + x = self.se(x) + x = self.conv_pw(x) + x = x + shortcut + return x + + +class Downsample2d(nn.Module): + def __init__( + self, + dim, + dim_out=None, + reduction='conv', + act_layer=nn.GELU, + norm_layer=LayerNorm2d, # NOTE in NCHW + ): + super().__init__() + dim_out = dim_out or dim + + self.norm1 = norm_layer(dim) if norm_layer is not None else nn.Identity() + self.conv_block = MbConvBlock(dim, act_layer=act_layer) + assert reduction in ('conv', 'max', 'avg') + if reduction == 'conv': + self.reduction = nn.Conv2d(dim, dim_out, 3, 2, 1, bias=False) + elif reduction == 'max': + assert dim == dim_out + self.reduction = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + else: + assert dim == dim_out + self.reduction = nn.AvgPool2d(kernel_size=2) + self.norm2 = norm_layer(dim_out) if norm_layer is not None else nn.Identity() + + def forward(self, x): + x = self.norm1(x) + x = self.conv_block(x) + x = self.reduction(x) + x = self.norm2(x) + return x + + +class FeatureBlock(nn.Module): + def __init__( + self, + dim, + levels=0, + reduction='max', + act_layer=nn.GELU, + ): + super().__init__() + reductions = levels + levels = max(1, levels) + if reduction == 'avg': + pool_fn = partial(nn.AvgPool2d, kernel_size=2) + else: + pool_fn = partial(nn.MaxPool2d, kernel_size=3, stride=2, padding=1) + self.blocks = nn.Sequential() + for i in range(levels): + self.blocks.add_module(f'conv{i+1}', MbConvBlock(dim, act_layer=act_layer)) + if reductions: + self.blocks.add_module(f'pool{i+1}', pool_fn()) + reductions -= 1 + + def forward(self, x): + return self.blocks(x) + + +class Stem(nn.Module): + def __init__( + self, + in_chs: int = 3, + out_chs: int = 96, + act_layer: Callable = nn.GELU, + norm_layer: Callable = LayerNorm2d, # NOTE stem in NCHW + ): + super().__init__() + self.conv1 = nn.Conv2d(in_chs, out_chs, kernel_size=3, stride=2, padding=1) + self.down = Downsample2d(out_chs, act_layer=act_layer, norm_layer=norm_layer) + + def forward(self, x): + x = self.conv1(x) + x = self.down(x) + return x + + +class WindowAttentionGlobal(nn.Module): + + def __init__( + self, + dim: int, + num_heads: int, + window_size: Tuple[int, int], + use_global: bool = True, + qkv_bias: bool = True, + attn_drop: float = 0., + proj_drop: float = 0., + ): + super().__init__() + window_size = to_2tuple(window_size) + self.window_size = window_size + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.scale = self.head_dim ** -0.5 + self.use_global = use_global + + self.rel_pos = RelPosBias(window_size=window_size, num_heads=num_heads) + if self.use_global: + self.qkv = nn.Linear(dim, dim * 2, bias=qkv_bias) + else: + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = 
nn.Dropout(proj_drop) + + def forward(self, x, q_global: Optional[torch.Tensor] = None): + B, N, C = x.shape + if self.use_global and q_global is not None: + _assert(x.shape[-1] == q_global.shape[-1], 'x and q_global seq lengths should be equal') + + kv = self.qkv(x) + kv = kv.reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + k, v = kv.unbind(0) + + q = q_global.repeat(B // q_global.shape[0], 1, 1, 1) + q = q.reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3) + else: + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + q = q * self.scale + + attn = q @ k.transpose(-2, -1).contiguous() # NOTE contiguous() fixes an odd jit bug in PyTorch 2.0 + attn = self.rel_pos(attn) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +def window_partition(x, window_size: Tuple[int, int]): + B, H, W, C = x.shape + x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) + return windows + + +@register_notrace_function # reason: int argument is a Proxy +def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]): + H, W = img_size + C = windows.shape[-1] + x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) + return x + + +class LayerScale(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + return x.mul_(self.gamma) if self.inplace else x * self.gamma + + +class GlobalContextVitBlock(nn.Module): + def __init__( + self, + dim: int, + feat_size: Tuple[int, int], + num_heads: int, + window_size: int = 7, + mlp_ratio: float = 4., + use_global: bool = True, + qkv_bias: bool = True, + layer_scale: Optional[float] = None, + proj_drop: float = 0., + attn_drop: float = 0., + drop_path: float = 0., + attn_layer: Callable = WindowAttentionGlobal, + act_layer: Callable = nn.GELU, + norm_layer: Callable = nn.LayerNorm, + ): + super().__init__() + feat_size = to_2tuple(feat_size) + window_size = to_2tuple(window_size) + self.window_size = window_size + self.num_windows = int((feat_size[0] // window_size[0]) * (feat_size[1] // window_size[1])) + + self.norm1 = norm_layer(dim) + self.attn = attn_layer( + dim, + num_heads=num_heads, + window_size=window_size, + use_global=use_global, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + ) + self.ls1 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) + self.ls2 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def _window_attn(self, x, q_global: Optional[torch.Tensor] = None): + B, H, W, C = x.shape + x_win = window_partition(x, self.window_size) + x_win = x_win.view(-1, self.window_size[0] * self.window_size[1], C) + attn_win = self.attn(x_win, q_global) + x = window_reverse(attn_win, self.window_size, (H, W)) + return x + + def forward(self, x, q_global: Optional[torch.Tensor] = None): + x = x + self.drop_path1(self.ls1(self._window_attn(self.norm1(x), q_global))) + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class GlobalContextVitStage(nn.Module): + def __init__( + self, + dim, + depth: int, + num_heads: int, + feat_size: Tuple[int, int], + window_size: Tuple[int, int], + downsample: bool = True, + global_norm: bool = False, + stage_norm: bool = False, + mlp_ratio: float = 4., + qkv_bias: bool = True, + layer_scale: Optional[float] = None, + proj_drop: float = 0., + attn_drop: float = 0., + drop_path: Union[List[float], float] = 0.0, + act_layer: Callable = nn.GELU, + norm_layer: Callable = nn.LayerNorm, + norm_layer_cl: Callable = LayerNorm2d, + ): + super().__init__() + if downsample: + self.downsample = Downsample2d( + dim=dim, + dim_out=dim * 2, + norm_layer=norm_layer, + ) + dim = dim * 2 + feat_size = (feat_size[0] // 2, feat_size[1] // 2) + else: + self.downsample = nn.Identity() + self.feat_size = feat_size + window_size = to_2tuple(window_size) + + feat_levels = int(math.log2(min(feat_size) / min(window_size))) + self.global_block = FeatureBlock(dim, feat_levels) + self.global_norm = norm_layer_cl(dim) if global_norm else nn.Identity() + + self.blocks = nn.ModuleList([ + GlobalContextVitBlock( + dim=dim, + num_heads=num_heads, + feat_size=feat_size, + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + use_global=(i % 2 != 0), + layer_scale=layer_scale, + proj_drop=proj_drop, + attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + act_layer=act_layer, + norm_layer=norm_layer_cl, + ) + for i in range(depth) + ]) + self.norm = norm_layer_cl(dim) if stage_norm else nn.Identity() + self.dim = dim + self.feat_size = feat_size + self.grad_checkpointing = False + + def forward(self, x): + # input NCHW, downsample & global block are 2d conv + pooling + x = self.downsample(x) + global_query = self.global_block(x) + + # reshape NCHW --> NHWC for transformer blocks + x = x.permute(0, 2, 3, 1) + global_query = self.global_norm(global_query.permute(0, 2, 3, 1)) + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x, global_query) + x = self.norm(x) + x = x.permute(0, 3, 1, 2).contiguous() # back to NCHW + return x + + +class GlobalContextVit(nn.Module): + def __init__( + self, + in_chans: int = 3, + num_classes: int = 1000, + global_pool: str = 'avg', + img_size: Tuple[int, int] = 224, + window_ratio: Tuple[int, ...] = (32, 32, 16, 32), + window_size: Tuple[int, ...] = None, + embed_dim: int = 64, + depths: Tuple[int, ...] = (3, 4, 19, 5), + num_heads: Tuple[int, ...] 
= (2, 4, 8, 16), + mlp_ratio: float = 3.0, + qkv_bias: bool = True, + layer_scale: Optional[float] = None, + drop_rate: float = 0., + proj_drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + weight_init='', + act_layer: str = 'gelu', + norm_layer: str = 'layernorm2d', + norm_layer_cl: str = 'layernorm', + norm_eps: float = 1e-5, + ): + super().__init__() + act_layer = get_act_layer(act_layer) + norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps) + norm_layer_cl = partial(get_norm_layer(norm_layer_cl), eps=norm_eps) + + img_size = to_2tuple(img_size) + feat_size = tuple(d // 4 for d in img_size) # stem reduction by 4 + self.global_pool = global_pool + self.num_classes = num_classes + self.drop_rate = drop_rate + num_stages = len(depths) + self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (num_stages - 1)) + if window_size is not None: + window_size = to_ntuple(num_stages)(window_size) + else: + assert window_ratio is not None + window_size = tuple([(img_size[0] // r, img_size[1] // r) for r in to_ntuple(num_stages)(window_ratio)]) + + self.stem = Stem( + in_chs=in_chans, + out_chs=embed_dim, + act_layer=act_layer, + norm_layer=norm_layer + ) + + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + stages = [] + for i in range(num_stages): + last_stage = i == num_stages - 1 + stage_scale = 2 ** max(i - 1, 0) + stages.append(GlobalContextVitStage( + dim=embed_dim * stage_scale, + depth=depths[i], + num_heads=num_heads[i], + feat_size=(feat_size[0] // stage_scale, feat_size[1] // stage_scale), + window_size=window_size[i], + downsample=i != 0, + stage_norm=last_stage, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + layer_scale=layer_scale, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + act_layer=act_layer, + norm_layer=norm_layer, + norm_layer_cl=norm_layer_cl, + )) + self.stages = nn.Sequential(*stages) + + # Classifier head + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + if weight_init: + named_apply(partial(self._init_weights, scheme=weight_init), self) + + def _init_weights(self, module, name, scheme='vit'): + # note Conv2d left as default init + if scheme == 'vit': + if isinstance(module, nn.Linear): + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + else: + if isinstance(module, nn.Linear): + nn.init.normal_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + + @torch.jit.ignore + def no_weight_decay(self): + return { + k for k, _ in self.named_parameters() + if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', # stem and embed + blocks=r'^stages\.(\d+)' + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is None: + global_pool = self.head.global_pool.pool_type + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x: torch.Tensor) -> 
torch.Tensor: + x = self.stem(x) + x = self.stages(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_gcvit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + model = build_model_with_cfg(GlobalContextVit, variant, pretrained, **kwargs) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1', 'classifier': 'head.fc', + 'fixed_input_size': True, + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'gcvit_xxtiny.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xxtiny_224_nvidia-d1d86009.pth'), + 'gcvit_xtiny.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xtiny_224_nvidia-274b92b7.pth'), + 'gcvit_tiny.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_tiny_224_nvidia-ac783954.pth'), + 'gcvit_small.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_small_224_nvidia-4e98afa2.pth'), + 'gcvit_base.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_base_224_nvidia-f009139b.pth'), +}) + + +@register_model +def gcvit_xxtiny(pretrained=False, **kwargs) -> GlobalContextVit: + model_kwargs = dict( + depths=(2, 2, 6, 2), + num_heads=(2, 4, 8, 16), + **kwargs) + return _create_gcvit('gcvit_xxtiny', pretrained=pretrained, **model_kwargs) + + +@register_model +def gcvit_xtiny(pretrained=False, **kwargs) -> GlobalContextVit: + model_kwargs = dict( + depths=(3, 4, 6, 5), + num_heads=(2, 4, 8, 16), + **kwargs) + return _create_gcvit('gcvit_xtiny', pretrained=pretrained, **model_kwargs) + + +@register_model +def gcvit_tiny(pretrained=False, **kwargs) -> GlobalContextVit: + model_kwargs = dict( + depths=(3, 4, 19, 5), + num_heads=(2, 4, 8, 16), + **kwargs) + return _create_gcvit('gcvit_tiny', pretrained=pretrained, **model_kwargs) + + +@register_model +def gcvit_small(pretrained=False, **kwargs) -> GlobalContextVit: + model_kwargs = dict( + depths=(3, 4, 19, 5), + num_heads=(3, 6, 12, 24), + embed_dim=96, + mlp_ratio=2, + layer_scale=1e-5, + **kwargs) + return _create_gcvit('gcvit_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def gcvit_base(pretrained=False, **kwargs) -> GlobalContextVit: + model_kwargs = dict( + depths=(3, 4, 19, 5), + num_heads=(4, 8, 16, 32), + embed_dim=128, + mlp_ratio=2, + layer_scale=1e-5, + **kwargs) + return _create_gcvit('gcvit_base', pretrained=pretrained, **model_kwargs) diff --git a/pytorch-image-models/timm/models/ghostnet.py b/pytorch-image-models/timm/models/ghostnet.py new file mode 100644 index 0000000000000000000000000000000000000000..d73276d4e4574dcb370abf734859d008746c3b2d --- /dev/null +++ b/pytorch-image-models/timm/models/ghostnet.py @@ -0,0 +1,433 @@ +""" +An implementation of GhostNet & GhostNetV2 Models as defined in: +GhostNet: More Features from 
Cheap Operations. https://arxiv.org/abs/1911.11907 +GhostNetV2: Enhance Cheap Operation with Long-Range Attention. https://proceedings.neurips.cc/paper_files/paper/2022/file/40b60852a4abdaa696b5a1a78da34635-Paper-Conference.pdf + +The train script & code of models at: +Original model: https://github.com/huawei-noah/CV-backbones/tree/master/ghostnet_pytorch +Original model: https://github.com/huawei-noah/Efficient-AI-Backbones/blob/master/ghostnetv2_pytorch/model/ghostnetv2_torch.py +""" +import math +from functools import partial +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import SelectAdaptivePool2d, Linear, make_divisible +from ._builder import build_model_with_cfg +from ._efficientnet_blocks import SqueezeExcite, ConvBnAct +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs + +__all__ = ['GhostNet'] + + +_SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4)) + + +class GhostModule(nn.Module): + def __init__( + self, + in_chs, + out_chs, + kernel_size=1, + ratio=2, + dw_size=3, + stride=1, + use_act=True, + act_layer=nn.ReLU, + ): + super(GhostModule, self).__init__() + self.out_chs = out_chs + init_chs = math.ceil(out_chs / ratio) + new_chs = init_chs * (ratio - 1) + + self.primary_conv = nn.Sequential( + nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False), + nn.BatchNorm2d(init_chs), + act_layer(inplace=True) if use_act else nn.Identity(), + ) + + self.cheap_operation = nn.Sequential( + nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size//2, groups=init_chs, bias=False), + nn.BatchNorm2d(new_chs), + act_layer(inplace=True) if use_act else nn.Identity(), + ) + + def forward(self, x): + x1 = self.primary_conv(x) + x2 = self.cheap_operation(x1) + out = torch.cat([x1, x2], dim=1) + return out[:, :self.out_chs, :, :] + + +class GhostModuleV2(nn.Module): + def __init__( + self, + in_chs, + out_chs, + kernel_size=1, + ratio=2, + dw_size=3, + stride=1, + use_act=True, + act_layer=nn.ReLU, + ): + super().__init__() + self.gate_fn = nn.Sigmoid() + self.out_chs = out_chs + init_chs = math.ceil(out_chs / ratio) + new_chs = init_chs * (ratio - 1) + self.primary_conv = nn.Sequential( + nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False), + nn.BatchNorm2d(init_chs), + act_layer(inplace=True) if use_act else nn.Identity(), + ) + self.cheap_operation = nn.Sequential( + nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size // 2, groups=init_chs, bias=False), + nn.BatchNorm2d(new_chs), + act_layer(inplace=True) if use_act else nn.Identity(), + ) + self.short_conv = nn.Sequential( + nn.Conv2d(in_chs, out_chs, kernel_size, stride, kernel_size // 2, bias=False), + nn.BatchNorm2d(out_chs), + nn.Conv2d(out_chs, out_chs, kernel_size=(1, 5), stride=1, padding=(0, 2), groups=out_chs, bias=False), + nn.BatchNorm2d(out_chs), + nn.Conv2d(out_chs, out_chs, kernel_size=(5, 1), stride=1, padding=(2, 0), groups=out_chs, bias=False), + nn.BatchNorm2d(out_chs), + ) + + def forward(self, x): + res = self.short_conv(F.avg_pool2d(x, kernel_size=2, stride=2)) + x1 = self.primary_conv(x) + x2 = self.cheap_operation(x1) + out = torch.cat([x1, x2], dim=1) + return out[:, :self.out_chs, :, :] * F.interpolate( + self.gate_fn(res), size=(out.shape[-2], out.shape[-1]), mode='nearest') + + +class GhostBottleneck(nn.Module): + """ Ghost 
bottleneck w/ optional SE""" + + def __init__( + self, + in_chs, + mid_chs, + out_chs, + dw_kernel_size=3, + stride=1, + act_layer=nn.ReLU, + se_ratio=0., + mode='original', + ): + super(GhostBottleneck, self).__init__() + has_se = se_ratio is not None and se_ratio > 0. + self.stride = stride + + # Point-wise expansion + if mode == 'original': + self.ghost1 = GhostModule(in_chs, mid_chs, use_act=True, act_layer=act_layer) + else: + self.ghost1 = GhostModuleV2(in_chs, mid_chs, use_act=True, act_layer=act_layer) + + # Depth-wise convolution + if self.stride > 1: + self.conv_dw = nn.Conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, + padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False) + self.bn_dw = nn.BatchNorm2d(mid_chs) + else: + self.conv_dw = None + self.bn_dw = None + + # Squeeze-and-excitation + self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None + + # Point-wise linear projection + self.ghost2 = GhostModule(mid_chs, out_chs, use_act=False) + + # shortcut + if in_chs == out_chs and self.stride == 1: + self.shortcut = nn.Sequential() + else: + self.shortcut = nn.Sequential( + nn.Conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, + padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False), + nn.BatchNorm2d(in_chs), + nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(out_chs), + ) + + def forward(self, x): + shortcut = x + + # 1st ghost bottleneck + x = self.ghost1(x) + + # Depth-wise convolution + if self.conv_dw is not None: + x = self.conv_dw(x) + x = self.bn_dw(x) + + # Squeeze-and-excitation + if self.se is not None: + x = self.se(x) + + # 2nd ghost bottleneck + x = self.ghost2(x) + + x += self.shortcut(shortcut) + return x + + +class GhostNet(nn.Module): + def __init__( + self, + cfgs, + num_classes=1000, + width=1.0, + in_chans=3, + output_stride=32, + global_pool='avg', + drop_rate=0.2, + version='v1', + ): + super(GhostNet, self).__init__() + # setting of inverted residual blocks + assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported' + self.cfgs = cfgs + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + self.feature_info = [] + + # building first layer + stem_chs = make_divisible(16 * width, 4) + self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False) + self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=f'conv_stem')) + self.bn1 = nn.BatchNorm2d(stem_chs) + self.act1 = nn.ReLU(inplace=True) + prev_chs = stem_chs + + # building inverted residual blocks + stages = nn.ModuleList([]) + stage_idx = 0 + layer_idx = 0 + net_stride = 2 + for cfg in self.cfgs: + layers = [] + s = 1 + for k, exp_size, c, se_ratio, s in cfg: + out_chs = make_divisible(c * width, 4) + mid_chs = make_divisible(exp_size * width, 4) + layer_kwargs = {} + if version == 'v2' and layer_idx > 1: + layer_kwargs['mode'] = 'attn' + layers.append(GhostBottleneck(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio, **layer_kwargs)) + prev_chs = out_chs + layer_idx += 1 + if s > 1: + net_stride *= 2 + self.feature_info.append(dict( + num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}')) + stages.append(nn.Sequential(*layers)) + stage_idx += 1 + + out_chs = make_divisible(exp_size * width, 4) + stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1))) + self.pool_dim = prev_chs = out_chs + + self.blocks = nn.Sequential(*stages) + + # building last several layers + self.num_features = prev_chs + self.head_hidden_size = out_chs = 
1280 + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True) + self.act2 = nn.ReLU(inplace=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() + + # FIXME init + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^conv_stem|bn1', + blocks=[ + (r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None), + (r'conv_head', (99999,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.classifier + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + # cannot meaningfully change pooling of efficient head after creation + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x, flatten=True) + else: + x = self.blocks(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + x = self.flatten(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return x if pre_logits else self.classifier(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model: nn.Module): + out_dict = {} + for k, v in state_dict.items(): + if 'total' in k: + continue + out_dict[k] = v + return out_dict + + +def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs): + """ + Constructs a GhostNet model + """ + cfgs = [ + # k, t, c, SE, s + # stage1 + [[3, 16, 16, 0, 1]], + # stage2 + [[3, 48, 24, 0, 2]], + [[3, 72, 24, 0, 1]], + # stage3 + [[5, 72, 40, 0.25, 2]], + [[5, 120, 40, 0.25, 1]], + # stage4 + [[3, 240, 80, 0, 2]], + [[3, 200, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 480, 112, 0.25, 1], + [3, 672, 112, 0.25, 1] + ], + # stage5 + [[5, 672, 160, 0.25, 2]], + [[5, 960, 160, 0, 1], + [5, 960, 160, 0.25, 1], + [5, 960, 160, 0, 1], + [5, 960, 160, 0.25, 1] + ] + ] + model_kwargs = dict( + cfgs=cfgs, + width=width, + **kwargs, + ) + return build_model_with_cfg( + GhostNet, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(flatten_sequential=True), + **model_kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'ghostnet_050.untrained': _cfg(), + 'ghostnet_100.in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/huawei-noah/CV-backbones/releases/download/ghostnet_pth/ghostnet_1x.pth' + ), + 'ghostnet_130.untrained': _cfg(), + 'ghostnetv2_100.in1k': _cfg( + hf_hub_id='timm/', 
+ # url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_10.pth.tar' + ), + 'ghostnetv2_130.in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_13.pth.tar' + ), + 'ghostnetv2_160.in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_16.pth.tar' + ), +}) + + +@register_model +def ghostnet_050(pretrained=False, **kwargs) -> GhostNet: + """ GhostNet-0.5x """ + model = _create_ghostnet('ghostnet_050', width=0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def ghostnet_100(pretrained=False, **kwargs) -> GhostNet: + """ GhostNet-1.0x """ + model = _create_ghostnet('ghostnet_100', width=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def ghostnet_130(pretrained=False, **kwargs) -> GhostNet: + """ GhostNet-1.3x """ + model = _create_ghostnet('ghostnet_130', width=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def ghostnetv2_100(pretrained=False, **kwargs) -> GhostNet: + """ GhostNetV2-1.0x """ + model = _create_ghostnet('ghostnetv2_100', width=1.0, pretrained=pretrained, version='v2', **kwargs) + return model + + +@register_model +def ghostnetv2_130(pretrained=False, **kwargs) -> GhostNet: + """ GhostNetV2-1.3x """ + model = _create_ghostnet('ghostnetv2_130', width=1.3, pretrained=pretrained, version='v2', **kwargs) + return model + + +@register_model +def ghostnetv2_160(pretrained=False, **kwargs) -> GhostNet: + """ GhostNetV2-1.6x """ + model = _create_ghostnet('ghostnetv2_160', width=1.6, pretrained=pretrained, version='v2', **kwargs) + return model diff --git a/pytorch-image-models/timm/models/hardcorenas.py b/pytorch-image-models/timm/models/hardcorenas.py new file mode 100644 index 0000000000000000000000000000000000000000..459c1a3db845d2dc8b16c27521397687340ffe98 --- /dev/null +++ b/pytorch-image-models/timm/models/hardcorenas.py @@ -0,0 +1,156 @@ +from functools import partial + +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from ._builder import build_model_with_cfg +from ._builder import pretrained_cfg_for_features +from ._efficientnet_blocks import SqueezeExcite +from ._efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels +from ._registry import register_model, generate_default_cfgs +from .mobilenetv3 import MobileNetV3, MobileNetV3Features + +__all__ = [] # model_registry will add each entrypoint fn to this + + +def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs): + """Creates a hardcorenas model + + Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS + Paper: https://arxiv.org/abs/2102.11646 + + """ + num_features = 1280 + se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=num_features, + stem_size=32, + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=se_layer, + **kwargs, + ) + + features_only = False + model_cls = MobileNetV3 + kwargs_filter = None + if model_kwargs.pop('features_only', False): + features_only = True + kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias', 'global_pool') + model_cls = MobileNetV3Features + model = 
build_model_with_cfg( + model_cls, + variant, + pretrained, + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **model_kwargs, + ) + if features_only: + model.default_cfg = pretrained_cfg_for_features(model.default_cfg) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'hardcorenas_a.miil_green_in1k': _cfg(hf_hub_id='timm/'), + 'hardcorenas_b.miil_green_in1k': _cfg(hf_hub_id='timm/'), + 'hardcorenas_c.miil_green_in1k': _cfg(hf_hub_id='timm/'), + 'hardcorenas_d.miil_green_in1k': _cfg(hf_hub_id='timm/'), + 'hardcorenas_e.miil_green_in1k': _cfg(hf_hub_id='timm/'), + 'hardcorenas_f.miil_green_in1k': _cfg(hf_hub_id='timm/'), +}) + + +@register_model +def hardcorenas_a(pretrained=False, **kwargs) -> MobileNetV3: + """ hardcorenas_A """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], + ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_b(pretrained=False, **kwargs) -> MobileNetV3: + """ hardcorenas_B """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], + ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'], + ['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], + ['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], + ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_b', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_c(pretrained=False, **kwargs) -> MobileNetV3: + """ hardcorenas_C """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', + 'ir_r1_k5_s1_e3_c40_nre'], + ['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], + ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_d(pretrained=False, **kwargs) -> MobileNetV3: + """ hardcorenas_D """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'], + ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', + 'ir_r1_k3_s1_e3_c80_se0.25'], + 
['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25', + 'ir_r1_k5_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_e(pretrained=False, **kwargs) -> MobileNetV3: + """ hardcorenas_E """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', + 'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', + 'ir_r1_k5_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_f(pretrained=False, **kwargs) -> MobileNetV3: + """ hardcorenas_F """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], + ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', + 'ir_r1_k3_s1_e3_c80_se0.25'], + ['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', + 'ir_r1_k3_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs) + return model diff --git a/pytorch-image-models/timm/models/helpers.py b/pytorch-image-models/timm/models/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..5bf6d19e7cd7a1e2f8f51270fbf1bede9e8aec6a --- /dev/null +++ b/pytorch-image-models/timm/models/helpers.py @@ -0,0 +1,7 @@ +from ._builder import * +from ._helpers import * +from ._manipulate import * +from ._prune import * + +import warnings +warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", FutureWarning) diff --git a/pytorch-image-models/timm/models/hgnet.py b/pytorch-image-models/timm/models/hgnet.py new file mode 100644 index 0000000000000000000000000000000000000000..ea0a92d9555a798b2f9c37443488ca7e1fcbb1d2 --- /dev/null +++ b/pytorch-image-models/timm/models/hgnet.py @@ -0,0 +1,738 @@ +""" PP-HGNet (V1 & V2) + +Reference: +https://github.com/PaddlePaddle/PaddleClas/blob/develop/docs/zh_CN/models/ImageNet1k/PP-HGNetV2.md +The Paddle Implement of PP-HGNet (https://github.com/PaddlePaddle/PaddleClas/blob/release/2.5.1/docs/en/models/PP-HGNet_en.md) +PP-HGNet: https://github.com/PaddlePaddle/PaddleClas/blob/release/2.5.1/ppcls/arch/backbone/legendary_models/pp_hgnet.py +PP-HGNetv2: https://github.com/PaddlePaddle/PaddleClas/blob/release/2.5.1/ppcls/arch/backbone/legendary_models/pp_hgnet_v2.py +""" +from typing import Dict, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import SelectAdaptivePool2d, 
DropPath, create_conv2d +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs +from ._manipulate import checkpoint_seq + +__all__ = ['HighPerfGpuNet'] + + +class LearnableAffineBlock(nn.Module): + def __init__( + self, + scale_value=1.0, + bias_value=0.0 + ): + super().__init__() + self.scale = nn.Parameter(torch.tensor([scale_value]), requires_grad=True) + self.bias = nn.Parameter(torch.tensor([bias_value]), requires_grad=True) + + def forward(self, x): + return self.scale * x + self.bias + + +class ConvBNAct(nn.Module): + def __init__( + self, + in_chs, + out_chs, + kernel_size, + stride=1, + groups=1, + padding='', + use_act=True, + use_lab=False + ): + super().__init__() + self.use_act = use_act + self.use_lab = use_lab + self.conv = create_conv2d( + in_chs, + out_chs, + kernel_size, + stride=stride, + padding=padding, + groups=groups, + ) + self.bn = nn.BatchNorm2d(out_chs) + if self.use_act: + self.act = nn.ReLU() + else: + self.act = nn.Identity() + if self.use_act and self.use_lab: + self.lab = LearnableAffineBlock() + else: + self.lab = nn.Identity() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.act(x) + x = self.lab(x) + return x + + +class LightConvBNAct(nn.Module): + def __init__( + self, + in_chs, + out_chs, + kernel_size, + groups=1, + use_lab=False + ): + super().__init__() + self.conv1 = ConvBNAct( + in_chs, + out_chs, + kernel_size=1, + use_act=False, + use_lab=use_lab, + ) + self.conv2 = ConvBNAct( + out_chs, + out_chs, + kernel_size=kernel_size, + groups=out_chs, + use_act=True, + use_lab=use_lab, + ) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + return x + + +class EseModule(nn.Module): + def __init__(self, chs): + super().__init__() + self.conv = nn.Conv2d( + chs, + chs, + kernel_size=1, + stride=1, + padding=0, + ) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + identity = x + x = x.mean((2, 3), keepdim=True) + x = self.conv(x) + x = self.sigmoid(x) + return torch.mul(identity, x) + + +class StemV1(nn.Module): + # for PP-HGNet + def __init__(self, stem_chs): + super().__init__() + self.stem = nn.Sequential(*[ + ConvBNAct( + stem_chs[i], + stem_chs[i + 1], + kernel_size=3, + stride=2 if i == 0 else 1) for i in range( + len(stem_chs) - 1) + ]) + self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def forward(self, x): + x = self.stem(x) + x = self.pool(x) + return x + + +class StemV2(nn.Module): + # for PP-HGNetv2 + def __init__(self, in_chs, mid_chs, out_chs, use_lab=False): + super().__init__() + self.stem1 = ConvBNAct( + in_chs, + mid_chs, + kernel_size=3, + stride=2, + use_lab=use_lab, + ) + self.stem2a = ConvBNAct( + mid_chs, + mid_chs // 2, + kernel_size=2, + stride=1, + use_lab=use_lab, + ) + self.stem2b = ConvBNAct( + mid_chs // 2, + mid_chs, + kernel_size=2, + stride=1, + use_lab=use_lab, + ) + self.stem3 = ConvBNAct( + mid_chs * 2, + mid_chs, + kernel_size=3, + stride=2, + use_lab=use_lab, + ) + self.stem4 = ConvBNAct( + mid_chs, + out_chs, + kernel_size=1, + stride=1, + use_lab=use_lab, + ) + self.pool = nn.MaxPool2d(kernel_size=2, stride=1, ceil_mode=True) + + def forward(self, x): + x = self.stem1(x) + x = F.pad(x, (0, 1, 0, 1)) + x2 = self.stem2a(x) + x2 = F.pad(x2, (0, 1, 0, 1)) + x2 = self.stem2b(x2) + x1 = self.pool(x) + x = torch.cat([x1, x2], dim=1) + x = self.stem3(x) + x = self.stem4(x) + return x + + +class HighPerfGpuBlock(nn.Module): + def __init__( + self, + in_chs, + mid_chs, + out_chs, + layer_num, + kernel_size=3, + 
residual=False, + light_block=False, + use_lab=False, + agg='ese', + drop_path=0., + ): + super().__init__() + self.residual = residual + + self.layers = nn.ModuleList() + for i in range(layer_num): + if light_block: + self.layers.append( + LightConvBNAct( + in_chs if i == 0 else mid_chs, + mid_chs, + kernel_size=kernel_size, + use_lab=use_lab, + ) + ) + else: + self.layers.append( + ConvBNAct( + in_chs if i == 0 else mid_chs, + mid_chs, + kernel_size=kernel_size, + stride=1, + use_lab=use_lab, + ) + ) + + # feature aggregation + total_chs = in_chs + layer_num * mid_chs + if agg == 'se': + aggregation_squeeze_conv = ConvBNAct( + total_chs, + out_chs // 2, + kernel_size=1, + stride=1, + use_lab=use_lab, + ) + aggregation_excitation_conv = ConvBNAct( + out_chs // 2, + out_chs, + kernel_size=1, + stride=1, + use_lab=use_lab, + ) + self.aggregation = nn.Sequential( + aggregation_squeeze_conv, + aggregation_excitation_conv, + ) + else: + aggregation_conv = ConvBNAct( + total_chs, + out_chs, + kernel_size=1, + stride=1, + use_lab=use_lab, + ) + att = EseModule(out_chs) + self.aggregation = nn.Sequential( + aggregation_conv, + att, + ) + + self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() + + def forward(self, x): + identity = x + output = [x] + for layer in self.layers: + x = layer(x) + output.append(x) + x = torch.cat(output, dim=1) + x = self.aggregation(x) + if self.residual: + x = self.drop_path(x) + identity + return x + + +class HighPerfGpuStage(nn.Module): + def __init__( + self, + in_chs, + mid_chs, + out_chs, + block_num, + layer_num, + downsample=True, + stride=2, + light_block=False, + kernel_size=3, + use_lab=False, + agg='ese', + drop_path=0., + ): + super().__init__() + self.downsample = downsample + if downsample: + self.downsample = ConvBNAct( + in_chs, + in_chs, + kernel_size=3, + stride=stride, + groups=in_chs, + use_act=False, + use_lab=use_lab, + ) + else: + self.downsample = nn.Identity() + + blocks_list = [] + for i in range(block_num): + blocks_list.append( + HighPerfGpuBlock( + in_chs if i == 0 else out_chs, + mid_chs, + out_chs, + layer_num, + residual=False if i == 0 else True, + kernel_size=kernel_size, + light_block=light_block, + use_lab=use_lab, + agg=agg, + drop_path=drop_path[i] if isinstance(drop_path, (list, tuple)) else drop_path, + ) + ) + self.blocks = nn.Sequential(*blocks_list) + self.grad_checkpointing= False + + def forward(self, x): + x = self.downsample(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x, flatten=False) + else: + x = self.blocks(x) + return x + + +class ClassifierHead(nn.Module): + def __init__( + self, + in_features: int, + num_classes: int, + pool_type: str = 'avg', + drop_rate: float = 0., + hidden_size: Optional[int] = 2048, + use_lab: bool = False + ): + super(ClassifierHead, self).__init__() + self.num_features = in_features + if pool_type is not None: + if not pool_type: + assert num_classes == 0, 'Classifier head must be removed if pooling is disabled' + + self.global_pool = SelectAdaptivePool2d(pool_type=pool_type) + if hidden_size is not None: + self.num_features = hidden_size + last_conv = nn.Conv2d( + in_features, + hidden_size, + kernel_size=1, + stride=1, + padding=0, + bias=False, + ) + act = nn.ReLU() + if use_lab: + lab = LearnableAffineBlock() + self.last_conv = nn.Sequential(last_conv, act, lab) + else: + self.last_conv = nn.Sequential(last_conv, act) + else: + self.last_conv = nn.Identity() + + self.dropout = nn.Dropout(drop_rate) + self.flatten = 
nn.Flatten(1) if pool_type else nn.Identity() # don't flatten if pooling disabled + self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def reset(self, num_classes: int, pool_type: Optional[str] = None): + if pool_type is not None: + if not pool_type: + assert num_classes == 0, 'Classifier head must be removed if pooling is disabled' + self.global_pool = SelectAdaptivePool2d(pool_type=pool_type) + self.flatten = nn.Flatten(1) if pool_type else nn.Identity() # don't flatten if pooling disabled + + self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.last_conv(x) + x = self.dropout(x) + x = self.flatten(x) + if pre_logits: + return x + x = self.fc(x) + return x + + +class HighPerfGpuNet(nn.Module): + + def __init__( + self, + cfg: Dict, + in_chans: int = 3, + num_classes: int = 1000, + global_pool: str = 'avg', + head_hidden_size: Optional[int] = 2048, + drop_rate: float = 0., + drop_path_rate: float = 0., + use_lab: bool = False, + **kwargs, + ): + super(HighPerfGpuNet, self).__init__() + stem_type = cfg["stem_type"] + stem_chs = cfg["stem_chs"] + stages_cfg = [cfg["stage1"], cfg["stage2"], cfg["stage3"], cfg["stage4"]] + self.num_classes = num_classes + self.drop_rate = drop_rate + self.use_lab = use_lab + + assert stem_type in ['v1', 'v2'] + if stem_type == 'v2': + self.stem = StemV2( + in_chs=in_chans, + mid_chs=stem_chs[0], + out_chs=stem_chs[1], + use_lab=use_lab) + else: + self.stem = StemV1([in_chans] + stem_chs) + + current_stride = 4 + + stages = [] + self.feature_info = [] + block_depths = [c[3] for c in stages_cfg] + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(block_depths)).split(block_depths)] + for i, stage_config in enumerate(stages_cfg): + in_chs, mid_chs, out_chs, block_num, downsample, light_block, kernel_size, layer_num = stage_config + stages += [HighPerfGpuStage( + in_chs=in_chs, + mid_chs=mid_chs, + out_chs=out_chs, + block_num=block_num, + layer_num=layer_num, + downsample=downsample, + light_block=light_block, + kernel_size=kernel_size, + use_lab=use_lab, + agg='ese' if stem_type == 'v1' else 'se', + drop_path=dpr[i], + )] + self.num_features = out_chs + if downsample: + current_stride *= 2 + self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')] + self.stages = nn.Sequential(*stages) + + self.head = ClassifierHead( + self.num_features, + num_classes=num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + hidden_size=head_hidden_size, + use_lab=use_lab + ) + self.head_hidden_size = self.head.num_features + + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.zeros_(m.bias) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else r'^stages\.(\d+).blocks\.(\d+)', + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + 
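# ----------------------------------------------------------------------------------
# Editor's note: illustrative usage sketch, not part of the original hgnet.py source.
# It assumes timm is installed, relies on the hgnetv2_b0 entrypoint registered at the
# bottom of this file, and uses pretrained=False so no weights are downloaded.
import timm
import torch

m = timm.create_model('hgnetv2_b0', pretrained=False, num_classes=0)  # headless: returns pooled features
feats = m(torch.randn(1, 3, 224, 224))   # -> shape [1, m.head_hidden_size]
m.reset_classifier(num_classes=10)       # attach a fresh 10-class linear head via the method defined above
logits = m(torch.randn(1, 3, 224, 224))  # -> shape [1, 10]
# ----------------------------------------------------------------------------------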
+ def forward_features(self, x): + x = self.stem(x) + return self.stages(x) + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +model_cfgs = dict( + # PP-HGNet + hgnet_tiny={ + "stem_type": 'v1', + "stem_chs": [48, 48, 96], + # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num + "stage1": [96, 96, 224, 1, False, False, 3, 5], + "stage2": [224, 128, 448, 1, True, False, 3, 5], + "stage3": [448, 160, 512, 2, True, False, 3, 5], + "stage4": [512, 192, 768, 1, True, False, 3, 5], + }, + hgnet_small={ + "stem_type": 'v1', + "stem_chs": [64, 64, 128], + # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num + "stage1": [128, 128, 256, 1, False, False, 3, 6], + "stage2": [256, 160, 512, 1, True, False, 3, 6], + "stage3": [512, 192, 768, 2, True, False, 3, 6], + "stage4": [768, 224, 1024, 1, True, False, 3, 6], + }, + hgnet_base={ + "stem_type": 'v1', + "stem_chs": [96, 96, 160], + # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num + "stage1": [160, 192, 320, 1, False, False, 3, 7], + "stage2": [320, 224, 640, 2, True, False, 3, 7], + "stage3": [640, 256, 960, 3, True, False, 3, 7], + "stage4": [960, 288, 1280, 2, True, False, 3, 7], + }, + # PP-HGNetv2 + hgnetv2_b0={ + "stem_type": 'v2', + "stem_chs": [16, 16], + # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num + "stage1": [16, 16, 64, 1, False, False, 3, 3], + "stage2": [64, 32, 256, 1, True, False, 3, 3], + "stage3": [256, 64, 512, 2, True, True, 5, 3], + "stage4": [512, 128, 1024, 1, True, True, 5, 3], + }, + hgnetv2_b1={ + "stem_type": 'v2', + "stem_chs": [24, 32], + # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num + "stage1": [32, 32, 64, 1, False, False, 3, 3], + "stage2": [64, 48, 256, 1, True, False, 3, 3], + "stage3": [256, 96, 512, 2, True, True, 5, 3], + "stage4": [512, 192, 1024, 1, True, True, 5, 3], + }, + hgnetv2_b2={ + "stem_type": 'v2', + "stem_chs": [24, 32], + # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num + "stage1": [32, 32, 96, 1, False, False, 3, 4], + "stage2": [96, 64, 384, 1, True, False, 3, 4], + "stage3": [384, 128, 768, 3, True, True, 5, 4], + "stage4": [768, 256, 1536, 1, True, True, 5, 4], + }, + hgnetv2_b3={ + "stem_type": 'v2', + "stem_chs": [24, 32], + # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num + "stage1": [32, 32, 128, 1, False, False, 3, 5], + "stage2": [128, 64, 512, 1, True, False, 3, 5], + "stage3": [512, 128, 1024, 3, True, True, 5, 5], + "stage4": [1024, 256, 2048, 1, True, True, 5, 5], + }, + hgnetv2_b4={ + "stem_type": 'v2', + "stem_chs": [32, 48], + # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num + "stage1": [48, 48, 128, 1, False, False, 3, 6], + "stage2": [128, 96, 512, 1, True, False, 3, 6], + "stage3": [512, 192, 1024, 3, True, True, 5, 6], + "stage4": [1024, 384, 2048, 1, True, True, 5, 6], + }, + hgnetv2_b5={ + "stem_type": 'v2', + "stem_chs": [32, 64], + # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num + "stage1": [64, 64, 128, 1, False, False, 3, 6], + "stage2": [128, 128, 512, 2, True, False, 3, 6], + "stage3": [512, 256, 1024, 5, True, True, 5, 6], + "stage4": [1024, 512, 2048, 2, True, True, 5, 6], + }, + 
hgnetv2_b6={ + "stem_type": 'v2', + "stem_chs": [48, 96], + # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num + "stage1": [96, 96, 192, 2, False, False, 3, 6], + "stage2": [192, 192, 512, 3, True, False, 3, 6], + "stage3": [512, 384, 1024, 6, True, True, 5, 6], + "stage4": [1024, 768, 2048, 3, True, True, 5, 6], + }, +) + + +def _create_hgnet(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) + return build_model_with_cfg( + HighPerfGpuNet, + variant, + pretrained, + model_cfg=model_cfgs[variant], + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.965, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'classifier': 'head.fc', 'first_conv': 'stem.stem1.conv', + 'test_crop_pct': 1.0, 'test_input_size': (3, 288, 288), + **kwargs, + } + + +default_cfgs = generate_default_cfgs({ + 'hgnet_tiny.paddle_in1k': _cfg( + first_conv='stem.stem.0.conv', + hf_hub_id='timm/'), + 'hgnet_tiny.ssld_in1k': _cfg( + first_conv='stem.stem.0.conv', + hf_hub_id='timm/'), + 'hgnet_small.paddle_in1k': _cfg( + first_conv='stem.stem.0.conv', + hf_hub_id='timm/'), + 'hgnet_small.ssld_in1k': _cfg( + first_conv='stem.stem.0.conv', + hf_hub_id='timm/'), + 'hgnet_base.ssld_in1k': _cfg( + first_conv='stem.stem.0.conv', + hf_hub_id='timm/'), + 'hgnetv2_b0.ssld_stage2_ft_in1k': _cfg( + hf_hub_id='timm/'), + 'hgnetv2_b0.ssld_stage1_in22k_in1k': _cfg( + hf_hub_id='timm/'), + 'hgnetv2_b1.ssld_stage2_ft_in1k': _cfg( + hf_hub_id='timm/'), + 'hgnetv2_b1.ssld_stage1_in22k_in1k': _cfg( + hf_hub_id='timm/'), + 'hgnetv2_b2.ssld_stage2_ft_in1k': _cfg( + hf_hub_id='timm/'), + 'hgnetv2_b2.ssld_stage1_in22k_in1k': _cfg( + hf_hub_id='timm/'), + 'hgnetv2_b3.ssld_stage2_ft_in1k': _cfg( + hf_hub_id='timm/'), + 'hgnetv2_b3.ssld_stage1_in22k_in1k': _cfg( + hf_hub_id='timm/'), + 'hgnetv2_b4.ssld_stage2_ft_in1k': _cfg( + hf_hub_id='timm/'), + 'hgnetv2_b4.ssld_stage1_in22k_in1k': _cfg( + hf_hub_id='timm/'), + 'hgnetv2_b5.ssld_stage2_ft_in1k': _cfg( + hf_hub_id='timm/'), + 'hgnetv2_b5.ssld_stage1_in22k_in1k': _cfg( + hf_hub_id='timm/'), + 'hgnetv2_b6.ssld_stage2_ft_in1k': _cfg( + hf_hub_id='timm/'), + 'hgnetv2_b6.ssld_stage1_in22k_in1k': _cfg( + hf_hub_id='timm/'), +}) + + +@register_model +def hgnet_tiny(pretrained=False, **kwargs) -> HighPerfGpuNet: + return _create_hgnet('hgnet_tiny', pretrained=pretrained, **kwargs) + + +@register_model +def hgnet_small(pretrained=False, **kwargs) -> HighPerfGpuNet: + return _create_hgnet('hgnet_small', pretrained=pretrained, **kwargs) + + +@register_model +def hgnet_base(pretrained=False, **kwargs) -> HighPerfGpuNet: + return _create_hgnet('hgnet_base', pretrained=pretrained, **kwargs) + + +@register_model +def hgnetv2_b0(pretrained=False, **kwargs) -> HighPerfGpuNet: + return _create_hgnet('hgnetv2_b0', pretrained=pretrained, use_lab=True, **kwargs) + + +@register_model +def hgnetv2_b1(pretrained=False, **kwargs) -> HighPerfGpuNet: + return _create_hgnet('hgnetv2_b1', pretrained=pretrained, use_lab=True, **kwargs) + + +@register_model +def hgnetv2_b2(pretrained=False, **kwargs) -> HighPerfGpuNet: + return _create_hgnet('hgnetv2_b2', pretrained=pretrained, use_lab=True, **kwargs) + + +@register_model +def hgnetv2_b3(pretrained=False, **kwargs) -> HighPerfGpuNet: + return _create_hgnet('hgnetv2_b3', 
pretrained=pretrained, use_lab=True, **kwargs) + + +@register_model +def hgnetv2_b4(pretrained=False, **kwargs) -> HighPerfGpuNet: + return _create_hgnet('hgnetv2_b4', pretrained=pretrained, **kwargs) + + +@register_model +def hgnetv2_b5(pretrained=False, **kwargs) -> HighPerfGpuNet: + return _create_hgnet('hgnetv2_b5', pretrained=pretrained, **kwargs) + + +@register_model +def hgnetv2_b6(pretrained=False, **kwargs) -> HighPerfGpuNet: + return _create_hgnet('hgnetv2_b6', pretrained=pretrained, **kwargs) diff --git a/pytorch-image-models/timm/models/hiera.py b/pytorch-image-models/timm/models/hiera.py new file mode 100644 index 0000000000000000000000000000000000000000..34d6670fbef586574745fee59ef84d1c7a0babf1 --- /dev/null +++ b/pytorch-image-models/timm/models/hiera.py @@ -0,0 +1,996 @@ +""" An PyTorch implementation of Hiera + +Adapted for timm from originals at https://github.com/facebookresearch/hiera +""" + +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# +# Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles +# +# Chaitanya Ryali, Yuan-Ting Hu, Daniel Bolya, Chen Wei, Haoqi Fan, +# Po-Yao Huang, Vaibhav Aggarwal, Arkabandhu Chowdhury, Omid Poursaeed, +# Judy Hoffman, Jitendra Malik, Yanghao Li, Christoph Feichtenhofer. +# +# Paper: https://arxiv.org/abs/2306.00989/ +# +# References: +# slowfast: https://github.com/facebookresearch/SlowFast +# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm +# -------------------------------------------------------- +import math +from functools import partial +from typing import Callable, Dict, List, Optional, Tuple, Type, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.checkpoint import checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import DropPath, Mlp, LayerScale, ClNormMlpClassifierHead, use_fused_attn, \ + _assert, get_norm_layer, to_2tuple, init_weight_vit, init_weight_jax + +from ._registry import generate_default_cfgs, register_model +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._features_fx import register_notrace_function +from ._manipulate import named_apply + + +__all__ = ['Hiera'] + + +def conv_nd(n: int) -> Type[nn.Module]: + """ + Returns a conv with nd (e.g., Conv2d for n=2). Work up to n=3. + If you wanted a 4d Hiera, you could probably just implement this for n=4. (no promises) + """ + return [nn.Identity, nn.Conv1d, nn.Conv2d, nn.Conv3d][n] + + +@register_notrace_function +def get_resized_mask(target_size: List[int], mask: torch.Tensor) -> torch.Tensor: + # target_size: [(T), (H), W] + # (spatial) mask: [B, C, (t), (h), w] + if mask is None: + return mask + + _assert(len(mask.shape[2:]) == len(target_size), "mask spatial shape and target_size must match.") + if mask.shape[2:] != target_size: + return F.interpolate(mask.float(), size=target_size) + return mask + + +def undo_windowing( + x: torch.Tensor, + shape: List[int], + mu_shape: List[int], +) -> torch.Tensor: + """ + Restore spatial organization by undoing windowed organization of mask units. + + Args: + x: organized by mask units windows, e.g. in 2d [B, #MUy*#MUx, MUy, MUx, C] + shape: current spatial shape, if it were not organized into mask unit + windows, e.g. 
in 2d [B, #MUy*MUy, #MUx*MUx, C]. + mu_shape: current mask unit shape, e.g. in 2d [MUy, MUx] + Returns: + x: e.g. in 2d, [B, #MUy*MUy, #MUx*MUx, C] + """ + D = len(shape) + B, C = x.shape[0], x.shape[-1] + # [B, #MUy*#MUx, MUy, MUx, C] -> [B, #MUy, #MUx, MUy, MUx, C] + num_MUs = [s // mu for s, mu in zip(shape, mu_shape)] + x = x.view(B, *num_MUs, *mu_shape, C) + + # [B, #MUy, #MUx, MUy, MUx, C] -> [B, #MUy*MUy, #MUx*MUx, C] + permute = ( + [0] + + sum([list(p) for p in zip(range(1, 1 + D), range(1 + D, 1 + 2 * D))], []) + + [len(x.shape) - 1] + ) + x = x.permute(permute).reshape(B, *shape, C) + + return x + + +class Unroll(nn.Module): + """ + Reorders the tokens such that patches are contiguous in memory. + E.g., given [B, (H, W), C] and stride of (Sy, Sx), this will re-order the tokens as + [B, (Sy, Sx, H // Sy, W // Sx), C] + + This allows operations like Max2d to be computed as x.view(B, Sx*Sy, -1, C).max(dim=1). + Not only is this faster, but it also makes it easy to support inputs of arbitrary + dimensions in addition to patch-wise sparsity. + + Performing this operation multiple times in sequence puts entire windows as contiguous + in memory. For instance, if you applied the stride (2, 2) 3 times, entire windows of + size 8x8 would be contiguous in memory, allowing operations like mask unit attention + computed easily and efficiently, while also allowing max to be applied sequentially. + + Note: This means that intermediate values of the model are not in HxW order, so they + need to be re-rolled if you want to use the intermediate values as a HxW feature map. + The last block of the network is fine though, since by then the strides are all consumed. + """ + + def __init__( + self, + input_size: Tuple[int, ...], + patch_stride: Tuple[int, ...], + unroll_schedule: List[Tuple[int, ...]], + ): + super().__init__() + self.size = [i // s for i, s in zip(input_size, patch_stride)] + self.schedule = unroll_schedule + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Input: Flattened patch embeddings [B, N, C] + Output: Patch embeddings [B, N, C] permuted such that [B, 4, N//4, C].max(1) etc. performs MaxPoolNd + """ + B, _, C = x.shape + cur_size = self.size + x = x.view(*([B] + cur_size + [C])) + + for strides in self.schedule: + # Move patches with the given strides to the batch dimension + + # Create a view of the tensor with the patch stride as separate dims + # For example in 2d: [B, H // Sy, Sy, W // Sx, Sx, C] + cur_size = [i // s for i, s in zip(cur_size, strides)] + new_shape = [B] + sum([[i, s] for i, s in zip(cur_size, strides)], []) + [C] + x = x.view(new_shape) + + # Move the patch stride into the batch dimension + # For example in 2d: [B, Sy, Sx, H // Sy, W // Sx, C] + L = len(new_shape) + permute = [0] + list(range(2, L - 1, 2)) + list(range(1, L - 1, 2)) + [L - 1] + x = x.permute(permute) + + # Now finally flatten the relevant dims into the batch dimension + x = x.flatten(0, len(strides)) + B *= math.prod(strides) + + x = x.reshape(-1, math.prod(self.size), C) + return x + + +class Reroll(nn.Module): + """ + Undos the "unroll" operation so that you can use intermediate features. + """ + + def __init__( + self, + input_size: Tuple[int, ...], + patch_stride: Tuple[int, ...], + unroll_schedule: List[Tuple[int, ...]], + stage_ends: List[int], + q_pool: int, + ): + super().__init__() + self.size = [i // s for i, s in zip(input_size, patch_stride)] + + # The first stage has to reverse everything + # The next stage has to reverse all but the first unroll, etc. 
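        # (editor's note, illustrative trace assuming the default 224x224 input with
        #  patch_stride=(4, 4), q_stride=(2, 2), q_pool=3, stages=(2, 3, 16, 3)) the loop
        #  below produces the following schedule/size pairs:
        #    blocks 0-1   (stage 0): schedule [(2, 2), (2, 2), (2, 2)], size [56, 56]
        #    blocks 2-4   (stage 1): schedule [(2, 2), (2, 2)],         size [28, 28]
        #    blocks 5-20  (stage 2): schedule [(2, 2)],                 size [14, 14]
        #    blocks 21-23 (stage 3): schedule [],                       size [7, 7]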
+ self.schedule = {} + size = self.size + for i in range(stage_ends[-1] + 1): + self.schedule[i] = unroll_schedule, size + # schedule unchanged if no pooling at a stage end + if i in stage_ends[:q_pool]: + if len(unroll_schedule) > 0: + size = [n // s for n, s in zip(size, unroll_schedule[0])] + unroll_schedule = unroll_schedule[1:] + + def forward( + self, + x: torch.Tensor, + block_idx: int, + mask: torch.Tensor = None + ) -> torch.Tensor: + """ + Roll the given tensor back up to spatial order assuming it's from the given block. + + If no mask is provided: + - Returns [B, H, W, C] for 2d, [B, T, H, W, C] for 3d, etc. + If a mask is provided: + - Returns [B, #MUs, MUy, MUx, C] for 2d, etc. + """ + schedule, size = self.schedule[block_idx] + B, N, C = x.shape + + D = len(size) + cur_mu_shape = [1] * D + + for strides in schedule: + # Extract the current patch from N + x = x.view(B, *strides, N // math.prod(strides), *cur_mu_shape, C) + + # Move that patch into the current MU + # Example in 2d: [B, Sy, Sx, N//(Sy*Sx), MUy, MUx, C] -> [B, N//(Sy*Sx), Sy, MUy, Sx, MUx, C] + L = len(x.shape) + permute = ( + [0, 1 + D] + + sum([list(p) for p in zip(range(1, 1 + D), range(1 + D + 1, L - 1))], []) + + [L - 1] + ) + x = x.permute(permute) + + # Reshape to [B, N//(Sy*Sx), *MU, C] + for i in range(D): + cur_mu_shape[i] *= strides[i] + x = x.reshape(B, -1, *cur_mu_shape, C) + N = x.shape[1] + + # Current shape (e.g., 2d: [B, #MUy*#MUx, MUy, MUx, C]) + x = x.view(B, N, *cur_mu_shape, C) + + # If masked, return [B, #MUs, MUy, MUx, C] + if mask is not None: + return x + + # If not masked, we can return [B, H, W, C] + x = undo_windowing(x, size, cur_mu_shape) + + return x + + +class MaskUnitAttention(nn.Module): + """ + Computes either Mask Unit or Global Attention. Also is able to perform q pooling. + + Note: this assumes the tokens have already been flattened and unrolled into mask units. + See `Unroll` for more details. + """ + fused_attn: torch.jit.Final[bool] + + def __init__( + self, + dim: int, + dim_out: int, + heads: int, + q_stride: int = 1, + window_size: int = 0, + use_mask_unit_attn: bool = False, + ): + """ + Args: + - dim, dim_out: The input and output feature dimensions. + - heads: The number of attention heads. + - q_stride: If greater than 1, pool q with this stride. The stride should be flattened (e.g., 2x2 = 4). + - window_size: The current (flattened) size of a mask unit *after* pooling (if any). + - use_mask_unit_attn: Use Mask Unit or Global Attention. + """ + super().__init__() + + self.dim = dim + self.dim_out = dim_out + self.heads = heads + self.q_stride = q_stride + self.head_dim = dim_out // heads + self.scale = self.head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.qkv = nn.Linear(dim, 3 * dim_out) + self.proj = nn.Linear(dim_out, dim_out) + + self.window_size = window_size + self.use_mask_unit_attn = use_mask_unit_attn + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ Input should be of shape [batch, tokens, channels]. """ + B, N, _ = x.shape + num_windows = (N // (self.q_stride * self.window_size)) if self.use_mask_unit_attn else 1 + qkv = self.qkv(x).reshape(B, -1, num_windows, 3, self.heads, self.head_dim).permute(3, 0, 4, 2, 1, 5) + q, k, v = qkv.unbind(0) + + if self.q_stride > 1: + # Refer to Unroll to see how this performs a maxpool-Nd + q = q.view(B, self.heads, num_windows, self.q_stride, -1, self.head_dim).amax(dim=3) + + if self.fused_attn: + # Note: the original paper did *not* use SDPA, it's a free boost! 
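            # (editor's note) F.scaled_dot_product_attention computes
            # softmax(q @ k.transpose(-2, -1) / sqrt(head_dim)) @ v, i.e. the same result as the
            # manual branch below (self.scale == head_dim ** -0.5); it just fuses the kernels and
            # can dispatch to flash / memory-efficient attention backends.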
+ x = F.scaled_dot_product_attention(q, k, v) + else: + attn = (q * self.scale) @ k.transpose(-1, -2) + attn = attn.softmax(dim=-1) + x = attn @ v + + x = x.transpose(1, 3).reshape(B, -1, self.dim_out) + x = self.proj(x) + return x + + +class HieraBlock(nn.Module): + def __init__( + self, + dim: int, + dim_out: int, + heads: int, + mlp_ratio: float = 4.0, + drop_path: float = 0.0, + init_values: Optional[float] = None, + norm_layer: nn.Module = nn.LayerNorm, + act_layer: nn.Module = nn.GELU, + q_stride: int = 1, + window_size: int = 0, + use_expand_proj: bool = True, + use_mask_unit_attn: bool = False, + ): + super().__init__() + self.dim = dim + self.dim_out = dim_out + + self.norm1 = norm_layer(dim) + if dim != dim_out: + self.do_expand = True + if use_expand_proj: + self.proj = nn.Linear(dim, dim_out) + else: + assert dim_out == dim * 2 + self.proj = None + else: + self.do_expand = False + self.proj = None + self.attn = MaskUnitAttention( + dim, + dim_out, + heads, + q_stride, + window_size, + use_mask_unit_attn + ) + self.ls1 = LayerScale(dim_out, init_values=init_values) if init_values is not None else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0 else nn.Identity() + + self.norm2 = norm_layer(dim_out) + self.mlp = Mlp(dim_out, int(dim_out * mlp_ratio), act_layer=act_layer) + self.ls2 = LayerScale(dim_out, init_values=init_values) if init_values is not None else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0 else nn.Identity() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # Attention + Q Pooling + x_norm = self.norm1(x) + if self.do_expand: + if self.proj is not None: + x = self.proj(x_norm) + x = x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).amax(dim=1) # max-pool + else: + x = torch.cat([ + x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).amax(dim=1), # max-pool + x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).mean(dim=1), # avg-pool + ], + dim=-1, + ) + x = x + self.drop_path1(self.ls1(self.attn(x_norm))) + + # MLP + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class PatchEmbed(nn.Module): + """Patch embed that supports any number of spatial dimensions (1d, 2d, 3d).""" + + def __init__( + self, + dim_in: int, + dim_out: int, + kernel: Tuple[int, ...], + stride: Tuple[int, ...], + padding: Tuple[int, ...], + reshape: bool = True, + ): + super().__init__() + + # Support any number of spatial dimensions + self.spatial_dims = len(kernel) + self.reshape = reshape + self.proj = conv_nd(self.spatial_dims)( + dim_in, + dim_out, + kernel_size=kernel, + stride=stride, + padding=padding, + ) + + def forward( + self, + x: torch.Tensor, + mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + if mask is not None: + mask = get_resized_mask(target_size=x.shape[2:], mask=mask) + x = self.proj(x * mask.to(torch.bool)) + else: + x = self.proj(x) + if self.reshape: + x = x.reshape(x.shape[0], x.shape[1], -1).transpose(2, 1) + return x + + +class Hiera(nn.Module): + + def __init__( + self, + img_size: Tuple[int, ...] = (224, 224), + in_chans: int = 3, + embed_dim: int = 96, # initial embed dim + num_heads: int = 1, # initial number of heads + num_classes: int = 1000, + global_pool: str = 'avg', + stages: Tuple[int, ...] = (2, 3, 16, 3), + q_pool: int = 3, # number of q_pool stages + q_stride: Tuple[int, ...] = (2, 2), + mask_unit_size: Tuple[int, ...] = (8, 8), # must divide q_stride ** (#stages-1) + # mask_unit_attn: which stages use mask unit attention? 
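        # (editor's note: with the default (True, True, False, False) below, the two highest-
        #  resolution stages are flagged for windowed mask-unit attention and the later, pooled
        #  stages for global attention; the block-construction loop further down applies the
        #  flag with a one-block lag, as noted there)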
+ mask_unit_attn: Tuple[bool, ...] = (True, True, False, False), + use_expand_proj: bool = True, + dim_mul: float = 2.0, + head_mul: float = 2.0, + patch_kernel: Tuple[int, ...] = (7, 7), + patch_stride: Tuple[int, ...] = (4, 4), + patch_padding: Tuple[int, ...] = (3, 3), + mlp_ratio: float = 4.0, + drop_path_rate: float = 0.0, + init_values: Optional[float] = None, + fix_init: bool = True, + weight_init: str = '', + norm_layer: Union[str, nn.Module] = "LayerNorm", + drop_rate: float = 0.0, + patch_drop_rate: float = 0.0, + head_init_scale: float = 0.001, + sep_pos_embed: bool = False, + abs_win_pos_embed: bool = False, + global_pos_size: Tuple[int, int] = (14, 14), + ): + super().__init__() + self.num_classes = num_classes + self.grad_checkpointing = False + norm_layer = get_norm_layer(norm_layer) + if isinstance(img_size, int): + img_size = to_2tuple(img_size) + + self.patch_stride = patch_stride + self.tokens_spatial_shape = [i // s for i, s in zip(img_size, patch_stride)] + num_tokens = math.prod(self.tokens_spatial_shape) + flat_mu_size = math.prod(mask_unit_size) + flat_q_stride = math.prod(q_stride) + assert q_pool < len(stages) + self.q_pool, self.q_stride = q_pool, q_stride + self.mu_size, self.mask_unit_size = flat_mu_size, mask_unit_size + self.mask_spatial_shape = [i // s for i, s in zip(self.tokens_spatial_shape, self.mask_unit_size)] + self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)] + self.patch_drop_rate = patch_drop_rate + + self.patch_embed = PatchEmbed( + in_chans, + embed_dim, + patch_kernel, + patch_stride, + patch_padding, + ) + + self.pos_embed: Optional[nn.Parameter] = None + self.pos_embed_win: Optional[nn.Parameter] = None + self.pos_embed_spatial: Optional[nn.Parameter] = None + self.pos_embed_temporal: Optional[nn.Parameter] = None + if sep_pos_embed: + self.pos_embed_spatial = nn.Parameter( + torch.zeros(1, self.tokens_spatial_shape[1] * self.tokens_spatial_shape[2], embed_dim) + ) + self.pos_embed_temporal = nn.Parameter( + torch.zeros(1, self.tokens_spatial_shape[0], embed_dim) + ) + else: + if abs_win_pos_embed: + # absolute win, params NCHW to make tile & interpolate more natural before add & reshape + self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *global_pos_size)) + self.pos_embed_win = nn.Parameter(torch.zeros(1, embed_dim, *mask_unit_size)) + else: + self.pos_embed = nn.Parameter(torch.zeros(1, num_tokens, embed_dim)) + + # Setup roll and reroll modules + self.unroll = Unroll( + img_size, + patch_stride, + [q_stride] * len(self.stage_ends[:-1]) + ) + self.reroll = Reroll( + img_size, + patch_stride, + [q_stride] * len(self.stage_ends[:-1]), + self.stage_ends, + q_pool, + ) + # q_pool locations + q_pool_blocks = [x + 1 for x in self.stage_ends[:q_pool]] + + # Transformer blocks + cur_stage = 0 + depth = sum(stages) + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList() + self.feature_info = [] + for i in range(depth): + dim_out = embed_dim + # Mask unit or global attention. 
+ # Lag by 1 block, so that global attention, + # applied post pooling on lower resolution + use_mask_unit_attn = mask_unit_attn[cur_stage] + + if i - 1 in self.stage_ends: + dim_out = int(embed_dim * dim_mul) + num_heads = int(num_heads * head_mul) + cur_stage += 1 + if i in q_pool_blocks: + flat_mu_size //= flat_q_stride + + block = HieraBlock( + dim=embed_dim, + dim_out=dim_out, + heads=num_heads, + mlp_ratio=mlp_ratio, + drop_path=dpr[i], + init_values=init_values, + norm_layer=norm_layer, + q_stride=(flat_q_stride if i in q_pool_blocks else 1), + window_size=flat_mu_size, + use_expand_proj=use_expand_proj, + use_mask_unit_attn=use_mask_unit_attn, + ) + embed_dim = dim_out + if i in self.stage_ends: + self.feature_info += [ + dict(num_chs=dim_out, reduction=2**(cur_stage+2), module=f'blocks.{self.stage_ends[cur_stage]}')] + self.blocks.append(block) + + self.num_features = self.head_hidden_size = embed_dim + self.head = ClNormMlpClassifierHead( + embed_dim, + num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + norm_layer=norm_layer, + input_fmt='NLC', + ) + + # Initialize everything + if sep_pos_embed: + nn.init.trunc_normal_(self.pos_embed_spatial, std=0.02) + nn.init.trunc_normal_(self.pos_embed_temporal, std=0.02) + else: + if self.pos_embed is not None: + nn.init.trunc_normal_(self.pos_embed, std=0.02) + if self.pos_embed_win is not None: + nn.init.trunc_normal_(self.pos_embed_win, std=0.02) + + if weight_init != 'skip': + init_fn = init_weight_jax if weight_init == 'jax' else init_weight_vit + init_fn = partial(init_fn, classifier_name='head.fc') + named_apply(init_fn, self) + if fix_init: + self.fix_init_weight() + if isinstance(self.head.fc, nn.Linear): + self.head.fc.weight.data.mul_(head_init_scale) + self.head.fc.bias.data.mul_(head_init_scale) + + def fix_init_weight(self): + def rescale(param, _layer_id): + param.div_(math.sqrt(2.0 * _layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + @torch.jit.ignore + def no_weight_decay(self): + if self.pos_embed is not None: + return ["pos_embed"] + elif self.pos_embed_abs is not None: + return ['pos_embed_abs', 'pos_embed_win'] + else: + return ["pos_embed_spatial", "pos_embed_temporal"] + + @torch.jit.ignore + def group_matcher(self, coarse: bool = False) -> Dict: + return dict( + stem=r'^pos_embed|pos_embed_spatial|pos_embed_temporal|pos_embed_abs|pos_embed_win|patch_embed', + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable: bool = True) -> None: + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None, reset_other: bool = False): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool, reset_other=reset_other) + + def get_random_mask(self, x: torch.Tensor, mask_ratio: float) -> torch.Tensor: + """ + Generates a random mask, mask_ratio fraction are dropped. + 1 is *keep*, 0 is *remove*. Useful for MAE, FLIP, etc. 
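        Example (editor's illustration, default 224x224 config): with patch_stride=(4, 4) the
        token grid is 56x56, and mask_unit_size=(8, 8) gives (56 // 8) * (56 // 8) = 49 mask
        units; mask_ratio=0.6 keeps int(49 * 0.4) = 19 of them, so the returned mask has shape
        [B, 49] with 19 True entries per row.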
+ """ + B = x.shape[0] + # Tokens selected for masking at mask unit level + num_windows = math.prod(self.mask_spatial_shape) # num_mask_units + len_keep = int(num_windows * (1 - mask_ratio)) + noise = torch.rand(B, num_windows, device=x.device) + + # Sort noise for each sample + ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove + ids_restore = torch.argsort(ids_shuffle, dim=1) + + # Generate the binary mask: 1 is *keep*, 0 is *remove* + # Note this is opposite to original MAE + mask = torch.zeros([B, num_windows], device=x.device) + mask[:, :len_keep] = 1 + # Unshuffle to get the binary mask + mask = torch.gather(mask, dim=1, index=ids_restore) + + return mask.bool() + + def _pos_embed(self, x) -> torch.Tensor: + if self.pos_embed_win is not None: + # absolute win position embedding, from + # Window Attention is Bugged: How not to Interpolate Position Embeddings (https://arxiv.org/abs/2311.05613) + pos_embed_win = self.pos_embed_win.tile(self.mask_spatial_shape) + pos_embed = F.interpolate( + self.pos_embed, + size=pos_embed_win.shape[-2:], + mode='bicubic', + antialias=True, + ) + pos_embed = pos_embed + pos_embed_win + pos_embed = pos_embed.flatten(2).transpose(1, 2) + elif self.pos_embed is not None: + pos_embed = self.pos_embed + else: + pos_embed = ( + self.pos_embed_spatial.repeat(1, self.tokens_spatial_shape[0], 1) + + + torch.repeat_interleave( + self.pos_embed_temporal, + self.tokens_spatial_shape[1] * self.tokens_spatial_shape[2], + dim=1, + ) + ) + x = x + pos_embed + return x + + def forward_intermediates( + self, + x: torch.Tensor, + mask: Optional[torch.Tensor] = None, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = True, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + coarse: bool = True, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to all intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert not norm, 'normalization of features not supported' + assert output_fmt in ('NCHW', 'NHWC'), 'Output format must be one of NCHW, NHWC.' 
+ if coarse: + take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) + take_indices = [self.stage_ends[i] for i in take_indices] + max_index = self.stage_ends[max_index] + else: + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + + if mask is not None: + patch_mask = mask.view(x.shape[0], 1, *self.mask_spatial_shape) # B, C, *mask_spatial_shape + else: + patch_mask = None + x = self.patch_embed(x, mask=patch_mask) + x = self._pos_embed(x) + x = self.unroll(x) + + # Discard masked tokens + if mask is not None: + x = x[mask[..., None].tile(1, self.mu_size, x.shape[2])].view(x.shape[0], -1, x.shape[-1]) + + intermediates = [] + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + blocks = self.blocks + else: + blocks = self.blocks[:max_index + 1] + for i, blk in enumerate(blocks): + x = blk(x) + if i in take_indices: + x_int = self.reroll(x, i, mask=mask) + intermediates.append(x_int.permute(0, 3, 1, 2) if output_fmt == 'NCHW' else x_int) + + if intermediates_only: + return intermediates + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + coarse: bool = True, + ): + """ Prune layers not required for specified intermediates. + """ + if coarse: + take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) + max_index = self.stage_ends[max_index] + else: + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + self.blocks = self.blocks[:max_index + 1] # truncate blocks + if prune_head: + self.head.reset(0, reset_other=True) + return take_indices + + def forward_features( + self, + x: torch.Tensor, + mask: Optional[torch.Tensor] = None, + return_intermediates: bool = False, + ) -> torch.Tensor: + """ + mask should be a boolean tensor of shape [B, #MUt*#MUy*#MUx] where #MU are the number of mask units in that dim. + Note: 1 in mask is *keep*, 0 is *remove*; mask.sum(dim=-1) should be the same across the batch. + """ + if self.training and self.patch_drop_rate > 0: + # using mask for something like 'patch dropout' via mask-units in supervised train / fine-tune + assert mask is None + mask = self.get_random_mask(x, mask_ratio=self.patch_drop_rate) + + if mask is not None: + patch_mask = mask.view(x.shape[0], 1, *self.mask_spatial_shape) # B, C, *mask_spatial_shape + else: + patch_mask = None + x = self.patch_embed(x, mask=patch_mask) + x = self._pos_embed(x) + x = self.unroll(x) + + # Discard masked tokens + if mask is not None: + x = x[mask[..., None].tile(1, self.mu_size, x.shape[2])].view(x.shape[0], -1, x.shape[-1]) + + intermediates = [] + for i, blk in enumerate(self.blocks): + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(blk, x) + else: + x = blk(x) + if return_intermediates and i in self.stage_ends: + intermediates.append(self.reroll(x, i, mask=mask)) + + # x may not always be in spatial order here. + # e.g. 
if q_pool = 2, mask_unit_size = (8, 8), and + # q_stride = (2, 2), not all unrolls were consumed, + # intermediates[-1] is x in spatial order + if return_intermediates: + return x, intermediates + + return x + + def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor: + x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + return x + + def forward( + self, + x: torch.Tensor, + mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + x = self.forward_features(x, mask=mask) + if mask is None: + x = self.forward_head(x) + return x + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + "hiera_tiny_224.mae_in1k_ft_in1k": _cfg( + hf_hub_id='timm/', + license='cc-by-nc-4.0', + ), + "hiera_tiny_224.mae": _cfg( + hf_hub_id='timm/', + license='cc-by-nc-4.0', + num_classes=0, + ), + + "hiera_small_224.mae_in1k_ft_in1k": _cfg( + hf_hub_id='timm/', + license='cc-by-nc-4.0', + ), + "hiera_small_224.mae": _cfg( + hf_hub_id='timm/', + license='cc-by-nc-4.0', + num_classes=0, + ), + + "hiera_base_224.mae_in1k_ft_in1k": _cfg( + hf_hub_id='timm/', + license='cc-by-nc-4.0', + ), + "hiera_base_224.mae": _cfg( + hf_hub_id='timm/', + license='cc-by-nc-4.0', + num_classes=0, + ), + + "hiera_base_plus_224.mae_in1k_ft_in1k": _cfg( + hf_hub_id='timm/', + license='cc-by-nc-4.0', + ), + "hiera_base_plus_224.mae": _cfg( + hf_hub_id='timm/', + license='cc-by-nc-4.0', + num_classes=0, + ), + + "hiera_large_224.mae_in1k_ft_in1k": _cfg( + hf_hub_id='timm/', + license='cc-by-nc-4.0', + ), + "hiera_large_224.mae": _cfg( + hf_hub_id='timm/', + license='cc-by-nc-4.0', + num_classes=0, + ), + + "hiera_huge_224.mae_in1k_ft_in1k": _cfg( + hf_hub_id='timm/', + license='cc-by-nc-4.0', + ), + "hiera_huge_224.mae": _cfg( + hf_hub_id='timm/', + license='cc-by-nc-4.0', + num_classes=0, + ), + + "hiera_small_abswin_256.sbb2_e200_in12k_ft_in1k": _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95, + ), + "hiera_small_abswin_256.sbb2_pd_e200_in12k_ft_in1k": _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95, + ), + "hiera_small_abswin_256.sbb2_e200_in12k": _cfg( + hf_hub_id='timm/', + num_classes=11821, + input_size=(3, 256, 256), crop_pct=0.95, + ), + "hiera_small_abswin_256.sbb2_pd_e200_in12k": _cfg( + hf_hub_id='timm/', + num_classes=11821, + input_size=(3, 256, 256), crop_pct=0.95, + ), + "hiera_base_abswin_256.untrained": _cfg( + # hf_hub_id='timm/', + input_size=(3, 256, 256), crop_pct=0.95, + ), +}) + + +def checkpoint_filter_fn(state_dict, model=None): + state_dict = state_dict.get('model_state', state_dict) + output = {} + for k, v in state_dict.items(): + # if k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: + # # To resize pos embedding when using model at different size from pretrained weights + # from timm.layers import resample_abs_pos_embed + # v = resample_abs_pos_embed( + # v, + # new_size=(64, 64), + # num_prefix_tokens=0, + # verbose=True, + # ) + if 'head.projection.' 
in k: + k = k.replace('head.projection.', 'head.fc.') + if k.startswith('encoder_norm.'): + k = k.replace('encoder_norm.', 'head.norm.') + elif k.startswith('norm.'): + k = k.replace('norm.', 'head.norm.') + if k == 'pos_embed_abs': + k = 'pos_embed' + output[k] = v + return output + + +def _create_hiera(variant: str, pretrained: bool = False, **kwargs) -> Hiera: + out_indices = kwargs.pop('out_indices', 4) + + return build_model_with_cfg( + Hiera, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + + +@register_model +def hiera_tiny_224(pretrained=False, **kwargs): + model_args = dict(embed_dim=96, num_heads=1, stages=(1, 2, 7, 2)) + return _create_hiera('hiera_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def hiera_small_224(pretrained=False, **kwargs): + model_args = dict(embed_dim=96, num_heads=1, stages=(1, 2, 11, 2)) + return _create_hiera('hiera_small_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def hiera_base_224(pretrained=False, **kwargs): + model_args = dict(embed_dim=96, num_heads=1, stages=(2, 3, 16, 3)) + return _create_hiera('hiera_base_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def hiera_base_plus_224(pretrained=False, **kwargs): + model_args = dict(embed_dim=112, num_heads=2, stages=(2, 3, 16, 3)) + return _create_hiera('hiera_base_plus_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def hiera_large_224(pretrained=False, **kwargs): + model_args = dict(embed_dim=144, num_heads=2, stages=(2, 6, 36, 4)) + return _create_hiera('hiera_large_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def hiera_huge_224(pretrained=False, **kwargs): + model_args = dict(embed_dim=256, num_heads=4, stages=(2, 6, 36, 4)) + return _create_hiera('hiera_huge_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def hiera_small_abswin_256(pretrained=False, **kwargs): + model_args = dict( + embed_dim=96, num_heads=1, stages=(1, 2, 11, 2), abs_win_pos_embed=True, global_pos_size=(16, 16), + init_values=1e-5, weight_init='jax', use_expand_proj=False, + ) + return _create_hiera('hiera_small_abswin_256', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def hiera_base_abswin_256(pretrained=False, **kwargs): + model_args = dict( + embed_dim=96, num_heads=1, stages=(2, 3, 16, 3), abs_win_pos_embed=True, init_values=1e-5, weight_init='jax') + return _create_hiera('hiera_base_abswin_256', pretrained=pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/hieradet_sam2.py b/pytorch-image-models/timm/models/hieradet_sam2.py new file mode 100644 index 0000000000000000000000000000000000000000..d9585a526c6640b45a3e66094fdd618f5bad2bdc --- /dev/null +++ b/pytorch-image-models/timm/models/hieradet_sam2.py @@ -0,0 +1,635 @@ +import math +from copy import deepcopy +from functools import partial +from typing import Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.jit import Final + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import PatchEmbed, Mlp, DropPath, ClNormMlpClassifierHead, LayerScale, \ + get_norm_layer, get_act_layer, init_weight_jax, init_weight_vit, to_2tuple, use_fused_attn + +from ._builder import build_model_with_cfg +from 
._features import feature_take_indices +from ._manipulate import named_apply, checkpoint_seq, adapt_input_conv +from ._registry import generate_default_cfgs, register_model, register_model_deprecations + + +def window_partition(x, window_size: Tuple[int, int]): + """ + Partition into non-overlapping windows with padding if needed. + Args: + x (tensor): input tokens with [B, H, W, C]. + window_size (int): window size. + Returns: + windows: windows after partition with [B * num_windows, window_size, window_size, C]. + (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) + return windows + + +def window_unpartition(windows: torch.Tensor, window_size: Tuple[int, int], hw: Tuple[int, int]): + """ + Window unpartition into original sequences and removing padding. + Args: + x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. + window_size (int): window size. + hw (Tuple): original height and width (H, W) before padding. + Returns: + x: unpartitioned sequences with [B, H, W, C]. + """ + H, W = hw + B = windows.shape[0] // (H * W // window_size[0] // window_size[1]) + x = windows.view(B, H // window_size[0], W // window_size[1], window_size[0], window_size[1], -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +def _calc_pad(H: int, W: int, window_size: Tuple[int, int]) -> Tuple[int, int, int, int]: + pad_h = (window_size[0] - H % window_size[0]) % window_size[0] + pad_w = (window_size[1] - W % window_size[1]) % window_size[1] + Hp, Wp = H + pad_h, W + pad_w + return Hp, Wp, pad_h, pad_w + + +class MultiScaleAttention(nn.Module): + fused_attn: torch.jit.Final[bool] + + def __init__( + self, + dim: int, + dim_out: int, + num_heads: int, + q_pool: nn.Module = None, + ): + super().__init__() + self.dim = dim + self.dim_out = dim_out + self.num_heads = num_heads + head_dim = dim_out // num_heads + self.scale = head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.q_pool = q_pool + self.qkv = nn.Linear(dim, dim_out * 3) + self.proj = nn.Linear(dim_out, dim_out) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, H, W, _ = x.shape + + # qkv with shape (B, H * W, 3, nHead, C) + qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1) + + # q, k, v with shape (B, H * W, nheads, C) + q, k, v = torch.unbind(qkv, 2) + + # Q pooling (for downsample at stage changes) + if self.q_pool is not None: + q = q.reshape(B, H, W, -1).permute(0, 3, 1, 2) # to BCHW for pool + q = self.q_pool(q).permute(0, 2, 3, 1) + H, W = q.shape[1:3] # downsampled shape + q = q.reshape(B, H * W, self.num_heads, -1) + + # Torch's SDPA expects [B, nheads, H*W, C] so we transpose + q = q.transpose(1, 2) + k = k.transpose(1, 2) + v = v.transpose(1, 2) + if self.fused_attn: + x = F.scaled_dot_product_attention(q, k, v) + else: + q = q * self.scale + attn = q @ k.transpose(-1, -2) + attn = attn.softmax(dim=-1) + x = attn @ v + + # Transpose back + x = x.transpose(1, 2).reshape(B, H, W, -1) + + x = self.proj(x) + return x + + +class MultiScaleBlock(nn.Module): + def __init__( + self, + dim: int, + dim_out: int, + num_heads: int, + mlp_ratio: float = 4.0, + q_stride: Optional[Tuple[int, int]] = None, + norm_layer: Union[nn.Module, str] = "LayerNorm", + act_layer: Union[nn.Module, str] = "GELU", + window_size: int = 0, + init_values: Optional[float] = 
None, + drop_path: float = 0.0, + ): + super().__init__() + norm_layer = get_norm_layer(norm_layer) + act_layer = get_act_layer(act_layer) + self.window_size = to_2tuple(window_size) + self.is_windowed = any(self.window_size) + self.dim = dim + self.dim_out = dim_out + self.q_stride = q_stride + + if dim != dim_out: + self.proj = nn.Linear(dim, dim_out) + else: + self.proj = nn.Identity() + self.pool = None + if self.q_stride: + # note make a different instance for this Module so that it's not shared with attn module + self.pool = nn.MaxPool2d( + kernel_size=q_stride, + stride=q_stride, + ceil_mode=False, + ) + + self.norm1 = norm_layer(dim) + self.attn = MultiScaleAttention( + dim, + dim_out, + num_heads=num_heads, + q_pool=deepcopy(self.pool), + ) + self.ls1 = LayerScale(dim_out, init_values) if init_values is not None else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.norm2 = norm_layer(dim_out) + self.mlp = Mlp( + dim_out, + int(dim_out * mlp_ratio), + act_layer=act_layer, + ) + self.ls2 = LayerScale(dim_out, init_values) if init_values is not None else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x # B, H, W, C + x = self.norm1(x) + + # Skip connection + if self.dim != self.dim_out: + shortcut = self.proj(x) + if self.pool is not None: + shortcut = shortcut.permute(0, 3, 1, 2) + shortcut = self.pool(shortcut).permute(0, 2, 3, 1) + + # Window partition + window_size = self.window_size + H, W = x.shape[1:3] + Hp, Wp = H, W # keep torchscript happy + if self.is_windowed: + Hp, Wp, pad_h, pad_w = _calc_pad(H, W, window_size) + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + x = window_partition(x, window_size) + + # Window Attention + Q Pooling (if stage change) + x = self.attn(x) + if self.q_stride is not None: + # Shapes have changed due to Q pooling + window_size = (self.window_size[0] // self.q_stride[0], self.window_size[1] // self.q_stride[1]) + H, W = shortcut.shape[1:3] + Hp, Wp, pad_h, pad_w = _calc_pad(H, W, window_size) + + # Reverse window partition + if self.is_windowed: + x = window_unpartition(x, window_size, (Hp, Wp)) + x = x[:, :H, :W, :].contiguous() # unpad + + x = shortcut + self.drop_path1(self.ls1(x)) + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class HieraPatchEmbed(nn.Module): + """ + Image to Patch Embedding. + """ + + def __init__( + self, + kernel_size: Tuple[int, ...] = (7, 7), + stride: Tuple[int, ...] = (4, 4), + padding: Tuple[int, ...] = (3, 3), + in_chans: int = 3, + embed_dim: int = 768, + ): + """ + Args: + kernel_size (Tuple): kernel size of the projection layer. + stride (Tuple): stride of the projection layer. + padding (Tuple): padding size of the projection layer. + in_chans (int): Number of input image channels. + embed_dim (int): embed_dim (int): Patch embedding dimension. + """ + super().__init__() + self.proj = nn.Conv2d( + in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.proj(x) + # B C H W -> B H W C + x = x.permute(0, 2, 3, 1) + return x + + +class HieraDet(nn.Module): + """ + Reference: https://arxiv.org/abs/2306.00989 + """ + + def __init__( + self, + in_chans: int = 3, + num_classes: int = 1000, + global_pool: str = 'avg', + embed_dim: int = 96, # initial embed dim + num_heads: int = 1, # initial number of heads + patch_kernel: Tuple[int, ...] 
= (7, 7), + patch_stride: Tuple[int, ...] = (4, 4), + patch_padding: Tuple[int, ...] = (3, 3), + patch_size: Optional[Tuple[int, ...]] = None, + q_pool: int = 3, # number of q_pool stages + q_stride: Tuple[int, int] = (2, 2), # downsample stride bet. stages + stages: Tuple[int, ...] = (2, 3, 16, 3), # blocks per stage + dim_mul: float = 2.0, # dim_mul factor at stage shift + head_mul: float = 2.0, # head_mul factor at stage shift + global_pos_size: Tuple[int, int] = (7, 7), + # window size per stage, when not using global att. + window_spec: Tuple[int, ...] = ( + 8, + 4, + 14, + 7, + ), + # global attn in these blocks + global_att_blocks: Tuple[int, ...] = ( + 12, + 16, + 20, + ), + init_values: Optional[float] = None, + weight_init: str = '', + fix_init: bool = True, + head_init_scale: float = 0.001, + drop_rate: float = 0.0, + drop_path_rate: float = 0.0, # stochastic depth + norm_layer: Union[nn.Module, str] = "LayerNorm", + act_layer: Union[nn.Module, str] = "GELU", + ): + super().__init__() + norm_layer = get_norm_layer(norm_layer) + act_layer = get_act_layer(act_layer) + assert len(stages) == len(window_spec) + self.num_classes = num_classes + self.window_spec = window_spec + self.output_fmt = 'NHWC' + + depth = sum(stages) + self.q_stride = q_stride + self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)] + assert 0 <= q_pool <= len(self.stage_ends[:-1]) + self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool] + + if patch_size is not None: + # use a non-overlapping vit style patch embed + self.patch_embed = PatchEmbed( + img_size=None, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + output_fmt='NHWC', + dynamic_img_pad=True, + ) + else: + self.patch_embed = HieraPatchEmbed( + kernel_size=patch_kernel, + stride=patch_stride, + padding=patch_padding, + in_chans=in_chans, + embed_dim=embed_dim, + ) + # Which blocks have global att? 
+ self.global_att_blocks = global_att_blocks + + # Windowed positional embedding (https://arxiv.org/abs/2311.05613) + self.global_pos_size = global_pos_size + self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *self.global_pos_size)) + self.pos_embed_window = nn.Parameter(torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0])) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + cur_stage = 0 + self.blocks = nn.Sequential() + self.feature_info = [] + for i in range(depth): + dim_out = embed_dim + # lags by a block, so first block of + # next stage uses an initial window size + # of previous stage and final window size of current stage + window_size = self.window_spec[cur_stage] + + if self.global_att_blocks is not None: + window_size = 0 if i in self.global_att_blocks else window_size + + if i - 1 in self.stage_ends: + dim_out = int(embed_dim * dim_mul) + num_heads = int(num_heads * head_mul) + cur_stage += 1 + + block = MultiScaleBlock( + dim=embed_dim, + dim_out=dim_out, + num_heads=num_heads, + drop_path=dpr[i], + q_stride=self.q_stride if i in self.q_pool_blocks else None, + window_size=window_size, + norm_layer=norm_layer, + act_layer=act_layer, + ) + + embed_dim = dim_out + self.blocks.append(block) + if i in self.stage_ends: + self.feature_info += [ + dict(num_chs=dim_out, reduction=2**(cur_stage+2), module=f'blocks.{self.stage_ends[cur_stage]}')] + + self.num_features = self.head_hidden_size = embed_dim + self.head = ClNormMlpClassifierHead( + embed_dim, + num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + norm_layer=norm_layer, + ) + + # Initialize everything + if self.pos_embed is not None: + nn.init.trunc_normal_(self.pos_embed, std=0.02) + + if self.pos_embed_window is not None: + nn.init.trunc_normal_(self.pos_embed_window, std=0.02) + + if weight_init != 'skip': + init_fn = init_weight_jax if weight_init == 'jax' else init_weight_vit + init_fn = partial(init_fn, classifier_name='head.fc') + named_apply(init_fn, self) + + if fix_init: + self.fix_init_weight() + + if isinstance(self.head, ClNormMlpClassifierHead) and isinstance(self.head.fc, nn.Linear): + self.head.fc.weight.data.mul_(head_init_scale) + self.head.fc.bias.data.mul_(head_init_scale) + + def _pos_embed(self, x: torch.Tensor) -> torch.Tensor: + h, w = x.shape[1:3] + window_embed = self.pos_embed_window + pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic") + tile_h = pos_embed.shape[-2] // window_embed.shape[-2] + tile_w = pos_embed.shape[-1] // window_embed.shape[-1] + pos_embed = pos_embed + window_embed.tile((tile_h, tile_w)) + pos_embed = pos_embed.permute(0, 2, 3, 1) + return x + pos_embed + + def fix_init_weight(self): + def rescale(param, _layer_id): + param.div_(math.sqrt(2.0 * _layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + @torch.jit.ignore + def no_weight_decay(self): + return ['pos_embed', 'pos_embed_window'] + + @torch.jit.ignore + def group_matcher(self, coarse: bool = False) -> Dict: + return dict( + stem=r'^pos_embed|pos_embed_window|patch_embed', + blocks=[(r'^blocks\.(\d+)', None)] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable: bool = True) -> None: + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None, reset_other: bool = 
False): + self.num_classes = num_classes + self.head.reset(num_classes, pool_type=global_pool, reset_other=reset_other) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = True, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + coarse: bool = True, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to all intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + coarse: Take coarse features (stage ends) if true, otherwise all block featrures + Returns: + + """ + assert not norm, 'normalization of features not supported' + assert output_fmt in ('NCHW', 'NHWC'), 'Output format must be one of NCHW, NHWC.' + if coarse: + take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) + take_indices = [self.stage_ends[i] for i in take_indices] + max_index = self.stage_ends[max_index] + else: + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + + x = self.patch_embed(x) + x = self._pos_embed(x) + + intermediates = [] + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + blocks = self.blocks + else: + blocks = self.blocks[:max_index + 1] + for i, blk in enumerate(blocks): + x = blk(x) + if i in take_indices: + x_out = x.permute(0, 3, 1, 2) if output_fmt == 'NCHW' else x + intermediates.append(x_out) + + if intermediates_only: + return intermediates + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + coarse: bool = True, + ): + """ Prune layers not required for specified intermediates. + """ + if coarse: + take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) + max_index = self.stage_ends[max_index] + else: + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + self.blocks = self.blocks[:max_index + 1] # truncate blocks + if prune_head: + self.head.reset(0, reset_other=prune_norm) + return take_indices + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + x = self.patch_embed(x) # BHWC + x = self._pos_embed(x) + for i, blk in enumerate(self.blocks): + x = blk(x) + return x + + def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor: + x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + return x + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +# NOTE sam2 appears to use 1024x1024 for all models, but T, S, & B+ have windows that fit multiples of 224. 
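For orientation, a minimal usage sketch of the HieraDet backbone defined above (editorial illustration, not part of the patch; it assumes timm is installed with these entrypoints registered and uses the randomly initialized `hieradet_small` so no weights are downloaded):

    import torch
    import timm

    # Classification forward
    model = timm.create_model('hieradet_small', pretrained=False, num_classes=10)
    logits = model(torch.randn(1, 3, 256, 256))   # -> [1, 10]

    # Per-stage feature maps via forward_intermediates / the 'getter' feature wrapper
    backbone = timm.create_model('hieradet_small', pretrained=False, features_only=True)
    for f in backbone(torch.randn(1, 3, 256, 256)):
        print(f.shape)   # stage-end features, strides 4/8/16/32 with the default config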
+def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 0, 'input_size': (3, 896, 896), 'pool_size': (28, 28), + 'crop_pct': 1.0, 'interpolation': 'bicubic', 'min_input_size': (3, 224, 224), + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + "sam2_hiera_tiny.r224": _cfg( + hf_hub_id='facebook/sam2-hiera-tiny', + hf_hub_filename='sam2_hiera_tiny.pt', + input_size=(3, 224, 224), pool_size=(7, 7), + ), # FIXME reduced res for testing + "sam2_hiera_tiny.r896": _cfg( + hf_hub_id='facebook/sam2-hiera-tiny', + hf_hub_filename='sam2_hiera_tiny.pt', + ), + "sam2_hiera_small": _cfg( + hf_hub_id='facebook/sam2-hiera-small', + hf_hub_filename='sam2_hiera_small.pt', + ), + "sam2_hiera_base_plus": _cfg( + hf_hub_id='facebook/sam2-hiera-base-plus', + hf_hub_filename='sam2_hiera_base_plus.pt', + ), + "sam2_hiera_large": _cfg( + hf_hub_id='facebook/sam2-hiera-large', + hf_hub_filename='sam2_hiera_large.pt', + min_input_size=(3, 256, 256), + input_size=(3, 1024, 1024), pool_size=(32, 32), + ), + "hieradet_small.untrained": _cfg( + num_classes=1000, + input_size=(3, 256, 256), pool_size=(8, 8), + ), +}) + + +def checkpoint_filter_fn(state_dict, model=None, prefix=''): + state_dict = state_dict.get('model', state_dict) + + output = {} + for k, v in state_dict.items(): + if k.startswith(prefix): + k = k.replace(prefix, '') + else: + continue + k = k.replace('mlp.layers.0', 'mlp.fc1') + k = k.replace('mlp.layers.1', 'mlp.fc2') + output[k] = v + return output + + +def _create_hiera_det(variant: str, pretrained: bool = False, **kwargs) -> HieraDet: + out_indices = kwargs.pop('out_indices', 4) + checkpoint_prefix = '' + if 'sam2' in variant: + # SAM2 pretrained weights have no classifier or final norm-layer (`head.norm`) + # This is workaround loading with num_classes=0 w/o removing norm-layer. + kwargs.setdefault('pretrained_strict', False) + checkpoint_prefix = 'image_encoder.trunk.' 
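To make the remapping above concrete: `checkpoint_filter_fn` keeps only keys under the given prefix, strips it, and renames the original MLP sub-layer names to timm's `fc1`/`fc2`. A small sketch (editorial illustration, not part of the patch; the key below is a hypothetical example following the SAM2 naming handled here):

    import torch

    sd = {'image_encoder.trunk.blocks.0.mlp.layers.0.weight': torch.zeros(1)}
    print(checkpoint_filter_fn(sd, prefix='image_encoder.trunk.'))
    # expected: {'blocks.0.mlp.fc1.weight': tensor([0.])}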
+ return build_model_with_cfg( + HieraDet, + variant, + pretrained, + pretrained_filter_fn=partial(checkpoint_filter_fn, prefix=checkpoint_prefix), + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + + +@register_model +def sam2_hiera_tiny(pretrained=False, **kwargs): + model_args = dict(stages=(1, 2, 7, 2), global_att_blocks=(5, 7, 9)) + return _create_hiera_det('sam2_hiera_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def sam2_hiera_small(pretrained=False, **kwargs): + model_args = dict(stages=(1, 2, 11, 2), global_att_blocks=(7, 10, 13)) + return _create_hiera_det('sam2_hiera_small', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def sam2_hiera_base_plus(pretrained=False, **kwargs): + model_args = dict(embed_dim=112, num_heads=2, global_pos_size=(14, 14)) + return _create_hiera_det('sam2_hiera_base_plus', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def sam2_hiera_large(pretrained=False, **kwargs): + model_args = dict( + embed_dim=144, + num_heads=2, + stages=(2, 6, 36, 4), + global_att_blocks=(23, 33, 43), + window_spec=(8, 4, 16, 8), + ) + return _create_hiera_det('sam2_hiera_large', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def hieradet_small(pretrained=False, **kwargs): + model_args = dict(stages=(1, 2, 11, 2), global_att_blocks=(7, 10, 13), window_spec=(8, 4, 16, 8), init_values=1e-5) + return _create_hiera_det('hieradet_small', pretrained=pretrained, **dict(model_args, **kwargs)) + + +# @register_model +# def hieradet_base(pretrained=False, **kwargs): +# model_args = dict(window_spec=(8, 4, 16, 8)) +# return _create_hiera_det('hieradet_base', pretrained=pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/hub.py b/pytorch-image-models/timm/models/hub.py new file mode 100644 index 0000000000000000000000000000000000000000..85abb264915b0e2696fe0a04ea3f8a5cc9fec020 --- /dev/null +++ b/pytorch-image-models/timm/models/hub.py @@ -0,0 +1,4 @@ +from ._hub import * + +import warnings +warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", FutureWarning) diff --git a/pytorch-image-models/timm/models/inception_next.py b/pytorch-image-models/timm/models/inception_next.py new file mode 100644 index 0000000000000000000000000000000000000000..3c4906aa05d1af9eeaef8a9c68626d5a20deb0fc --- /dev/null +++ b/pytorch-image-models/timm/models/inception_next.py @@ -0,0 +1,445 @@ +""" +InceptionNeXt paper: https://arxiv.org/abs/2303.16900 +Original implementation & weights from: https://github.com/sail-sg/inceptionnext +""" + +from functools import partial +from typing import Optional + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import trunc_normal_, DropPath, to_2tuple, get_padding, SelectAdaptivePool2d +from ._builder import build_model_with_cfg +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs + +__all__ = ['MetaNeXt'] + + +class InceptionDWConv2d(nn.Module): + """ Inception depthwise convolution + """ + + def __init__( + self, + in_chs, + square_kernel_size=3, + band_kernel_size=11, + branch_ratio=0.125, + dilation=1, + ): + super().__init__() + + gc = int(in_chs * branch_ratio) # channel numbers of a convolution branch + square_padding = get_padding(square_kernel_size, dilation=dilation) + band_padding = get_padding(band_kernel_size, 
dilation=dilation) + self.dwconv_hw = nn.Conv2d( + gc, gc, square_kernel_size, + padding=square_padding, dilation=dilation, groups=gc) + self.dwconv_w = nn.Conv2d( + gc, gc, (1, band_kernel_size), + padding=(0, band_padding), dilation=(1, dilation), groups=gc) + self.dwconv_h = nn.Conv2d( + gc, gc, (band_kernel_size, 1), + padding=(band_padding, 0), dilation=(dilation, 1), groups=gc) + self.split_indexes = (in_chs - 3 * gc, gc, gc, gc) + + def forward(self, x): + x_id, x_hw, x_w, x_h = torch.split(x, self.split_indexes, dim=1) + return torch.cat(( + x_id, + self.dwconv_hw(x_hw), + self.dwconv_w(x_w), + self.dwconv_h(x_h) + ), dim=1, + ) + + +class ConvMlp(nn.Module): + """ MLP using 1x1 convs that keeps spatial dims + copied from timm: https://github.com/huggingface/pytorch-image-models/blob/v0.6.11/timm/models/layers/mlp.py + """ + + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.ReLU, + norm_layer=None, + bias=True, + drop=0., + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + bias = to_2tuple(bias) + + self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0]) + self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity() + self.act = act_layer() + self.drop = nn.Dropout(drop) + self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.norm(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + return x + + +class MlpClassifierHead(nn.Module): + """ MLP classification head + """ + + def __init__( + self, + in_features, + num_classes=1000, + pool_type='avg', + mlp_ratio=3, + act_layer=nn.GELU, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + drop=0., + bias=True + ): + super().__init__() + self.use_conv = False + self.in_features = in_features + self.num_features = hidden_features = int(mlp_ratio * in_features) + + assert pool_type, 'Cannot disable pooling' + self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True) + + self.fc1 = nn.Linear(in_features * self.global_pool.feat_mult(), hidden_features, bias=bias) + self.act = act_layer() + self.norm = norm_layer(hidden_features) + self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias) + self.drop = nn.Dropout(drop) + + def reset(self, num_classes: int, pool_type: Optional[str] = None): + if pool_type is not None: + assert pool_type, 'Cannot disable pooling' + self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True) + + self.fc2 = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.fc1(x) + x = self.act(x) + x = self.norm(x) + x = self.drop(x) + return x if pre_logits else self.fc2(x) + + +class MetaNeXtBlock(nn.Module): + """ MetaNeXtBlock Block + Args: + dim (int): Number of input channels. + drop_path (float): Stochastic depth rate. Default: 0.0 + ls_init_value (float): Init value for Layer Scale. Default: 1e-6. 
+ """ + + def __init__( + self, + dim, + dilation=1, + token_mixer=InceptionDWConv2d, + norm_layer=nn.BatchNorm2d, + mlp_layer=ConvMlp, + mlp_ratio=4, + act_layer=nn.GELU, + ls_init_value=1e-6, + drop_path=0., + + ): + super().__init__() + self.token_mixer = token_mixer(dim, dilation=dilation) + self.norm = norm_layer(dim) + self.mlp = mlp_layer(dim, int(mlp_ratio * dim), act_layer=act_layer) + self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value else None + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + shortcut = x + x = self.token_mixer(x) + x = self.norm(x) + x = self.mlp(x) + if self.gamma is not None: + x = x.mul(self.gamma.reshape(1, -1, 1, 1)) + x = self.drop_path(x) + shortcut + return x + + +class MetaNeXtStage(nn.Module): + def __init__( + self, + in_chs, + out_chs, + stride=2, + depth=2, + dilation=(1, 1), + drop_path_rates=None, + ls_init_value=1.0, + token_mixer=InceptionDWConv2d, + act_layer=nn.GELU, + norm_layer=None, + mlp_ratio=4, + ): + super().__init__() + self.grad_checkpointing = False + if stride > 1 or dilation[0] != dilation[1]: + self.downsample = nn.Sequential( + norm_layer(in_chs), + nn.Conv2d( + in_chs, + out_chs, + kernel_size=2, + stride=stride, + dilation=dilation[0], + ), + ) + else: + self.downsample = nn.Identity() + + drop_path_rates = drop_path_rates or [0.] * depth + stage_blocks = [] + for i in range(depth): + stage_blocks.append(MetaNeXtBlock( + dim=out_chs, + dilation=dilation[1], + drop_path=drop_path_rates[i], + ls_init_value=ls_init_value, + token_mixer=token_mixer, + act_layer=act_layer, + norm_layer=norm_layer, + mlp_ratio=mlp_ratio, + )) + self.blocks = nn.Sequential(*stage_blocks) + + def forward(self, x): + x = self.downsample(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class MetaNeXt(nn.Module): + r""" MetaNeXt + A PyTorch impl of : `InceptionNeXt: When Inception Meets ConvNeXt` - https://arxiv.org/abs/2303.16900 + + Args: + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + depths (tuple(int)): Number of blocks at each stage. Default: (3, 3, 9, 3) + dims (tuple(int)): Feature dimension at each stage. Default: (96, 192, 384, 768) + token_mixers: Token mixer function. Default: nn.Identity + norm_layer: Normalization layer. Default: nn.BatchNorm2d + act_layer: Activation function for MLP. Default: nn.GELU + mlp_ratios (int or tuple(int)): MLP ratios. Default: (4, 4, 4, 3) + drop_rate (float): Head dropout rate + drop_path_rate (float): Stochastic depth rate. Default: 0. + ls_init_value (float): Init value for Layer Scale. Default: 1e-6. 
+ """ + + def __init__( + self, + in_chans=3, + num_classes=1000, + global_pool='avg', + output_stride=32, + depths=(3, 3, 9, 3), + dims=(96, 192, 384, 768), + token_mixers=InceptionDWConv2d, + norm_layer=nn.BatchNorm2d, + act_layer=nn.GELU, + mlp_ratios=(4, 4, 4, 3), + drop_rate=0., + drop_path_rate=0., + ls_init_value=1e-6, + ): + super().__init__() + + num_stage = len(depths) + if not isinstance(token_mixers, (list, tuple)): + token_mixers = [token_mixers] * num_stage + if not isinstance(mlp_ratios, (list, tuple)): + mlp_ratios = [mlp_ratios] * num_stage + self.num_classes = num_classes + self.global_pool = global_pool + self.drop_rate = drop_rate + self.feature_info = [] + + self.stem = nn.Sequential( + nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4), + norm_layer(dims[0]) + ) + + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + prev_chs = dims[0] + curr_stride = 4 + dilation = 1 + # feature resolution stages, each consisting of multiple residual blocks + self.stages = nn.Sequential() + for i in range(num_stage): + stride = 2 if curr_stride == 2 or i > 0 else 1 + if curr_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + curr_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + out_chs = dims[i] + self.stages.append(MetaNeXtStage( + prev_chs, + out_chs, + stride=stride if i > 0 else 1, + dilation=(first_dilation, dilation), + depth=depths[i], + drop_path_rates=dp_rates[i], + ls_init_value=ls_init_value, + act_layer=act_layer, + token_mixer=token_mixers[i], + norm_layer=norm_layer, + mlp_ratio=mlp_ratios[i], + )) + prev_chs = out_chs + self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')] + self.num_features = prev_chs + self.head = MlpClassifierHead(self.num_features, num_classes, pool_type=self.global_pool, drop=drop_rate) + self.head_hidden_size = self.head.num_features + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, (nn.Conv2d, nn.Linear)): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+)\.downsample', (0,)), # blocks + (r'^stages\.(\d+)\.blocks\.(\d+)', None), + ] + ) + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc2 + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def no_weight_decay(self): + return set() + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'head.fc2', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'inception_next_atto.sail_in1k': _cfg( + hf_hub_id='timm/', + # 
url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_atto.pth', + ), + 'inception_next_tiny.sail_in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_tiny.pth', + ), + 'inception_next_small.sail_in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_small.pth', + ), + 'inception_next_base.sail_in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_base.pth', + crop_pct=0.95, + ), + 'inception_next_base.sail_in1k_384': _cfg( + hf_hub_id='timm/', + # url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_base_384.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, + ), +}) + + +def _create_inception_next(variant, pretrained=False, **kwargs): + model = build_model_with_cfg( + MetaNeXt, variant, pretrained, + feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), + **kwargs, + ) + return model + + +@register_model +def inception_next_atto(pretrained=False, **kwargs): + model_args = dict( + depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), + token_mixers=partial(InceptionDWConv2d, band_kernel_size=9, branch_ratio=0.25) + ) + return _create_inception_next('inception_next_atto', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def inception_next_tiny(pretrained=False, **kwargs): + model_args = dict( + depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), + token_mixers=InceptionDWConv2d, + ) + return _create_inception_next('inception_next_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def inception_next_small(pretrained=False, **kwargs): + model_args = dict( + depths=(3, 3, 27, 3), dims=(96, 192, 384, 768), + token_mixers=InceptionDWConv2d, + ) + return _create_inception_next('inception_next_small', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def inception_next_base(pretrained=False, **kwargs): + model_args = dict( + depths=(3, 3, 27, 3), dims=(128, 256, 512, 1024), + token_mixers=InceptionDWConv2d, + ) + return _create_inception_next('inception_next_base', pretrained=pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/inception_resnet_v2.py b/pytorch-image-models/timm/models/inception_resnet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..7fdfee41edb0c691b43030ec42dbd50674a3cbfe --- /dev/null +++ b/pytorch-image-models/timm/models/inception_resnet_v2.py @@ -0,0 +1,341 @@ +""" Pytorch Inception-Resnet-V2 implementation +Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is +based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) +""" +from functools import partial +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from timm.layers import create_classifier, ConvNormAct +from ._builder import build_model_with_cfg +from ._manipulate import flatten_modules +from ._registry import register_model, generate_default_cfgs, register_model_deprecations + +__all__ = ['InceptionResnetV2'] + + +class Mixed_5b(nn.Module): + def __init__(self, conv_block=None): + super(Mixed_5b, self).__init__() + conv_block = conv_block or ConvNormAct + + self.branch0 = conv_block(192, 96, kernel_size=1, stride=1) + + self.branch1 = 
nn.Sequential( + conv_block(192, 48, kernel_size=1, stride=1), + conv_block(48, 64, kernel_size=5, stride=1, padding=2) + ) + + self.branch2 = nn.Sequential( + conv_block(192, 64, kernel_size=1, stride=1), + conv_block(64, 96, kernel_size=3, stride=1, padding=1), + conv_block(96, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + conv_block(192, 64, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Block35(nn.Module): + def __init__(self, scale=1.0, conv_block=None): + super(Block35, self).__init__() + self.scale = scale + conv_block = conv_block or ConvNormAct + + self.branch0 = conv_block(320, 32, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + conv_block(320, 32, kernel_size=1, stride=1), + conv_block(32, 32, kernel_size=3, stride=1, padding=1) + ) + + self.branch2 = nn.Sequential( + conv_block(320, 32, kernel_size=1, stride=1), + conv_block(32, 48, kernel_size=3, stride=1, padding=1), + conv_block(48, 64, kernel_size=3, stride=1, padding=1) + ) + + self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1) + self.act = nn.ReLU() + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + out = self.conv2d(out) + out = out * self.scale + x + out = self.act(out) + return out + + +class Mixed_6a(nn.Module): + def __init__(self, conv_block=None): + super(Mixed_6a, self).__init__() + conv_block = conv_block or ConvNormAct + + self.branch0 = conv_block(320, 384, kernel_size=3, stride=2) + + self.branch1 = nn.Sequential( + conv_block(320, 256, kernel_size=1, stride=1), + conv_block(256, 256, kernel_size=3, stride=1, padding=1), + conv_block(256, 384, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class Block17(nn.Module): + def __init__(self, scale=1.0, conv_block=None): + super(Block17, self).__init__() + self.scale = scale + conv_block = conv_block or ConvNormAct + + self.branch0 = conv_block(1088, 192, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + conv_block(1088, 128, kernel_size=1, stride=1), + conv_block(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)), + conv_block(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)) + ) + + self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1) + self.act = nn.ReLU() + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + out = self.conv2d(out) + out = out * self.scale + x + out = self.act(out) + return out + + +class Mixed_7a(nn.Module): + def __init__(self, conv_block=None): + super(Mixed_7a, self).__init__() + conv_block = conv_block or ConvNormAct + + self.branch0 = nn.Sequential( + conv_block(1088, 256, kernel_size=1, stride=1), + conv_block(256, 384, kernel_size=3, stride=2) + ) + + self.branch1 = nn.Sequential( + conv_block(1088, 256, kernel_size=1, stride=1), + conv_block(256, 288, kernel_size=3, stride=2) + ) + + self.branch2 = nn.Sequential( + conv_block(1088, 256, kernel_size=1, stride=1), + conv_block(256, 288, kernel_size=3, stride=1, padding=1), + conv_block(288, 320, kernel_size=3, stride=2) + ) + + self.branch3 = nn.MaxPool2d(3, stride=2) + + def 
forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Block8(nn.Module): + + def __init__(self, scale=1.0, no_relu=False, conv_block=None): + super(Block8, self).__init__() + self.scale = scale + conv_block = conv_block or ConvNormAct + + self.branch0 = conv_block(2080, 192, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + conv_block(2080, 192, kernel_size=1, stride=1), + conv_block(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)), + conv_block(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + ) + + self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1) + self.relu = None if no_relu else nn.ReLU() + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + out = self.conv2d(out) + out = out * self.scale + x + if self.relu is not None: + out = self.relu(out) + return out + + +class InceptionResnetV2(nn.Module): + def __init__( + self, + num_classes=1000, + in_chans=3, + drop_rate=0., + output_stride=32, + global_pool='avg', + norm_layer='batchnorm2d', + norm_eps=1e-3, + act_layer='relu', + ): + super(InceptionResnetV2, self).__init__() + self.num_classes = num_classes + self.num_features = self.head_hidden_size = 1536 + assert output_stride == 32 + conv_block = partial( + ConvNormAct, + padding=0, + norm_layer=norm_layer, + act_layer=act_layer, + norm_kwargs=dict(eps=norm_eps), + act_kwargs=dict(inplace=True), + ) + + self.conv2d_1a = conv_block(in_chans, 32, kernel_size=3, stride=2) + self.conv2d_2a = conv_block(32, 32, kernel_size=3, stride=1) + self.conv2d_2b = conv_block(32, 64, kernel_size=3, stride=1, padding=1) + self.feature_info = [dict(num_chs=64, reduction=2, module='conv2d_2b')] + + self.maxpool_3a = nn.MaxPool2d(3, stride=2) + self.conv2d_3b = conv_block(64, 80, kernel_size=1, stride=1) + self.conv2d_4a = conv_block(80, 192, kernel_size=3, stride=1) + self.feature_info += [dict(num_chs=192, reduction=4, module='conv2d_4a')] + + self.maxpool_5a = nn.MaxPool2d(3, stride=2) + self.mixed_5b = Mixed_5b(conv_block=conv_block) + self.repeat = nn.Sequential(*[Block35(scale=0.17, conv_block=conv_block) for _ in range(10)]) + self.feature_info += [dict(num_chs=320, reduction=8, module='repeat')] + + self.mixed_6a = Mixed_6a(conv_block=conv_block) + self.repeat_1 = nn.Sequential(*[Block17(scale=0.10, conv_block=conv_block) for _ in range(20)]) + self.feature_info += [dict(num_chs=1088, reduction=16, module='repeat_1')] + + self.mixed_7a = Mixed_7a(conv_block=conv_block) + self.repeat_2 = nn.Sequential(*[Block8(scale=0.20, conv_block=conv_block) for _ in range(9)]) + + self.block8 = Block8(no_relu=True, conv_block=conv_block) + self.conv2d_7b = conv_block(2080, self.num_features, kernel_size=1, stride=1) + self.feature_info += [dict(num_chs=self.num_features, reduction=32, module='conv2d_7b')] + + self.global_pool, self.head_drop, self.classif = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))} + module_map.pop(('classif',)) + + def _matcher(name): + if any([name.startswith(n) for n in ('conv2d_1', 'conv2d_2')]): + return 0 + elif any([name.startswith(n) for n in ('conv2d_3', 'conv2d_4')]): + return 1 + elif any([name.startswith(n) for n in ('block8', 'conv2d_7')]): + return 
len(module_map) + 1 + else: + for k in module_map.keys(): + if k == tuple(name.split('.')[:len(k)]): + return module_map[k] + return float('inf') + return _matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, "checkpointing not supported" + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.classif + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv2d_1a(x) + x = self.conv2d_2a(x) + x = self.conv2d_2b(x) + x = self.maxpool_3a(x) + x = self.conv2d_3b(x) + x = self.conv2d_4a(x) + x = self.maxpool_5a(x) + x = self.mixed_5b(x) + x = self.repeat(x) + x = self.mixed_6a(x) + x = self.repeat_1(x) + x = self.mixed_7a(x) + x = self.repeat_2(x) + x = self.block8(x) + x = self.conv2d_7b(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.head_drop(x) + return x if pre_logits else self.classif(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_inception_resnet_v2(variant, pretrained=False, **kwargs): + return build_model_with_cfg(InceptionResnetV2, variant, pretrained, **kwargs) + + +default_cfgs = generate_default_cfgs({ + # ported from http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz + 'inception_resnet_v2.tf_in1k': { + 'hf_hub_id': 'timm/', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.8975, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', + }, + # As per https://arxiv.org/abs/1705.07204 and + # ported from http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz + 'inception_resnet_v2.tf_ens_adv_in1k': { + 'hf_hub_id': 'timm/', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.8975, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', + } +}) + + +@register_model +def inception_resnet_v2(pretrained=False, **kwargs) -> InceptionResnetV2: + return _create_inception_resnet_v2('inception_resnet_v2', pretrained=pretrained, **kwargs) + + +register_model_deprecations(__name__, { + 'ens_adv_inception_resnet_v2': 'inception_resnet_v2.tf_ens_adv_in1k', +}) \ No newline at end of file diff --git a/pytorch-image-models/timm/models/inception_v3.py b/pytorch-image-models/timm/models/inception_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..8cb1a151dffaea77e449eaf1d1fde742a4243805 --- /dev/null +++ b/pytorch-image-models/timm/models/inception_v3.py @@ -0,0 +1,458 @@ +""" Inception-V3 + +Originally from torchvision Inception3 model +Licensed BSD-Clause 3 https://github.com/pytorch/vision/blob/master/LICENSE +""" +from functools import partial +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from timm.layers import trunc_normal_, create_classifier, Linear, ConvNormAct +from ._builder import build_model_with_cfg +from ._builder import resolve_pretrained_cfg +from ._manipulate import 
flatten_modules +from ._registry import register_model, generate_default_cfgs, register_model_deprecations + +__all__ = ['InceptionV3'] # model_registry will add each entrypoint fn to this + + +class InceptionA(nn.Module): + + def __init__(self, in_channels, pool_features, conv_block=None): + super(InceptionA, self).__init__() + conv_block = conv_block or ConvNormAct + self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) + + self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1) + self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1) + + self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionB(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionB, self).__init__() + conv_block = conv_block or ConvNormAct + self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2) + + def _forward(self, x): + branch3x3 = self.branch3x3(x) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + + outputs = [branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionC(nn.Module): + + def __init__(self, in_channels, channels_7x7, conv_block=None): + super(InceptionC, self).__init__() + conv_block = conv_block or ConvNormAct + self.branch1x1 = conv_block(in_channels, 192, kernel_size=1) + + c7 = channels_7x7 + self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0)) + + self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = 
self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionD(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionD, self).__init__() + conv_block = conv_block or ConvNormAct + self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2) + + self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2) + + def _forward(self, x): + branch3x3 = self.branch3x3_1(x) + branch3x3 = self.branch3x3_2(branch3x3) + + branch7x7x3 = self.branch7x7x3_1(x) + branch7x7x3 = self.branch7x7x3_2(branch7x7x3) + branch7x7x3 = self.branch7x7x3_3(branch7x7x3) + branch7x7x3 = self.branch7x7x3_4(branch7x7x3) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + outputs = [branch3x3, branch7x7x3, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionE(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionE, self).__init__() + conv_block = conv_block or ConvNormAct + self.branch1x1 = conv_block(in_channels, 320, kernel_size=1) + + self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1) + self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1) + self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1) + self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = torch.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = torch.cat(branch3x3dbl, 1) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionAux(nn.Module): + + def __init__(self, in_channels, num_classes, conv_block=None): + super(InceptionAux, self).__init__() + conv_block = conv_block or ConvNormAct + self.conv0 = conv_block(in_channels, 128, kernel_size=1) + self.conv1 = conv_block(128, 768, kernel_size=5) + self.conv1.stddev = 0.01 + self.fc = Linear(768, num_classes) + self.fc.stddev = 0.001 + + def forward(self, x): + # N x 768 x 17 x 17 + x = F.avg_pool2d(x, kernel_size=5, stride=3) + # N x 768 x 5 x 5 + x = self.conv0(x) + # N x 128 
x 5 x 5 + x = self.conv1(x) + # N x 768 x 1 x 1 + # Adaptive average pooling + x = F.adaptive_avg_pool2d(x, (1, 1)) + # N x 768 x 1 x 1 + x = torch.flatten(x, 1) + # N x 768 + x = self.fc(x) + # N x 1000 + return x + + +class InceptionV3(nn.Module): + """Inception-V3 + """ + aux_logits: torch.jit.Final[bool] + + def __init__( + self, + num_classes=1000, + in_chans=3, + drop_rate=0., + global_pool='avg', + aux_logits=False, + norm_layer='batchnorm2d', + norm_eps=1e-3, + act_layer='relu', + ): + super(InceptionV3, self).__init__() + self.num_classes = num_classes + self.aux_logits = aux_logits + conv_block = partial( + ConvNormAct, + padding=0, + norm_layer=norm_layer, + act_layer=act_layer, + norm_kwargs=dict(eps=norm_eps), + act_kwargs=dict(inplace=True), + ) + + self.Conv2d_1a_3x3 = conv_block(in_chans, 32, kernel_size=3, stride=2) + self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3) + self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1) + self.Pool1 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1) + self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3) + self.Pool2 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Mixed_5b = InceptionA(192, pool_features=32, conv_block=conv_block) + self.Mixed_5c = InceptionA(256, pool_features=64, conv_block=conv_block) + self.Mixed_5d = InceptionA(288, pool_features=64, conv_block=conv_block) + self.Mixed_6a = InceptionB(288, conv_block=conv_block) + self.Mixed_6b = InceptionC(768, channels_7x7=128, conv_block=conv_block) + self.Mixed_6c = InceptionC(768, channels_7x7=160, conv_block=conv_block) + self.Mixed_6d = InceptionC(768, channels_7x7=160, conv_block=conv_block) + self.Mixed_6e = InceptionC(768, channels_7x7=192, conv_block=conv_block) + if aux_logits: + self.AuxLogits = InceptionAux(768, num_classes, conv_block=conv_block) + else: + self.AuxLogits = None + self.Mixed_7a = InceptionD(768, conv_block=conv_block) + self.Mixed_7b = InceptionE(1280, conv_block=conv_block) + self.Mixed_7c = InceptionE(2048, conv_block=conv_block) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='Conv2d_2b_3x3'), + dict(num_chs=192, reduction=4, module='Conv2d_4a_3x3'), + dict(num_chs=288, reduction=8, module='Mixed_5d'), + dict(num_chs=768, reduction=16, module='Mixed_6e'), + dict(num_chs=2048, reduction=32, module='Mixed_7c'), + ] + + self.num_features = self.head_hidden_size = 2048 + self.global_pool, self.head_drop, self.fc = create_classifier( + self.num_features, + self.num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + ) + + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + stddev = m.stddev if hasattr(m, 'stddev') else 0.1 + trunc_normal_(m.weight, std=stddev) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))} + module_map.pop(('fc',)) + + def _matcher(name): + if any([name.startswith(n) for n in ('Conv2d_1', 'Conv2d_2')]): + return 0 + elif any([name.startswith(n) for n in ('Conv2d_3', 'Conv2d_4')]): + return 1 + else: + for k in module_map.keys(): + if k == tuple(name.split('.')[:len(k)]): + return module_map[k] + return float('inf') + return _matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def 
get_classifier(self) -> nn.Module: + return self.fc + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_preaux(self, x): + x = self.Conv2d_1a_3x3(x) # N x 32 x 149 x 149 + x = self.Conv2d_2a_3x3(x) # N x 32 x 147 x 147 + x = self.Conv2d_2b_3x3(x) # N x 64 x 147 x 147 + x = self.Pool1(x) # N x 64 x 73 x 73 + x = self.Conv2d_3b_1x1(x) # N x 80 x 73 x 73 + x = self.Conv2d_4a_3x3(x) # N x 192 x 71 x 71 + x = self.Pool2(x) # N x 192 x 35 x 35 + x = self.Mixed_5b(x) # N x 256 x 35 x 35 + x = self.Mixed_5c(x) # N x 288 x 35 x 35 + x = self.Mixed_5d(x) # N x 288 x 35 x 35 + x = self.Mixed_6a(x) # N x 768 x 17 x 17 + x = self.Mixed_6b(x) # N x 768 x 17 x 17 + x = self.Mixed_6c(x) # N x 768 x 17 x 17 + x = self.Mixed_6d(x) # N x 768 x 17 x 17 + x = self.Mixed_6e(x) # N x 768 x 17 x 17 + return x + + def forward_postaux(self, x): + x = self.Mixed_7a(x) # N x 1280 x 8 x 8 + x = self.Mixed_7b(x) # N x 2048 x 8 x 8 + x = self.Mixed_7c(x) # N x 2048 x 8 x 8 + return x + + def forward_features(self, x): + x = self.forward_preaux(x) + if self.aux_logits: + aux = self.AuxLogits(x) + x = self.forward_postaux(x) + return x, aux + x = self.forward_postaux(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.head_drop(x) + if pre_logits: + return x + x = self.fc(x) + return x + + def forward(self, x): + if self.aux_logits: + x, aux = self.forward_features(x) + x = self.forward_head(x) + return x, aux + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_inception_v3(variant, pretrained=False, **kwargs): + pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=kwargs.pop('pretrained_cfg', None)) + aux_logits = kwargs.get('aux_logits', False) + has_aux_logits = False + if pretrained_cfg: + # only torchvision pretrained weights have aux logits + has_aux_logits = pretrained_cfg.tag == 'tv_in1k' + if aux_logits: + assert not kwargs.pop('features_only', False) + load_strict = has_aux_logits + else: + load_strict = not has_aux_logits + + return build_model_with_cfg( + InceptionV3, + variant, + pretrained, + pretrained_cfg=pretrained_cfg, + pretrained_strict=load_strict, + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'Conv2d_1a_3x3.conv', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + # original PyTorch weights, ported from Tensorflow but modified + 'inception_v3.tv_in1k': _cfg( + # NOTE checkpoint has aux logit layer weights + hf_hub_id='timm/', + url='https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'), + # my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz) + 'inception_v3.tf_in1k': _cfg(hf_hub_id='timm/'), + # my port of Tensorflow adversarially trained Inception V3 from + # http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz + 'inception_v3.tf_adv_in1k': _cfg(hf_hub_id='timm/'), + # from gluon pretrained models, best performing in terms of accuracy/loss metrics + # https://gluon-cv.mxnet.io/model_zoo/classification.html + 'inception_v3.gluon_in1k': _cfg( + hf_hub_id='timm/', + 
mean=IMAGENET_DEFAULT_MEAN, # also works well with inception defaults + std=IMAGENET_DEFAULT_STD, # also works well with inception defaults + ) +}) + + +@register_model +def inception_v3(pretrained=False, **kwargs) -> InceptionV3: + model = _create_inception_v3('inception_v3', pretrained=pretrained, **kwargs) + return model + + +register_model_deprecations(__name__, { + 'tf_inception_v3': 'inception_v3.tf_in1k', + 'adv_inception_v3': 'inception_v3.tf_adv_in1k', + 'gluon_inception_v3': 'inception_v3.gluon_in1k', +}) \ No newline at end of file diff --git a/pytorch-image-models/timm/models/inception_v4.py b/pytorch-image-models/timm/models/inception_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..a435533fd45172d3ef568443daa2b8062e76ba65 --- /dev/null +++ b/pytorch-image-models/timm/models/inception_v4.py @@ -0,0 +1,325 @@ +""" Pytorch Inception-V4 implementation +Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is +based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) +""" +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from timm.layers import create_classifier, ConvNormAct +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs + +__all__ = ['InceptionV4'] + + +class Mixed3a(nn.Module): + def __init__(self, conv_block=ConvNormAct): + super(Mixed3a, self).__init__() + self.maxpool = nn.MaxPool2d(3, stride=2) + self.conv = conv_block(64, 96, kernel_size=3, stride=2) + + def forward(self, x): + x0 = self.maxpool(x) + x1 = self.conv(x) + out = torch.cat((x0, x1), 1) + return out + + +class Mixed4a(nn.Module): + def __init__(self, conv_block=ConvNormAct): + super(Mixed4a, self).__init__() + + self.branch0 = nn.Sequential( + conv_block(160, 64, kernel_size=1, stride=1), + conv_block(64, 96, kernel_size=3, stride=1) + ) + + self.branch1 = nn.Sequential( + conv_block(160, 64, kernel_size=1, stride=1), + conv_block(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)), + conv_block(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)), + conv_block(64, 96, kernel_size=(3, 3), stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + return out + + +class Mixed5a(nn.Module): + def __init__(self, conv_block=ConvNormAct): + super(Mixed5a, self).__init__() + self.conv = conv_block(192, 192, kernel_size=3, stride=2) + self.maxpool = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.conv(x) + x1 = self.maxpool(x) + out = torch.cat((x0, x1), 1) + return out + + +class InceptionA(nn.Module): + def __init__(self, conv_block=ConvNormAct): + super(InceptionA, self).__init__() + self.branch0 = conv_block(384, 96, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + conv_block(384, 64, kernel_size=1, stride=1), + conv_block(64, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch2 = nn.Sequential( + conv_block(384, 64, kernel_size=1, stride=1), + conv_block(64, 96, kernel_size=3, stride=1, padding=1), + conv_block(96, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + conv_block(384, 96, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + 
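+# NOTE: each InceptionA branch above produces 96 channels, so the concatenated output is 4 * 96 = 384 channels, matching the 384-channel input that ReductionA expects below.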
+class ReductionA(nn.Module): + def __init__(self, conv_block=ConvNormAct): + super(ReductionA, self).__init__() + self.branch0 = conv_block(384, 384, kernel_size=3, stride=2) + + self.branch1 = nn.Sequential( + conv_block(384, 192, kernel_size=1, stride=1), + conv_block(192, 224, kernel_size=3, stride=1, padding=1), + conv_block(224, 256, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class InceptionB(nn.Module): + def __init__(self, conv_block=ConvNormAct): + super(InceptionB, self).__init__() + self.branch0 = conv_block(1024, 384, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + conv_block(1024, 192, kernel_size=1, stride=1), + conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)), + conv_block(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0)) + ) + + self.branch2 = nn.Sequential( + conv_block(1024, 192, kernel_size=1, stride=1), + conv_block(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)), + conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)), + conv_block(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)), + conv_block(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + conv_block(1024, 128, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class ReductionB(nn.Module): + def __init__(self, conv_block=ConvNormAct): + super(ReductionB, self).__init__() + + self.branch0 = nn.Sequential( + conv_block(1024, 192, kernel_size=1, stride=1), + conv_block(192, 192, kernel_size=3, stride=2) + ) + + self.branch1 = nn.Sequential( + conv_block(1024, 256, kernel_size=1, stride=1), + conv_block(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)), + conv_block(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)), + conv_block(320, 320, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class InceptionC(nn.Module): + def __init__(self, conv_block=ConvNormAct): + super(InceptionC, self).__init__() + + self.branch0 = conv_block(1536, 256, kernel_size=1, stride=1) + + self.branch1_0 = conv_block(1536, 384, kernel_size=1, stride=1) + self.branch1_1a = conv_block(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch1_1b = conv_block(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + + self.branch2_0 = conv_block(1536, 384, kernel_size=1, stride=1) + self.branch2_1 = conv_block(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0)) + self.branch2_2 = conv_block(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch2_3a = conv_block(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch2_3b = conv_block(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + conv_block(1536, 256, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + + x1_0 = self.branch1_0(x) + x1_1a = self.branch1_1a(x1_0) + x1_1b = self.branch1_1b(x1_0) + x1 = torch.cat((x1_1a, x1_1b), 1) + + x2_0 = 
self.branch2_0(x) + x2_1 = self.branch2_1(x2_0) + x2_2 = self.branch2_2(x2_1) + x2_3a = self.branch2_3a(x2_2) + x2_3b = self.branch2_3b(x2_2) + x2 = torch.cat((x2_3a, x2_3b), 1) + + x3 = self.branch3(x) + + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class InceptionV4(nn.Module): + def __init__( + self, + num_classes=1000, + in_chans=3, + output_stride=32, + drop_rate=0., + global_pool='avg', + norm_layer='batchnorm2d', + norm_eps=1e-3, + act_layer='relu', + ): + super(InceptionV4, self).__init__() + assert output_stride == 32 + self.num_classes = num_classes + self.num_features = self.head_hidden_size = 1536 + conv_block = partial( + ConvNormAct, + padding=0, + norm_layer=norm_layer, + act_layer=act_layer, + norm_kwargs=dict(eps=norm_eps), + act_kwargs=dict(inplace=True), + ) + + features = [ + conv_block(in_chans, 32, kernel_size=3, stride=2), + conv_block(32, 32, kernel_size=3, stride=1), + conv_block(32, 64, kernel_size=3, stride=1, padding=1), + Mixed3a(conv_block), + Mixed4a(conv_block), + Mixed5a(conv_block), + ] + features += [InceptionA(conv_block) for _ in range(4)] + features += [ReductionA(conv_block)] # Mixed6a + features += [InceptionB(conv_block) for _ in range(7)] + features += [ReductionB(conv_block)] # Mixed7a + features += [InceptionC(conv_block) for _ in range(3)] + self.features = nn.Sequential(*features) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='features.2'), + dict(num_chs=160, reduction=4, module='features.3'), + dict(num_chs=384, reduction=8, module='features.9'), + dict(num_chs=1024, reduction=16, module='features.17'), + dict(num_chs=1536, reduction=32, module='features.21'), + ] + self.global_pool, self.head_drop, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^features\.[012]\.', + blocks=r'^features\.(\d+)' + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.last_linear + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + return self.features(x) + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.head_drop(x) + return x if pre_logits else self.last_linear(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_inception_v4(variant, pretrained=False, **kwargs) -> InceptionV4: + return build_model_with_cfg( + InceptionV4, + variant, + pretrained, + feature_cfg=dict(flatten_sequential=True), + **kwargs, + ) + + +default_cfgs = generate_default_cfgs({ + 'inception_v4.tf_in1k': { + 'hf_hub_id': 'timm/', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'features.0.conv', 'classifier': 'last_linear', + } +}) + + +@register_model +def inception_v4(pretrained=False, **kwargs): + return _create_inception_v4('inception_v4', pretrained, **kwargs) diff --git a/pytorch-image-models/timm/models/layers/__init__.py b/pytorch-image-models/timm/models/layers/__init__.py 
new file mode 100644 index 0000000000000000000000000000000000000000..956f39aa59d8a272386afac213051ee884397903 --- /dev/null +++ b/pytorch-image-models/timm/models/layers/__init__.py @@ -0,0 +1,48 @@ +# NOTE timm.models.layers is DEPRECATED, please use timm.layers, this is here to reduce breakages in transition +from timm.layers.activations import * +from timm.layers.adaptive_avgmax_pool import \ + adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d +from timm.layers.attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding +from timm.layers.blur_pool import BlurPool2d +from timm.layers.classifier import ClassifierHead, create_classifier +from timm.layers.cond_conv2d import CondConv2d, get_condconv_initializer +from timm.layers.config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\ + set_layer_config +from timm.layers.conv2d_same import Conv2dSame, conv2d_same +from timm.layers.conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct +from timm.layers.create_act import create_act_layer, get_act_layer, get_act_fn +from timm.layers.create_attn import get_attn, create_attn +from timm.layers.create_conv2d import create_conv2d +from timm.layers.create_norm import get_norm_layer, create_norm_layer +from timm.layers.create_norm_act import get_norm_act_layer, create_norm_act_layer, get_norm_act_layer +from timm.layers.drop import DropBlock2d, DropPath, drop_block_2d, drop_path +from timm.layers.eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn +from timm.layers.evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\ + EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a +from timm.layers.fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm +from timm.layers.filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d +from timm.layers.gather_excite import GatherExcite +from timm.layers.global_context import GlobalContext +from timm.layers.helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple +from timm.layers.inplace_abn import InplaceAbn +from timm.layers.linear import Linear +from timm.layers.mixed_conv2d import MixedConv2d +from timm.layers.mlp import Mlp, GluMlp, GatedMlp, ConvMlp +from timm.layers.non_local_attn import NonLocalAttn, BatNonLocalAttn +from timm.layers.norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d +from timm.layers.norm_act import BatchNormAct2d, GroupNormAct, convert_sync_batchnorm +from timm.layers.padding import get_padding, get_same_padding, pad_same +from timm.layers.patch_embed import PatchEmbed +from timm.layers.pool2d_same import AvgPool2dSame, create_pool2d +from timm.layers.squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite +from timm.layers.selective_kernel import SelectiveKernel +from timm.layers.separable_conv import SeparableConv2d, SeparableConvNormAct +from timm.layers.split_attn import SplitAttn +from timm.layers.split_batchnorm import SplitBatchNorm2d, convert_splitbn_model +from timm.layers.std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame +from timm.layers.test_time_pool import TestTimePoolHead, apply_test_time_pool +from timm.layers.trace_utils import _assert, _float_to_int +from timm.layers.weight_init import trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_ + +import warnings +warnings.warn(f"Importing from {__name__} is 
deprecated, please import via timm.layers", FutureWarning) diff --git a/pytorch-image-models/timm/models/levit.py b/pytorch-image-models/timm/models/levit.py new file mode 100644 index 0000000000000000000000000000000000000000..16186cae7aab97539258f757f5a5f3ca033345bc --- /dev/null +++ b/pytorch-image-models/timm/models/levit.py @@ -0,0 +1,997 @@ +""" LeViT + +Paper: `LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference` + - https://arxiv.org/abs/2104.01136 + +@article{graham2021levit, + title={LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference}, + author={Benjamin Graham and Alaaeldin El-Nouby and Hugo Touvron and Pierre Stock and Armand Joulin and Herv\'e J\'egou and Matthijs Douze}, + journal={arXiv preprint arXiv:2104.01136}, + year={2021} +} + +Adapted from official impl at https://github.com/facebookresearch/LeViT, original copyright below. + +This version combines both conv/linear models and fixes torchscript compatibility. + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" + +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. + +# Modified from +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +# Copyright 2020 Ross Wightman, Apache-2.0 License +from collections import OrderedDict +from functools import partial +from typing import Dict, List, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN +from timm.layers import to_ntuple, to_2tuple, get_act_layer, DropPath, trunc_normal_, ndgrid +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._manipulate import checkpoint_seq +from ._registry import generate_default_cfgs, register_model + +__all__ = ['Levit'] + + +class ConvNorm(nn.Module): + def __init__( + self, in_chs, out_chs, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bn_weight_init=1): + super().__init__() + self.linear = nn.Conv2d(in_chs, out_chs, kernel_size, stride, padding, dilation, groups, bias=False) + self.bn = nn.BatchNorm2d(out_chs) + + nn.init.constant_(self.bn.weight, bn_weight_init) + + @torch.no_grad() + def fuse(self): + c, bn = self.linear, self.bn + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.Conv2d( + w.size(1), w.size(0), w.shape[2:], stride=self.linear.stride, + padding=self.linear.padding, dilation=self.linear.dilation, groups=self.linear.groups) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + def forward(self, x): + return self.bn(self.linear(x)) + + +class LinearNorm(nn.Module): + def __init__(self, in_features, out_features, bn_weight_init=1): + super().__init__() + self.linear = nn.Linear(in_features, out_features, bias=False) + self.bn = nn.BatchNorm1d(out_features) + + nn.init.constant_(self.bn.weight, bn_weight_init) + + @torch.no_grad() + def fuse(self): + l, bn = self.linear, self.bn + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = l.weight * w[:, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.Linear(w.size(1), w.size(0)) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + def forward(self, x): + x = self.linear(x) + return self.bn(x.flatten(0, 1)).reshape_as(x) + + +class NormLinear(nn.Module): + def __init__(self, in_features, out_features, bias=True, 
std=0.02, drop=0.): + super().__init__() + self.bn = nn.BatchNorm1d(in_features) + self.drop = nn.Dropout(drop) + self.linear = nn.Linear(in_features, out_features, bias=bias) + + trunc_normal_(self.linear.weight, std=std) + if self.linear.bias is not None: + nn.init.constant_(self.linear.bias, 0) + + @torch.no_grad() + def fuse(self): + bn, l = self.bn, self.linear + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = l.weight * w[None, :] + if l.bias is None: + b = b @ self.linear.weight.T + else: + b = (l.weight @ b[:, None]).view(-1) + self.linear.bias + m = nn.Linear(w.size(1), w.size(0)) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + def forward(self, x): + return self.linear(self.drop(self.bn(x))) + + +class Stem8(nn.Sequential): + def __init__(self, in_chs, out_chs, act_layer): + super().__init__() + self.stride = 8 + + self.add_module('conv1', ConvNorm(in_chs, out_chs // 4, 3, stride=2, padding=1)) + self.add_module('act1', act_layer()) + self.add_module('conv2', ConvNorm(out_chs // 4, out_chs // 2, 3, stride=2, padding=1)) + self.add_module('act2', act_layer()) + self.add_module('conv3', ConvNorm(out_chs // 2, out_chs, 3, stride=2, padding=1)) + + +class Stem16(nn.Sequential): + def __init__(self, in_chs, out_chs, act_layer): + super().__init__() + self.stride = 16 + + self.add_module('conv1', ConvNorm(in_chs, out_chs // 8, 3, stride=2, padding=1)) + self.add_module('act1', act_layer()) + self.add_module('conv2', ConvNorm(out_chs // 8, out_chs // 4, 3, stride=2, padding=1)) + self.add_module('act2', act_layer()) + self.add_module('conv3', ConvNorm(out_chs // 4, out_chs // 2, 3, stride=2, padding=1)) + self.add_module('act3', act_layer()) + self.add_module('conv4', ConvNorm(out_chs // 2, out_chs, 3, stride=2, padding=1)) + + +class Downsample(nn.Module): + def __init__(self, stride, resolution, use_pool=False): + super().__init__() + self.stride = stride + self.resolution = to_2tuple(resolution) + self.pool = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False) if use_pool else None + + def forward(self, x): + B, N, C = x.shape + x = x.view(B, self.resolution[0], self.resolution[1], C) + if self.pool is not None: + x = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + else: + x = x[:, ::self.stride, ::self.stride] + return x.reshape(B, -1, C) + + +class Attention(nn.Module): + attention_bias_cache: Dict[str, torch.Tensor] + + def __init__( + self, + dim, + key_dim, + num_heads=8, + attn_ratio=4., + resolution=14, + use_conv=False, + act_layer=nn.SiLU, + ): + super().__init__() + ln_layer = ConvNorm if use_conv else LinearNorm + resolution = to_2tuple(resolution) + + self.use_conv = use_conv + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.key_attn_dim = key_dim * num_heads + self.val_dim = int(attn_ratio * key_dim) + self.val_attn_dim = int(attn_ratio * key_dim) * num_heads + + self.qkv = ln_layer(dim, self.val_attn_dim + self.key_attn_dim * 2) + self.proj = nn.Sequential(OrderedDict([ + ('act', act_layer()), + ('ln', ln_layer(self.val_attn_dim, dim, bn_weight_init=0)) + ])) + + self.attention_biases = nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1])) + pos = torch.stack(ndgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1) + rel_pos = (pos[..., :, None] - pos[..., None, :]).abs() + rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1] + 
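# rel_pos holds, for every (query, key) position pair, the flattened offset |dy| * W + |dx|; get_attention_biases uses it to index the learned per-head attention_biases table. + 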
self.register_buffer('attention_bias_idxs', rel_pos, persistent=False) + self.attention_bias_cache = {} + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.attention_bias_cache: + self.attention_bias_cache = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if torch.jit.is_tracing() or self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.attention_bias_cache: + self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.attention_bias_cache[device_key] + + def forward(self, x): # x (B,C,H,W) + if self.use_conv: + B, C, H, W = x.shape + q, k, v = self.qkv(x).view( + B, self.num_heads, -1, H * W).split([self.key_dim, self.key_dim, self.val_dim], dim=2) + + attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W) + else: + B, N, C = x.shape + q, k, v = self.qkv(x).view( + B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.val_dim], dim=3) + q = q.permute(0, 2, 1, 3) + k = k.permute(0, 2, 3, 1) + v = v.permute(0, 2, 1, 3) + + attn = q @ k * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N, self.val_attn_dim) + x = self.proj(x) + return x + + +class AttentionDownsample(nn.Module): + attention_bias_cache: Dict[str, torch.Tensor] + + def __init__( + self, + in_dim, + out_dim, + key_dim, + num_heads=8, + attn_ratio=2.0, + stride=2, + resolution=14, + use_conv=False, + use_pool=False, + act_layer=nn.SiLU, + ): + super().__init__() + resolution = to_2tuple(resolution) + + self.stride = stride + self.resolution = resolution + self.num_heads = num_heads + self.key_dim = key_dim + self.key_attn_dim = key_dim * num_heads + self.val_dim = int(attn_ratio * key_dim) + self.val_attn_dim = self.val_dim * self.num_heads + self.scale = key_dim ** -0.5 + self.use_conv = use_conv + + if self.use_conv: + ln_layer = ConvNorm + sub_layer = partial( + nn.AvgPool2d, + kernel_size=3 if use_pool else 1, padding=1 if use_pool else 0, count_include_pad=False) + else: + ln_layer = LinearNorm + sub_layer = partial(Downsample, resolution=resolution, use_pool=use_pool) + + self.kv = ln_layer(in_dim, self.val_attn_dim + self.key_attn_dim) + self.q = nn.Sequential(OrderedDict([ + ('down', sub_layer(stride=stride)), + ('ln', ln_layer(in_dim, self.key_attn_dim)) + ])) + self.proj = nn.Sequential(OrderedDict([ + ('act', act_layer()), + ('ln', ln_layer(self.val_attn_dim, out_dim)) + ])) + + self.attention_biases = nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1])) + k_pos = torch.stack(ndgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1) + q_pos = torch.stack(ndgrid( + torch.arange(0, resolution[0], step=stride), + torch.arange(0, resolution[1], step=stride) + )).flatten(1) + rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs() + rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1] + self.register_buffer('attention_bias_idxs', rel_pos, persistent=False) + + self.attention_bias_cache = {} # per-device attention_biases cache + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.attention_bias_cache: + self.attention_bias_cache = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if 
torch.jit.is_tracing() or self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.attention_bias_cache: + self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.attention_bias_cache[device_key] + + def forward(self, x): + if self.use_conv: + B, C, H, W = x.shape + HH, WW = (H - 1) // self.stride + 1, (W - 1) // self.stride + 1 + k, v = self.kv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.val_dim], dim=2) + q = self.q(x).view(B, self.num_heads, self.key_dim, -1) + + attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (v @ attn.transpose(-2, -1)).reshape(B, self.val_attn_dim, HH, WW) + else: + B, N, C = x.shape + k, v = self.kv(x).view(B, N, self.num_heads, -1).split([self.key_dim, self.val_dim], dim=3) + k = k.permute(0, 2, 3, 1) # BHCN + v = v.permute(0, 2, 1, 3) # BHNC + q = self.q(x).view(B, -1, self.num_heads, self.key_dim).permute(0, 2, 1, 3) + + attn = q @ k * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, -1, self.val_attn_dim) + x = self.proj(x) + return x + + +class LevitMlp(nn.Module): + """ MLP for Levit w/ normalization + ability to switch btw conv and linear + """ + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + use_conv=False, + act_layer=nn.SiLU, + drop=0. + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + ln_layer = ConvNorm if use_conv else LinearNorm + + self.ln1 = ln_layer(in_features, hidden_features) + self.act = act_layer() + self.drop = nn.Dropout(drop) + self.ln2 = ln_layer(hidden_features, out_features, bn_weight_init=0) + + def forward(self, x): + x = self.ln1(x) + x = self.act(x) + x = self.drop(x) + x = self.ln2(x) + return x + + +class LevitDownsample(nn.Module): + def __init__( + self, + in_dim, + out_dim, + key_dim, + num_heads=8, + attn_ratio=4., + mlp_ratio=2., + act_layer=nn.SiLU, + attn_act_layer=None, + resolution=14, + use_conv=False, + use_pool=False, + drop_path=0., + ): + super().__init__() + attn_act_layer = attn_act_layer or act_layer + + self.attn_downsample = AttentionDownsample( + in_dim=in_dim, + out_dim=out_dim, + key_dim=key_dim, + num_heads=num_heads, + attn_ratio=attn_ratio, + act_layer=attn_act_layer, + resolution=resolution, + use_conv=use_conv, + use_pool=use_pool, + ) + + self.mlp = LevitMlp( + out_dim, + int(out_dim * mlp_ratio), + use_conv=use_conv, + act_layer=act_layer + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + x = self.attn_downsample(x) + x = x + self.drop_path(self.mlp(x)) + return x + + +class LevitBlock(nn.Module): + def __init__( + self, + dim, + key_dim, + num_heads=8, + attn_ratio=4., + mlp_ratio=2., + resolution=14, + use_conv=False, + act_layer=nn.SiLU, + attn_act_layer=None, + drop_path=0., + ): + super().__init__() + attn_act_layer = attn_act_layer or act_layer + + self.attn = Attention( + dim=dim, + key_dim=key_dim, + num_heads=num_heads, + attn_ratio=attn_ratio, + resolution=resolution, + use_conv=use_conv, + act_layer=attn_act_layer, + ) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.mlp = LevitMlp( + dim, + int(dim * mlp_ratio), + use_conv=use_conv, + act_layer=act_layer + ) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + x = x + self.drop_path1(self.attn(x)) + x = x + self.drop_path2(self.mlp(x)) + return x + + +class LevitStage(nn.Module): + def __init__( + self, + in_dim, + out_dim, + key_dim, + depth=4, + num_heads=8, + attn_ratio=4.0, + mlp_ratio=4.0, + act_layer=nn.SiLU, + attn_act_layer=None, + resolution=14, + downsample='', + use_conv=False, + drop_path=0., + ): + super().__init__() + resolution = to_2tuple(resolution) + + if downsample: + self.downsample = LevitDownsample( + in_dim, + out_dim, + key_dim=key_dim, + num_heads=in_dim // key_dim, + attn_ratio=4., + mlp_ratio=2., + act_layer=act_layer, + attn_act_layer=attn_act_layer, + resolution=resolution, + use_conv=use_conv, + drop_path=drop_path, + ) + resolution = [(r - 1) // 2 + 1 for r in resolution] + else: + assert in_dim == out_dim + self.downsample = nn.Identity() + + blocks = [] + for _ in range(depth): + blocks += [LevitBlock( + out_dim, + key_dim, + num_heads=num_heads, + attn_ratio=attn_ratio, + mlp_ratio=mlp_ratio, + act_layer=act_layer, + attn_act_layer=attn_act_layer, + resolution=resolution, + use_conv=use_conv, + drop_path=drop_path, + )] + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + x = self.downsample(x) + x = self.blocks(x) + return x + + +class Levit(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + + NOTE: distillation is defaulted to True since pretrained weights use it, will cause problems + w/ train scripts that don't take tuple outputs, + """ + + def __init__( + self, + img_size=224, + in_chans=3, + num_classes=1000, + embed_dim=(192,), + key_dim=64, + depth=(12,), + num_heads=(3,), + attn_ratio=2., + mlp_ratio=2., + stem_backbone=None, + stem_stride=None, + stem_type='s16', + down_op='subsample', + act_layer='hard_swish', + attn_act_layer=None, + use_conv=False, + global_pool='avg', + drop_rate=0., + drop_path_rate=0.): + super().__init__() + act_layer = get_act_layer(act_layer) + attn_act_layer = get_act_layer(attn_act_layer or act_layer) + self.use_conv = use_conv + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.head_hidden_size = embed_dim[-1] + self.embed_dim = embed_dim + self.drop_rate = drop_rate + self.grad_checkpointing = False + self.feature_info = [] + + num_stages = len(embed_dim) + assert len(depth) == num_stages + num_heads = to_ntuple(num_stages)(num_heads) + attn_ratio = to_ntuple(num_stages)(attn_ratio) + mlp_ratio = to_ntuple(num_stages)(mlp_ratio) + + if stem_backbone is not None: + assert stem_stride >= 2 + self.stem = stem_backbone + stride = stem_stride + else: + assert stem_type in ('s16', 's8') + if stem_type == 's16': + self.stem = Stem16(in_chans, embed_dim[0], act_layer=act_layer) + else: + self.stem = Stem8(in_chans, embed_dim[0], act_layer=act_layer) + stride = self.stem.stride + resolution = tuple([i // p for i, p in zip(to_2tuple(img_size), to_2tuple(stride))]) + + in_dim = embed_dim[0] + stages = [] + for i in range(num_stages): + stage_stride = 2 if i > 0 else 1 + stages += [LevitStage( + in_dim, + embed_dim[i], + key_dim, + depth=depth[i], + num_heads=num_heads[i], + attn_ratio=attn_ratio[i], + mlp_ratio=mlp_ratio[i], + act_layer=act_layer, + attn_act_layer=attn_act_layer, + resolution=resolution, + use_conv=use_conv, + downsample=down_op if stage_stride == 2 else 
'', + drop_path=drop_path_rate + )] + stride *= stage_stride + resolution = tuple([(r - 1) // stage_stride + 1 for r in resolution]) + self.feature_info += [dict(num_chs=embed_dim[i], reduction=stride, module=f'stages.{i}')] + in_dim = embed_dim[i] + self.stages = nn.Sequential(*stages) + + # Classifier head + self.head = NormLinear(embed_dim[-1], num_classes, drop=drop_rate) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def no_weight_decay(self): + return {x for x in self.state_dict().keys() if 'attention_biases' in x} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^cls_token|pos_embed|patch_embed', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int , global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = NormLinear( + self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else nn.Identity() + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to compatible intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.stages), indices) + + # forward pass + x = self.stem(x) + B, C, H, W = x.shape + if not self.use_conv: + x = x.flatten(2).transpose(1, 2) + + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + stages = self.stages + else: + stages = self.stages[:max_index + 1] + for feat_idx, stage in enumerate(stages): + x = stage(x) + if feat_idx in take_indices: + if self.use_conv: + intermediates.append(x) + else: + intermediates.append(x.reshape(B, H, W, -1).permute(0, 3, 1, 2)) + H = (H + 2 - 1) // 2 + W = (W + 2 - 1) // 2 + + if intermediates_only: + return intermediates + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
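+        Returns the indices of the intermediate outputs that are retained after pruning.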
+ """ + take_indices, max_index = feature_take_indices(len(self.stages), indices) + self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0 + if prune_head: + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.stem(x) + if not self.use_conv: + x = x.flatten(2).transpose(1, 2) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x) + else: + x = self.stages(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + x = x.mean(dim=(-2, -1)) if self.use_conv else x.mean(dim=1) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +class LevitDistilled(Levit): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.head_dist = NormLinear(self.num_features, self.num_classes) if self.num_classes > 0 else nn.Identity() + self.distilled_training = False # must set this True to train w/ distillation token + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head, self.head_dist + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = NormLinear( + self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else nn.Identity() + self.head_dist = NormLinear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def set_distilled_training(self, enable=True): + self.distilled_training = enable + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + x = x.mean(dim=(-2, -1)) if self.use_conv else x.mean(dim=1) + if pre_logits: + return x + x, x_dist = self.head(x), self.head_dist(x) + if self.distilled_training and self.training and not torch.jit.is_scripting(): + # only return separate classification predictions when training in distilled mode + return x, x_dist + else: + # during standard train/finetune, inference average the classifier predictions + return (x + x_dist) / 2 + + +def checkpoint_filter_fn(state_dict, model): + if 'model' in state_dict: + state_dict = state_dict['model'] + + # filter out attn biases, should not have been persistent + state_dict = {k: v for k, v in state_dict.items() if 'attention_bias_idxs' not in k} + + D = model.state_dict() + out_dict = {} + for ka, kb, va, vb in zip(D.keys(), state_dict.keys(), D.values(), state_dict.values()): + if va.ndim == 4 and vb.ndim == 2: + vb = vb[:, :, None, None] + if va.shape != vb.shape: + # head or first-conv shapes may change for fine-tune + assert 'head' in ka or 'stem.conv1.linear' in ka + out_dict[ka] = vb + + return out_dict + + +model_cfgs = dict( + levit_128s=dict( + embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 6, 8), depth=(2, 3, 4)), + levit_128=dict( + embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 8, 12), depth=(4, 4, 4)), + levit_192=dict( + embed_dim=(192, 288, 384), key_dim=32, num_heads=(3, 5, 6), depth=(4, 4, 4)), + levit_256=dict( + embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 4, 4)), + levit_384=dict( + embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4)), + + # stride-8 stem experiments + levit_384_s8=dict( + embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4), + act_layer='silu', stem_type='s8'), + levit_512_s8=dict( + 
embed_dim=(512, 640, 896), key_dim=64, num_heads=(8, 10, 14), depth=(4, 4, 4), + act_layer='silu', stem_type='s8'), + + # wider experiments + levit_512=dict( + embed_dim=(512, 768, 1024), key_dim=64, num_heads=(8, 12, 16), depth=(4, 4, 4), act_layer='silu'), + + # deeper experiments + levit_256d=dict( + embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 8, 6), act_layer='silu'), + levit_512d=dict( + embed_dim=(512, 640, 768), key_dim=64, num_heads=(8, 10, 12), depth=(4, 8, 6), act_layer='silu'), +) + + +def create_levit(variant, cfg_variant=None, pretrained=False, distilled=True, **kwargs): + is_conv = '_conv' in variant + out_indices = kwargs.pop('out_indices', (0, 1, 2)) + if kwargs.get('features_only', False) and not is_conv: + kwargs.setdefault('feature_cls', 'getter') + if cfg_variant is None: + if variant in model_cfgs: + cfg_variant = variant + elif is_conv: + cfg_variant = variant.replace('_conv', '') + + model_cfg = dict(model_cfgs[cfg_variant], **kwargs) + model = build_model_with_cfg( + LevitDistilled if distilled else Levit, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **model_cfg, + ) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.linear', 'classifier': ('head.linear', 'head_dist.linear'), + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + # weights in nn.Linear mode + 'levit_128s.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + ), + 'levit_128.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + ), + 'levit_192.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + ), + 'levit_256.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + ), + 'levit_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + ), + + # weights in nn.Conv2d mode + 'levit_conv_128s.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + pool_size=(4, 4), + ), + 'levit_conv_128.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + pool_size=(4, 4), + ), + 'levit_conv_192.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + pool_size=(4, 4), + ), + 'levit_conv_256.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + pool_size=(4, 4), + ), + 'levit_conv_384.fb_dist_in1k': _cfg( + hf_hub_id='timm/', + pool_size=(4, 4), + ), + + 'levit_384_s8.untrained': _cfg(classifier='head.linear'), + 'levit_512_s8.untrained': _cfg(classifier='head.linear'), + 'levit_512.untrained': _cfg(classifier='head.linear'), + 'levit_256d.untrained': _cfg(classifier='head.linear'), + 'levit_512d.untrained': _cfg(classifier='head.linear'), + + 'levit_conv_384_s8.untrained': _cfg(classifier='head.linear'), + 'levit_conv_512_s8.untrained': _cfg(classifier='head.linear'), + 'levit_conv_512.untrained': _cfg(classifier='head.linear'), + 'levit_conv_256d.untrained': _cfg(classifier='head.linear'), + 'levit_conv_512d.untrained': _cfg(classifier='head.linear'), +}) + + +@register_model +def levit_128s(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_128s', pretrained=pretrained, **kwargs) + + +@register_model +def levit_128(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_128', pretrained=pretrained, **kwargs) + + +@register_model +def levit_192(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_192', pretrained=pretrained, **kwargs) + + +@register_model +def levit_256(pretrained=False, 
**kwargs) -> Levit: + return create_levit('levit_256', pretrained=pretrained, **kwargs) + + +@register_model +def levit_384(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_384', pretrained=pretrained, **kwargs) + + +@register_model +def levit_384_s8(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_384_s8', pretrained=pretrained, **kwargs) + + +@register_model +def levit_512_s8(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_512_s8', pretrained=pretrained, distilled=False, **kwargs) + + +@register_model +def levit_512(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_512', pretrained=pretrained, distilled=False, **kwargs) + + +@register_model +def levit_256d(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_256d', pretrained=pretrained, distilled=False, **kwargs) + + +@register_model +def levit_512d(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_512d', pretrained=pretrained, distilled=False, **kwargs) + + +@register_model +def levit_conv_128s(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_conv_128s', pretrained=pretrained, use_conv=True, **kwargs) + + +@register_model +def levit_conv_128(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_conv_128', pretrained=pretrained, use_conv=True, **kwargs) + + +@register_model +def levit_conv_192(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_conv_192', pretrained=pretrained, use_conv=True, **kwargs) + + +@register_model +def levit_conv_256(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_conv_256', pretrained=pretrained, use_conv=True, **kwargs) + + +@register_model +def levit_conv_384(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_conv_384', pretrained=pretrained, use_conv=True, **kwargs) + + +@register_model +def levit_conv_384_s8(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_conv_384_s8', pretrained=pretrained, use_conv=True, **kwargs) + + +@register_model +def levit_conv_512_s8(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_conv_512_s8', pretrained=pretrained, use_conv=True, distilled=False, **kwargs) + + +@register_model +def levit_conv_512(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_conv_512', pretrained=pretrained, use_conv=True, distilled=False, **kwargs) + + +@register_model +def levit_conv_256d(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_conv_256d', pretrained=pretrained, use_conv=True, distilled=False, **kwargs) + + +@register_model +def levit_conv_512d(pretrained=False, **kwargs) -> Levit: + return create_levit('levit_conv_512d', pretrained=pretrained, use_conv=True, distilled=False, **kwargs) + diff --git a/pytorch-image-models/timm/models/mambaout.py b/pytorch-image-models/timm/models/mambaout.py new file mode 100644 index 0000000000000000000000000000000000000000..f53a9cdfc2766d6a4ecba86f26acc7803880a2b3 --- /dev/null +++ b/pytorch-image-models/timm/models/mambaout.py @@ -0,0 +1,642 @@ +""" +MambaOut models for image classification. 
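+Paper: MambaOut: Do We Really Need Mamba for Vision?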
+Some implementations are modified from: +timm (https://github.com/rwightman/pytorch-image-models), +MetaFormer (https://github.com/sail-sg/metaformer), +InceptionNeXt (https://github.com/sail-sg/inceptionnext) +""" +from collections import OrderedDict +from typing import Optional + +import torch +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import trunc_normal_, DropPath, LayerNorm, LayerScale, ClNormMlpClassifierHead, get_act_layer +from ._builder import build_model_with_cfg +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs + + +class Stem(nn.Module): + r""" Code modified from InternImage: + https://github.com/OpenGVLab/InternImage + """ + + def __init__( + self, + in_chs=3, + out_chs=96, + mid_norm: bool = True, + act_layer=nn.GELU, + norm_layer=LayerNorm, + ): + super().__init__() + self.conv1 = nn.Conv2d( + in_chs, + out_chs // 2, + kernel_size=3, + stride=2, + padding=1 + ) + self.norm1 = norm_layer(out_chs // 2) if mid_norm else None + self.act = act_layer() + self.conv2 = nn.Conv2d( + out_chs // 2, + out_chs, + kernel_size=3, + stride=2, + padding=1 + ) + self.norm2 = norm_layer(out_chs) + + def forward(self, x): + x = self.conv1(x) + if self.norm1 is not None: + x = x.permute(0, 2, 3, 1) + x = self.norm1(x) + x = x.permute(0, 3, 1, 2) + x = self.act(x) + x = self.conv2(x) + x = x.permute(0, 2, 3, 1) + x = self.norm2(x) + return x + + +class DownsampleNormFirst(nn.Module): + + def __init__( + self, + in_chs=96, + out_chs=198, + norm_layer=LayerNorm, + ): + super().__init__() + self.norm = norm_layer(in_chs) + self.conv = nn.Conv2d( + in_chs, + out_chs, + kernel_size=3, + stride=2, + padding=1 + ) + + def forward(self, x): + x = self.norm(x) + x = x.permute(0, 3, 1, 2) + x = self.conv(x) + x = x.permute(0, 2, 3, 1) + return x + + +class Downsample(nn.Module): + + def __init__( + self, + in_chs=96, + out_chs=198, + norm_layer=LayerNorm, + ): + super().__init__() + self.conv = nn.Conv2d( + in_chs, + out_chs, + kernel_size=3, + stride=2, + padding=1 + ) + self.norm = norm_layer(out_chs) + + def forward(self, x): + x = x.permute(0, 3, 1, 2) + x = self.conv(x) + x = x.permute(0, 2, 3, 1) + x = self.norm(x) + return x + + +class MlpHead(nn.Module): + """ MLP classification head + """ + + def __init__( + self, + in_features, + num_classes=1000, + pool_type='avg', + act_layer=nn.GELU, + mlp_ratio=4, + norm_layer=LayerNorm, + drop_rate=0., + bias=True, + ): + super().__init__() + if mlp_ratio is not None: + hidden_size = int(mlp_ratio * in_features) + else: + hidden_size = None + self.pool_type = pool_type + self.in_features = in_features + self.hidden_size = hidden_size or in_features + + self.norm = norm_layer(in_features) + if hidden_size: + self.pre_logits = nn.Sequential(OrderedDict([ + ('fc', nn.Linear(in_features, hidden_size)), + ('act', act_layer()), + ('norm', norm_layer(hidden_size)) + ])) + self.num_features = hidden_size + else: + self.num_features = in_features + self.pre_logits = nn.Identity() + + self.fc = nn.Linear(self.num_features, num_classes, bias=bias) if num_classes > 0 else nn.Identity() + self.head_dropout = nn.Dropout(drop_rate) + + def reset(self, num_classes: int, pool_type: Optional[str] = None, reset_other: bool = False): + if pool_type is not None: + self.pool_type = pool_type + if reset_other: + self.norm = nn.Identity() + self.pre_logits = nn.Identity() + self.num_features = self.in_features + self.fc = nn.Linear(self.num_features, num_classes) 
if num_classes > 0 else nn.Identity() + + def forward(self, x, pre_logits: bool = False): + if self.pool_type == 'avg': + x = x.mean((1, 2)) + x = self.norm(x) + x = self.pre_logits(x) + x = self.head_dropout(x) + if pre_logits: + return x + x = self.fc(x) + return x + + +class GatedConvBlock(nn.Module): + r""" Our implementation of Gated CNN Block: https://arxiv.org/pdf/1612.08083 + Args: + conv_ratio: controls the number of channels on which the depthwise convolution is applied. + Convolving only part of the channels can improve practical efficiency. + The idea of partial channels is from ShuffleNet V2 (https://arxiv.org/abs/1807.11164) and is + also used by InceptionNeXt (https://arxiv.org/abs/2303.16900) and FasterNet (https://arxiv.org/abs/2303.03667) + """ + + def __init__( + self, + dim, + expansion_ratio=8 / 3, + kernel_size=7, + conv_ratio=1.0, + ls_init_value=None, + norm_layer=LayerNorm, + act_layer=nn.GELU, + drop_path=0., + **kwargs + ): + super().__init__() + self.norm = norm_layer(dim) + hidden = int(expansion_ratio * dim) + self.fc1 = nn.Linear(dim, hidden * 2) + self.act = act_layer() + conv_channels = int(conv_ratio * dim) + self.split_indices = (hidden, hidden - conv_channels, conv_channels) + self.conv = nn.Conv2d( + conv_channels, + conv_channels, + kernel_size=kernel_size, + padding=kernel_size // 2, + groups=conv_channels + ) + self.fc2 = nn.Linear(hidden, dim) + self.ls = LayerScale(dim) if ls_init_value is not None else nn.Identity() + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + shortcut = x # [B, H, W, C] + x = self.norm(x) + x = self.fc1(x) + g, i, c = torch.split(x, self.split_indices, dim=-1) + c = c.permute(0, 3, 1, 2) # [B, H, W, C] -> [B, C, H, W] + c = self.conv(c) + c = c.permute(0, 2, 3, 1) # [B, C, H, W] -> [B, H, W, C] + x = self.fc2(self.act(g) * torch.cat((i, c), dim=-1)) + x = self.ls(x) + x = self.drop_path(x) + return x + shortcut + + +class MambaOutStage(nn.Module): + + def __init__( + self, + dim, + dim_out: Optional[int] = None, + depth: int = 4, + expansion_ratio=8 / 3, + kernel_size=7, + conv_ratio=1.0, + downsample: str = '', + ls_init_value: Optional[float] = None, + norm_layer=LayerNorm, + act_layer=nn.GELU, + drop_path=0., + ): + super().__init__() + dim_out = dim_out or dim + self.grad_checkpointing = False + + if downsample == 'conv': + self.downsample = Downsample(dim, dim_out, norm_layer=norm_layer) + elif downsample == 'conv_nf': + self.downsample = DownsampleNormFirst(dim, dim_out, norm_layer=norm_layer) + else: + assert dim == dim_out + self.downsample = nn.Identity() + + self.blocks = nn.Sequential(*[ + GatedConvBlock( + dim=dim_out, + expansion_ratio=expansion_ratio, + kernel_size=kernel_size, + conv_ratio=conv_ratio, + ls_init_value=ls_init_value, + norm_layer=norm_layer, + act_layer=act_layer, + drop_path=drop_path[j] if isinstance(drop_path, (list, tuple)) else drop_path, + ) + for j in range(depth) + ]) + + def forward(self, x): + x = self.downsample(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class MambaOut(nn.Module): + r""" MambaOut + A PyTorch impl of MambaOut, built on `MetaFormer Baselines for Vision` - + https://arxiv.org/abs/2210.13452 + + Args: + in_chans (int): Number of input image channels. Default: 3. + num_classes (int): Number of classes for classification head. Default: 1000. + depths (list or tuple): Number of blocks at each stage. Default: [3, 3, 9, 3].
+ dims (list or tuple): Feature dimension at each stage. Default: [96, 192, 384, 576]. + downsample (str): Type of downsample layer before each stage, 'conv' or 'conv_nf'. Default: 'conv'. + drop_path_rate (float): Stochastic depth rate. Default: 0. + norm_layer: Normalization layer. Default: LayerNorm. + head_fn (str): Classification head, 'default' (MlpHead) or any other value for ClNormMlpClassifierHead. Default: 'default'. + drop_rate (float): Dropout rate for the classifier head. Default: 0. + """ + + def __init__( + self, + in_chans=3, + num_classes=1000, + global_pool='avg', + depths=(3, 3, 9, 3), + dims=(96, 192, 384, 576), + norm_layer=LayerNorm, + act_layer=nn.GELU, + conv_ratio=1.0, + expansion_ratio=8/3, + kernel_size=7, + stem_mid_norm=True, + ls_init_value=None, + downsample='conv', + drop_path_rate=0., + drop_rate=0., + head_fn='default', + ): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.output_fmt = 'NHWC' + if not isinstance(depths, (list, tuple)): + depths = [depths] # it means the model has only one stage + if not isinstance(dims, (list, tuple)): + dims = [dims] + act_layer = get_act_layer(act_layer) + + num_stage = len(depths) + self.num_stage = num_stage + self.feature_info = [] + + self.stem = Stem( + in_chans, + dims[0], + mid_norm=stem_mid_norm, + act_layer=act_layer, + norm_layer=norm_layer, + ) + prev_dim = dims[0] + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + cur = 0 + curr_stride = 4 + self.stages = nn.Sequential() + for i in range(num_stage): + dim = dims[i] + stride = 2 if curr_stride == 2 or i > 0 else 1 + curr_stride *= stride + stage = MambaOutStage( + dim=prev_dim, + dim_out=dim, + depth=depths[i], + kernel_size=kernel_size, + conv_ratio=conv_ratio, + expansion_ratio=expansion_ratio, + downsample=downsample if i > 0 else '', + ls_init_value=ls_init_value, + norm_layer=norm_layer, + act_layer=act_layer, + drop_path=dp_rates[i], + ) + self.stages.append(stage) + prev_dim = dim + # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 + self.feature_info += [dict(num_chs=prev_dim, reduction=curr_stride, module=f'stages.{i}')] + cur += depths[i] + + if head_fn == 'default': + # specific to this model, unusual norm -> pool -> fc -> act -> norm -> fc combo + self.head = MlpHead( + prev_dim, + num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + norm_layer=norm_layer, + ) + else: + # more typical norm -> pool -> fc -> act -> fc + self.head = ClNormMlpClassifierHead( + prev_dim, + num_classes, + hidden_size=int(prev_dim * 4), + pool_type=global_pool, + norm_layer=norm_layer, + drop_rate=drop_rate, + ) + self.num_features = prev_dim + self.head_hidden_size = self.head.num_features + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, (nn.Conv2d, nn.Linear)): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+)\.downsample', (0,)), # blocks + (r'^stages\.(\d+)\.blocks\.(\d+)', None), + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + def forward_features(self, x): +
x = self.stem(x) + x = self.stages(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + if 'model' in state_dict: + state_dict = state_dict['model'] + if 'stem.conv1.weight' in state_dict: + return state_dict + + import re + out_dict = {} + for k, v in state_dict.items(): + k = k.replace('downsample_layers.0.', 'stem.') + k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) + k = re.sub(r'downsample_layers.([0-9]+)', r'stages.\1.downsample', k) + # remap head names + if k.startswith('norm.'): + # this is moving to head since it's after the pooling + k = k.replace('norm.', 'head.norm.') + elif k.startswith('head.'): + k = k.replace('head.fc1.', 'head.pre_logits.fc.') + k = k.replace('head.norm.', 'head.pre_logits.norm.') + k = k.replace('head.fc2.', 'head.fc.') + out_dict[k] = v + + return out_dict + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'test_input_size': (3, 288, 288), + 'pool_size': (7, 7), 'crop_pct': 1.0, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + # original weights + 'mambaout_femto.in1k': _cfg( + hf_hub_id='timm/'), + 'mambaout_kobe.in1k': _cfg( + hf_hub_id='timm/'), + 'mambaout_tiny.in1k': _cfg( + hf_hub_id='timm/'), + 'mambaout_small.in1k': _cfg( + hf_hub_id='timm/'), + 'mambaout_base.in1k': _cfg( + hf_hub_id='timm/'), + + # timm experiments below + 'mambaout_small_rw.sw_e450_in1k': _cfg( + hf_hub_id='timm/', + ), + 'mambaout_base_short_rw.sw_e500_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_crop_pct=1.0, + ), + 'mambaout_base_tall_rw.sw_e500_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_crop_pct=1.0, + ), + 'mambaout_base_wide_rw.sw_e500_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_crop_pct=1.0, + ), + 'mambaout_base_plus_rw.sw_e150_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + ), + 'mambaout_base_plus_rw.sw_e150_r384_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), test_input_size=(3, 384, 384), crop_mode='squash', pool_size=(12, 12), + ), + 'mambaout_base_plus_rw.sw_e150_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + ), + 'test_mambaout': _cfg(input_size=(3, 160, 160), test_input_size=(3, 192, 192), pool_size=(5, 5)), +}) + + +def _create_mambaout(variant, pretrained=False, **kwargs): + model = build_model_with_cfg( + MambaOut, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), + **kwargs, + ) + return model + + +# a series of MambaOut models +@register_model +def mambaout_femto(pretrained=False, **kwargs): + model_args = dict(depths=(3, 3, 9, 3), dims=(48, 96, 192, 288)) + return _create_mambaout('mambaout_femto', pretrained=pretrained, **dict(model_args, **kwargs)) + +# Kobe Memorial Version with 24 Gated CNN blocks +@register_model +def mambaout_kobe(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 15, 3], dims=[48, 96, 192, 288]) + return _create_mambaout('mambaout_kobe', pretrained=pretrained, **dict(model_args, **kwargs)) + +@register_model +def mambaout_tiny(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 9, 
3], dims=[96, 192, 384, 576]) + return _create_mambaout('mambaout_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def mambaout_small(pretrained=False, **kwargs): + model_args = dict(depths=[3, 4, 27, 3], dims=[96, 192, 384, 576]) + return _create_mambaout('mambaout_small', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def mambaout_base(pretrained=False, **kwargs): + model_args = dict(depths=[3, 4, 27, 3], dims=[128, 256, 512, 768]) + return _create_mambaout('mambaout_base', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def mambaout_small_rw(pretrained=False, **kwargs): + model_args = dict( + depths=[3, 4, 27, 3], + dims=[96, 192, 384, 576], + stem_mid_norm=False, + downsample='conv_nf', + ls_init_value=1e-6, + head_fn='norm_mlp', + ) + return _create_mambaout('mambaout_small_rw', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def mambaout_base_short_rw(pretrained=False, **kwargs): + model_args = dict( + depths=(3, 3, 25, 3), + dims=(128, 256, 512, 768), + expansion_ratio=3.0, + conv_ratio=1.25, + stem_mid_norm=False, + downsample='conv_nf', + ls_init_value=1e-6, + head_fn='norm_mlp', + ) + return _create_mambaout('mambaout_base_short_rw', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def mambaout_base_tall_rw(pretrained=False, **kwargs): + model_args = dict( + depths=(3, 4, 30, 3), + dims=(128, 256, 512, 768), + expansion_ratio=2.5, + conv_ratio=1.25, + stem_mid_norm=False, + downsample='conv_nf', + ls_init_value=1e-6, + head_fn='norm_mlp', + ) + return _create_mambaout('mambaout_base_tall_rw', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def mambaout_base_wide_rw(pretrained=False, **kwargs): + model_args = dict( + depths=(3, 4, 27, 3), + dims=(128, 256, 512, 768), + expansion_ratio=3.0, + conv_ratio=1.5, + stem_mid_norm=False, + downsample='conv_nf', + ls_init_value=1e-6, + act_layer='silu', + head_fn='norm_mlp', + ) + return _create_mambaout('mambaout_base_wide_rw', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def mambaout_base_plus_rw(pretrained=False, **kwargs): + model_args = dict( + depths=(3, 4, 30, 3), + dims=(128, 256, 512, 768), + expansion_ratio=3.0, + conv_ratio=1.5, + stem_mid_norm=False, + downsample='conv_nf', + ls_init_value=1e-6, + act_layer='silu', + head_fn='norm_mlp', + ) + return _create_mambaout('mambaout_base_plus_rw', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def test_mambaout(pretrained=False, **kwargs): + model_args = dict( + depths=(1, 1, 3, 1), + dims=(16, 32, 48, 64), + expansion_ratio=3, + stem_mid_norm=False, + downsample='conv_nf', + ls_init_value=1e-4, + act_layer='silu', + head_fn='norm_mlp', + ) + return _create_mambaout('test_mambaout', pretrained=pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/maxxvit.py b/pytorch-image-models/timm/models/maxxvit.py new file mode 100644 index 0000000000000000000000000000000000000000..e4375b34e5f6daf018c5bde64c00c9d120c6880d --- /dev/null +++ b/pytorch-image-models/timm/models/maxxvit.py @@ -0,0 +1,2402 @@ +""" MaxVit and CoAtNet Vision Transformer - CNN Hybrids in PyTorch + +This is a from-scratch implementation of both CoAtNet and MaxVit in PyTorch. + +99% of the implementation was done from papers, however last minute some adjustments were made +based on the (as yet unfinished?) 
public code release https://github.com/google-research/maxvit + +There are multiple sets of models defined for both architectures. Typically, names with a + `_rw` suffix are my own original configs prior to referencing https://github.com/google-research/maxvit. +These configs work well and appear to be a bit faster / lower resource than the paper. + +The models without extra prefix / suffix' (coatnet_0_224, maxvit_tiny_224, etc), are intended to +match paper, BUT, without any official pretrained weights it's difficult to confirm a 100% match. + +Papers: + +MaxViT: Multi-Axis Vision Transformer - https://arxiv.org/abs/2204.01697 +@article{tu2022maxvit, + title={MaxViT: Multi-Axis Vision Transformer}, + author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao}, + journal={ECCV}, + year={2022}, +} + +CoAtNet: Marrying Convolution and Attention for All Data Sizes - https://arxiv.org/abs/2106.04803 +@article{DBLP:journals/corr/abs-2106-04803, + author = {Zihang Dai and Hanxiao Liu and Quoc V. Le and Mingxing Tan}, + title = {CoAtNet: Marrying Convolution and Attention for All Data Sizes}, + journal = {CoRR}, + volume = {abs/2106.04803}, + year = {2021} +} + +Hacked together by / Copyright 2022, Ross Wightman +""" + +import math +from collections import OrderedDict +from dataclasses import dataclass, replace, field +from functools import partial +from typing import Callable, Optional, Union, Tuple, List + +import torch +from torch import nn +from torch.jit import Final + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import Mlp, ConvMlp, DropPath, LayerNorm, ClassifierHead, NormMlpClassifierHead +from timm.layers import create_attn, get_act_layer, get_norm_layer, get_norm_act_layer, create_conv2d, create_pool2d +from timm.layers import trunc_normal_tf_, to_2tuple, extend_tuple, make_divisible, _assert +from timm.layers import RelPosMlp, RelPosBias, RelPosBiasTf, use_fused_attn, resize_rel_pos_bias_table +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._features_fx import register_notrace_function +from ._manipulate import named_apply, checkpoint_seq +from ._registry import generate_default_cfgs, register_model + +__all__ = ['MaxxVitCfg', 'MaxxVitConvCfg', 'MaxxVitTransformerCfg', 'MaxxVit'] + + +@dataclass +class MaxxVitTransformerCfg: + dim_head: int = 32 + head_first: bool = True # head ordering in qkv channel dim + expand_ratio: float = 4.0 + expand_first: bool = True + shortcut_bias: bool = True + attn_bias: bool = True + attn_drop: float = 0. + proj_drop: float = 0. 
+ pool_type: str = 'avg2' + rel_pos_type: str = 'bias' + rel_pos_dim: int = 512 # for relative position types w/ MLP + partition_ratio: int = 32 + window_size: Optional[Tuple[int, int]] = None + grid_size: Optional[Tuple[int, int]] = None + no_block_attn: bool = False # disable window block attention for maxvit (ie only grid) + use_nchw_attn: bool = False # for MaxViT variants (not used for CoAt), keep tensors in NCHW order + init_values: Optional[float] = None + act_layer: str = 'gelu' + norm_layer: str = 'layernorm2d' + norm_layer_cl: str = 'layernorm' + norm_eps: float = 1e-6 + + def __post_init__(self): + if self.grid_size is not None: + self.grid_size = to_2tuple(self.grid_size) + if self.window_size is not None: + self.window_size = to_2tuple(self.window_size) + if self.grid_size is None: + self.grid_size = self.window_size + + +@dataclass +class MaxxVitConvCfg: + block_type: str = 'mbconv' + expand_ratio: float = 4.0 + expand_output: bool = True # calculate expansion channels from output (vs input chs) + kernel_size: int = 3 + group_size: int = 1 # 1 == depthwise + pre_norm_act: bool = False # activation after pre-norm + output_bias: bool = True # bias for shortcut + final 1x1 projection conv + stride_mode: str = 'dw' # stride done via one of 'pool', '1x1', 'dw' + pool_type: str = 'avg2' + downsample_pool_type: str = 'avg2' + padding: str = '' + attn_early: bool = False # apply attn between conv2 and norm2, instead of after norm2 + attn_layer: str = 'se' + attn_act_layer: str = 'silu' + attn_ratio: float = 0.25 + init_values: Optional[float] = 1e-6 # for ConvNeXt block, ignored by MBConv + act_layer: str = 'gelu' + norm_layer: str = '' + norm_layer_cl: str = '' + norm_eps: Optional[float] = None + + def __post_init__(self): + # mbconv vs convnext blocks have different defaults, set in post_init to avoid explicit config args + assert self.block_type in ('mbconv', 'convnext') + use_mbconv = self.block_type == 'mbconv' + if not self.norm_layer: + self.norm_layer = 'batchnorm2d' if use_mbconv else 'layernorm2d' + if not self.norm_layer_cl and not use_mbconv: + self.norm_layer_cl = 'layernorm' + if self.norm_eps is None: + self.norm_eps = 1e-5 if use_mbconv else 1e-6 + self.downsample_pool_type = self.downsample_pool_type or self.pool_type + + +@dataclass +class MaxxVitCfg: + embed_dim: Tuple[int, ...] = (96, 192, 384, 768) + depths: Tuple[int, ...] = (2, 3, 5, 2) + block_type: Tuple[Union[str, Tuple[str, ...]], ...] = ('C', 'C', 'T', 'T') + stem_width: Union[int, Tuple[int, int]] = 64 + stem_bias: bool = False + conv_cfg: MaxxVitConvCfg = field(default_factory=MaxxVitConvCfg) + transformer_cfg: MaxxVitTransformerCfg = field(default_factory=MaxxVitTransformerCfg) + head_hidden_size: int = None + weight_init: str = 'vit_eff' + + +class Attention2d(nn.Module): + fused_attn: Final[bool] + + """ multi-head attention for 2D NCHW tensors""" + def __init__( + self, + dim: int, + dim_out: Optional[int] = None, + dim_head: int = 32, + bias: bool = True, + expand_first: bool = True, + head_first: bool = True, + rel_pos_cls: Callable = None, + attn_drop: float = 0., + proj_drop: float = 0. 
+ ): + super().__init__() + dim_out = dim_out or dim + dim_attn = dim_out if expand_first else dim + self.num_heads = dim_attn // dim_head + self.dim_head = dim_head + self.head_first = head_first + self.scale = dim_head ** -0.5 + self.fused_attn = use_fused_attn() + + self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias) + self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): + B, C, H, W = x.shape + + if self.head_first: + q, k, v = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2) + else: + q, k, v = self.qkv(x).reshape(B, 3, self.num_heads, self.dim_head, -1).unbind(1) + + if self.fused_attn: + attn_bias = None + if self.rel_pos is not None: + attn_bias = self.rel_pos.get_bias() + elif shared_rel_pos is not None: + attn_bias = shared_rel_pos + + x = torch.nn.functional.scaled_dot_product_attention( + q.transpose(-1, -2).contiguous(), + k.transpose(-1, -2).contiguous(), + v.transpose(-1, -2).contiguous(), + attn_mask=attn_bias, + dropout_p=self.attn_drop.p if self.training else 0., + ).transpose(-1, -2).reshape(B, -1, H, W) + else: + q = q * self.scale + attn = q.transpose(-2, -1) @ k + if self.rel_pos is not None: + attn = self.rel_pos(attn) + elif shared_rel_pos is not None: + attn = attn + shared_rel_pos + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W) + + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class AttentionCl(nn.Module): + """ Channels-last multi-head attention (B, ..., C) """ + fused_attn: Final[bool] + + def __init__( + self, + dim: int, + dim_out: Optional[int] = None, + dim_head: int = 32, + bias: bool = True, + expand_first: bool = True, + head_first: bool = True, + rel_pos_cls: Callable = None, + attn_drop: float = 0., + proj_drop: float = 0. 
+ ): + super().__init__() + dim_out = dim_out or dim + dim_attn = dim_out if expand_first and dim_out > dim else dim + assert dim_attn % dim_head == 0, 'attn dim should be divisible by head_dim' + self.num_heads = dim_attn // dim_head + self.dim_head = dim_head + self.head_first = head_first + self.scale = dim_head ** -0.5 + self.fused_attn = use_fused_attn() + + self.qkv = nn.Linear(dim, dim_attn * 3, bias=bias) + self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim_attn, dim_out, bias=bias) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): + B = x.shape[0] + restore_shape = x.shape[:-1] + + if self.head_first: + q, k, v = self.qkv(x).view(B, -1, self.num_heads, self.dim_head * 3).transpose(1, 2).chunk(3, dim=3) + else: + q, k, v = self.qkv(x).reshape(B, -1, 3, self.num_heads, self.dim_head).transpose(1, 3).unbind(2) + + if self.fused_attn: + attn_bias = None + if self.rel_pos is not None: + attn_bias = self.rel_pos.get_bias() + elif shared_rel_pos is not None: + attn_bias = shared_rel_pos + + x = torch.nn.functional.scaled_dot_product_attention( + q, k, v, + attn_mask=attn_bias, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + if self.rel_pos is not None: + attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos) + elif shared_rel_pos is not None: + attn = attn + shared_rel_pos + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.transpose(1, 2).reshape(restore_shape + (-1,)) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LayerScale(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + gamma = self.gamma + return x.mul_(gamma) if self.inplace else x * gamma + + +class LayerScale2d(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + gamma = self.gamma.view(1, -1, 1, 1) + return x.mul_(gamma) if self.inplace else x * gamma + + +class Downsample2d(nn.Module): + """ A downsample pooling module supporting several maxpool and avgpool modes + * 'max' - MaxPool2d w/ kernel_size 3, stride 2, padding 1 + * 'max2' - MaxPool2d w/ kernel_size = stride = 2 + * 'avg' - AvgPool2d w/ kernel_size 3, stride 2, padding 1 + * 'avg2' - AvgPool2d w/ kernel_size = stride = 2 + """ + + def __init__( + self, + dim: int, + dim_out: int, + pool_type: str = 'avg2', + padding: str = '', + bias: bool = True, + ): + super().__init__() + assert pool_type in ('max', 'max2', 'avg', 'avg2') + if pool_type == 'max': + self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=padding or 1) + elif pool_type == 'max2': + self.pool = create_pool2d('max', 2, padding=padding or 0) # kernel_size == stride == 2 + elif pool_type == 'avg': + self.pool = create_pool2d( + 'avg', kernel_size=3, stride=2, count_include_pad=False, padding=padding or 1) + else: + self.pool = create_pool2d('avg', 2, padding=padding or 0) + + if dim != dim_out: + self.expand = nn.Conv2d(dim, dim_out, 1, bias=bias) + else: + self.expand = nn.Identity() + + def forward(self, x): + x = self.pool(x) # spatial downsample + x = self.expand(x) # expand chs + return x + + +def 
_init_transformer(module, name, scheme=''): + if isinstance(module, (nn.Conv2d, nn.Linear)): + if scheme == 'normal': + nn.init.normal_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif scheme == 'trunc_normal': + trunc_normal_tf_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif scheme == 'xavier_normal': + nn.init.xavier_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + else: + # vit like + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + + +class TransformerBlock2d(nn.Module): + """ Transformer block with 2D downsampling + '2D' NCHW tensor layout + + Some gains can be seen on GPU using a 1D / CL block, BUT w/ the need to switch back/forth to NCHW + for spatial pooling, the benefit is minimal so ended up using just this variant for CoAt configs. + + This impl was faster on TPU w/ PT XLA than the 1D experiment. + """ + + def __init__( + self, + dim: int, + dim_out: int, + stride: int = 1, + rel_pos_cls: Callable = None, + cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), + drop_path: float = 0., + ): + super().__init__() + norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) + act_layer = get_act_layer(cfg.act_layer) + + if stride == 2: + self.shortcut = Downsample2d(dim, dim_out, pool_type=cfg.pool_type, bias=cfg.shortcut_bias) + self.norm1 = nn.Sequential(OrderedDict([ + ('norm', norm_layer(dim)), + ('down', Downsample2d(dim, dim, pool_type=cfg.pool_type)), + ])) + else: + assert dim == dim_out + self.shortcut = nn.Identity() + self.norm1 = norm_layer(dim) + + self.attn = Attention2d( + dim, + dim_out, + dim_head=cfg.dim_head, + expand_first=cfg.expand_first, + bias=cfg.attn_bias, + rel_pos_cls=rel_pos_cls, + attn_drop=cfg.attn_drop, + proj_drop=cfg.proj_drop + ) + self.ls1 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim_out) + self.mlp = ConvMlp( + in_features=dim_out, + hidden_features=int(dim_out * cfg.expand_ratio), + act_layer=act_layer, + drop=cfg.proj_drop) + self.ls2 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def init_weights(self, scheme=''): + named_apply(partial(_init_transformer, scheme=scheme), self) + + def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): + x = self.shortcut(x) + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos))) + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + + +def _init_conv(module, name, scheme=''): + if isinstance(module, nn.Conv2d): + if scheme == 'normal': + nn.init.normal_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif scheme == 'trunc_normal': + trunc_normal_tf_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif scheme == 'xavier_normal': + nn.init.xavier_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + else: + # efficientnet like + fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels + fan_out //= module.groups + nn.init.normal_(module.weight, 0, math.sqrt(2.0 / fan_out)) + if module.bias is not None: + nn.init.zeros_(module.bias) + + +def num_groups(group_size, channels): + if not group_size: # 0 or None + return 1 # normal conv with 1 group + else: + # NOTE group_size == 1 -> depthwise conv + assert channels % group_size == 0 + return channels // group_size + + +class MbConvBlock(nn.Module): + """ Pre-Norm Conv Block - 1x1 - kxk - 1x1, w/ inverted bottleneck (expand) + """ + def __init__( + self, + in_chs: int, + out_chs: int, + stride: int = 1, + dilation: Tuple[int, int] = (1, 1), + cfg: MaxxVitConvCfg = MaxxVitConvCfg(), + drop_path: float = 0. + ): + super(MbConvBlock, self).__init__() + norm_act_layer = partial(get_norm_act_layer(cfg.norm_layer, cfg.act_layer), eps=cfg.norm_eps) + mid_chs = make_divisible((out_chs if cfg.expand_output else in_chs) * cfg.expand_ratio) + groups = num_groups(cfg.group_size, mid_chs) + + if stride == 2: + self.shortcut = Downsample2d( + in_chs, out_chs, pool_type=cfg.pool_type, bias=cfg.output_bias, padding=cfg.padding) + else: + self.shortcut = nn.Identity() + + assert cfg.stride_mode in ('pool', '1x1', 'dw') + stride_pool, stride_1, stride_2 = 1, 1, 1 + if cfg.stride_mode == 'pool': + # NOTE this is not described in paper, experiment to find faster option that doesn't stride in 1x1 + stride_pool, dilation_2 = stride, dilation[1] + # FIXME handle dilation of avg pool + elif cfg.stride_mode == '1x1': + # NOTE I don't like this option described in paper, 1x1 w/ stride throws info away + stride_1, dilation_2 = stride, dilation[1] + else: + stride_2, dilation_2 = stride, dilation[0] + + self.pre_norm = norm_act_layer(in_chs, apply_act=cfg.pre_norm_act) + if stride_pool > 1: + self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type, padding=cfg.padding) + else: + self.down = nn.Identity() + self.conv1_1x1 = create_conv2d(in_chs, mid_chs, 1, stride=stride_1) + self.norm1 = norm_act_layer(mid_chs) + + self.conv2_kxk = create_conv2d( + mid_chs, mid_chs, cfg.kernel_size, + stride=stride_2, dilation=dilation_2, groups=groups, padding=cfg.padding) + + attn_kwargs = {} + if isinstance(cfg.attn_layer, str): + if cfg.attn_layer == 'se' or cfg.attn_layer == 'eca': + attn_kwargs['act_layer'] = cfg.attn_act_layer + attn_kwargs['rd_channels'] = int(cfg.attn_ratio * (out_chs if cfg.expand_output else mid_chs)) + + # two different orderings for SE and norm2 (due to some weights and trials using SE before norm2) + if cfg.attn_early: + self.se_early = create_attn(cfg.attn_layer, mid_chs, 
**attn_kwargs) + self.norm2 = norm_act_layer(mid_chs) + self.se = None + else: + self.se_early = None + self.norm2 = norm_act_layer(mid_chs) + self.se = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs) + + self.conv3_1x1 = create_conv2d(mid_chs, out_chs, 1, bias=cfg.output_bias) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def init_weights(self, scheme=''): + named_apply(partial(_init_conv, scheme=scheme), self) + + def forward(self, x): + shortcut = self.shortcut(x) + x = self.pre_norm(x) + x = self.down(x) + + # 1x1 expansion conv & norm-act + x = self.conv1_1x1(x) + x = self.norm1(x) + + # depthwise / grouped 3x3 conv w/ SE (or other) channel attention & norm-act + x = self.conv2_kxk(x) + if self.se_early is not None: + x = self.se_early(x) + x = self.norm2(x) + if self.se is not None: + x = self.se(x) + + # 1x1 linear projection to output width + x = self.conv3_1x1(x) + x = self.drop_path(x) + shortcut + return x + + +class ConvNeXtBlock(nn.Module): + """ ConvNeXt Block + """ + + def __init__( + self, + in_chs: int, + out_chs: Optional[int] = None, + kernel_size: int = 7, + stride: int = 1, + dilation: Tuple[int, int] = (1, 1), + cfg: MaxxVitConvCfg = MaxxVitConvCfg(), + conv_mlp: bool = True, + drop_path: float = 0. + ): + super().__init__() + out_chs = out_chs or in_chs + act_layer = get_act_layer(cfg.act_layer) + if conv_mlp: + norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) + mlp_layer = ConvMlp + else: + assert 'layernorm' in cfg.norm_layer + norm_layer = LayerNorm + mlp_layer = Mlp + self.use_conv_mlp = conv_mlp + + if stride == 2: + self.shortcut = Downsample2d(in_chs, out_chs) + elif in_chs != out_chs: + self.shortcut = nn.Conv2d(in_chs, out_chs, kernel_size=1, bias=cfg.output_bias) + else: + self.shortcut = nn.Identity() + + assert cfg.stride_mode in ('pool', 'dw') + stride_pool, stride_dw = 1, 1 + # FIXME handle dilation? + if cfg.stride_mode == 'pool': + stride_pool = stride + else: + stride_dw = stride + + if stride_pool == 2: + self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type) + else: + self.down = nn.Identity() + + self.conv_dw = create_conv2d( + in_chs, out_chs, kernel_size=kernel_size, stride=stride_dw, dilation=dilation[1], + depthwise=True, bias=cfg.output_bias) + self.norm = norm_layer(out_chs) + self.mlp = mlp_layer(out_chs, int(cfg.expand_ratio * out_chs), bias=cfg.output_bias, act_layer=act_layer) + if conv_mlp: + self.ls = LayerScale2d(out_chs, cfg.init_values) if cfg.init_values else nn.Identity() + else: + self.ls = LayerScale(out_chs, cfg.init_values) if cfg.init_values else nn.Identity() + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, x): + shortcut = self.shortcut(x) + x = self.down(x) + x = self.conv_dw(x) + if self.use_conv_mlp: + x = self.norm(x) + x = self.mlp(x) + x = self.ls(x) + else: + x = x.permute(0, 2, 3, 1) + x = self.norm(x) + x = self.mlp(x) + x = self.ls(x) + x = x.permute(0, 3, 1, 2) + + x = self.drop_path(x) + shortcut + return x + + +def window_partition(x, window_size: List[int]): + B, H, W, C = x.shape + _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})') + _assert(W % window_size[1] == 0, f'width ({W}) must be divisible by window ({window_size[1]})') + x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) + return windows + + +@register_notrace_function # reason: int argument is a Proxy +def window_reverse(windows, window_size: List[int], img_size: List[int]): + H, W = img_size + C = windows.shape[-1] + x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) + return x + + +def grid_partition(x, grid_size: List[int]): + B, H, W, C = x.shape + _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}') + _assert(W % grid_size[1] == 0, f'width {W} must be divisible by grid {grid_size[1]}') + x = x.view(B, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1], C) + windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, grid_size[0], grid_size[1], C) + return windows + + +@register_notrace_function # reason: int argument is a Proxy +def grid_reverse(windows, grid_size: List[int], img_size: List[int]): + H, W = img_size + C = windows.shape[-1] + x = windows.view(-1, H // grid_size[0], W // grid_size[1], grid_size[0], grid_size[1], C) + x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, H, W, C) + return x + + +def get_rel_pos_cls(cfg: MaxxVitTransformerCfg, window_size): + rel_pos_cls = None + if cfg.rel_pos_type == 'mlp': + rel_pos_cls = partial(RelPosMlp, window_size=window_size, hidden_dim=cfg.rel_pos_dim) + elif cfg.rel_pos_type == 'bias': + rel_pos_cls = partial(RelPosBias, window_size=window_size) + elif cfg.rel_pos_type == 'bias_tf': + rel_pos_cls = partial(RelPosBiasTf, window_size=window_size) + return rel_pos_cls + + +class PartitionAttentionCl(nn.Module): + """ Grid or Block partition + Attn + FFN. + NxC 'channels last' tensor layout. + """ + + def __init__( + self, + dim: int, + partition_type: str = 'block', + cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), + drop_path: float = 0., + ): + super().__init__() + norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) # NOTE this block is channels-last + act_layer = get_act_layer(cfg.act_layer) + + self.partition_block = partition_type == 'block' + self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size) + rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) + + self.norm1 = norm_layer(dim) + self.attn = AttentionCl( + dim, + dim, + dim_head=cfg.dim_head, + bias=cfg.attn_bias, + head_first=cfg.head_first, + rel_pos_cls=rel_pos_cls, + attn_drop=cfg.attn_drop, + proj_drop=cfg.proj_drop, + ) + self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * cfg.expand_ratio), + act_layer=act_layer, + drop=cfg.proj_drop) + self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def _partition_attn(self, x): + img_size = x.shape[1:3] + if self.partition_block: + partitioned = window_partition(x, self.partition_size) + else: + partitioned = grid_partition(x, self.partition_size) + + partitioned = self.attn(partitioned) + + if self.partition_block: + x = window_reverse(partitioned, self.partition_size, img_size) + else: + x = grid_reverse(partitioned, self.partition_size, img_size) + return x + + def forward(self, x): + x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class ParallelPartitionAttention(nn.Module): + """ Experimental. Grid and Block partition + single FFN + NxC tensor layout. + """ + + def __init__( + self, + dim: int, + cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), + drop_path: float = 0., + ): + super().__init__() + assert dim % 2 == 0 + norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) # NOTE this block is channels-last + act_layer = get_act_layer(cfg.act_layer) + + assert cfg.window_size == cfg.grid_size + self.partition_size = to_2tuple(cfg.window_size) + rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) + + self.norm1 = norm_layer(dim) + self.attn_block = AttentionCl( + dim, + dim // 2, + dim_head=cfg.dim_head, + bias=cfg.attn_bias, + head_first=cfg.head_first, + rel_pos_cls=rel_pos_cls, + attn_drop=cfg.attn_drop, + proj_drop=cfg.proj_drop, + ) + self.attn_grid = AttentionCl( + dim, + dim // 2, + dim_head=cfg.dim_head, + bias=cfg.attn_bias, + head_first=cfg.head_first, + rel_pos_cls=rel_pos_cls, + attn_drop=cfg.attn_drop, + proj_drop=cfg.proj_drop, + ) + self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * cfg.expand_ratio), + out_features=dim, + act_layer=act_layer, + drop=cfg.proj_drop) + self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def _partition_attn(self, x): + img_size = x.shape[1:3] + + partitioned_block = window_partition(x, self.partition_size) + partitioned_block = self.attn_block(partitioned_block) + x_window = window_reverse(partitioned_block, self.partition_size, img_size) + + partitioned_grid = grid_partition(x, self.partition_size) + partitioned_grid = self.attn_grid(partitioned_grid) + x_grid = grid_reverse(partitioned_grid, self.partition_size, img_size) + + return torch.cat([x_window, x_grid], dim=-1) + + def forward(self, x): + x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + + +def window_partition_nchw(x, window_size: List[int]): + B, C, H, W = x.shape + _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})') + _assert(W % window_size[1] == 0, f'width ({W}) must be divisible by window ({window_size[1]})') + x = x.view(B, C, H // window_size[0], window_size[0], W // window_size[1], window_size[1]) + windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, C, window_size[0], window_size[1]) + return windows + + +@register_notrace_function # reason: int argument is a Proxy +def window_reverse_nchw(windows, window_size: List[int], img_size: List[int]): + H, W = img_size + C = windows.shape[1] + x = windows.view(-1, H // window_size[0], W // window_size[1], C, window_size[0], window_size[1]) + x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, C, H, W) + return x + + +def grid_partition_nchw(x, grid_size: List[int]): + B, C, H, W = x.shape + _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}') + _assert(W % grid_size[1] == 0, f'width {W} must be divisible by grid {grid_size[1]}') + x = x.view(B, C, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1]) + windows = x.permute(0, 3, 5, 1, 2, 4).contiguous().view(-1, C, grid_size[0], grid_size[1]) + return windows + + +@register_notrace_function # reason: int argument is a Proxy +def grid_reverse_nchw(windows, grid_size: List[int], img_size: List[int]): + H, W = img_size + C = windows.shape[1] + x = windows.view(-1, H // grid_size[0], W // grid_size[1], C, grid_size[0], grid_size[1]) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous().view(-1, C, H, W) + return x + + +class PartitionAttention2d(nn.Module): + """ Grid or Block partition + Attn + FFN + + '2D' NCHW tensor layout. + """ + + def __init__( + self, + dim: int, + partition_type: str = 'block', + cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), + drop_path: float = 0., + ): + super().__init__() + norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) # NOTE this block is channels-last + act_layer = get_act_layer(cfg.act_layer) + + self.partition_block = partition_type == 'block' + self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size) + rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) + + self.norm1 = norm_layer(dim) + self.attn = Attention2d( + dim, + dim, + dim_head=cfg.dim_head, + bias=cfg.attn_bias, + head_first=cfg.head_first, + rel_pos_cls=rel_pos_cls, + attn_drop=cfg.attn_drop, + proj_drop=cfg.proj_drop, + ) + self.ls1 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = ConvMlp( + in_features=dim, + hidden_features=int(dim * cfg.expand_ratio), + act_layer=act_layer, + drop=cfg.proj_drop) + self.ls2 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def _partition_attn(self, x): + img_size = x.shape[-2:] + if self.partition_block: + partitioned = window_partition_nchw(x, self.partition_size) + else: + partitioned = grid_partition_nchw(x, self.partition_size) + + partitioned = self.attn(partitioned) + + if self.partition_block: + x = window_reverse_nchw(partitioned, self.partition_size, img_size) + else: + x = grid_reverse_nchw(partitioned, self.partition_size, img_size) + return x + + def forward(self, x): + x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class MaxxVitBlock(nn.Module): + """ MaxVit conv, window partition + FFN , grid partition + FFN + """ + + def __init__( + self, + dim: int, + dim_out: int, + stride: int = 1, + conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), + transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), + drop_path: float = 0., + ): + super().__init__() + self.nchw_attn = transformer_cfg.use_nchw_attn + + conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock + self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path) + + attn_kwargs = dict(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path) + partition_layer = PartitionAttention2d if self.nchw_attn else PartitionAttentionCl + self.attn_block = None if transformer_cfg.no_block_attn else partition_layer(**attn_kwargs) + self.attn_grid = partition_layer(partition_type='grid', **attn_kwargs) + + def init_weights(self, scheme=''): + if self.attn_block is not None: + named_apply(partial(_init_transformer, scheme=scheme), self.attn_block) + named_apply(partial(_init_transformer, scheme=scheme), self.attn_grid) + named_apply(partial(_init_conv, scheme=scheme), self.conv) + + def forward(self, x): + # NCHW format + x = self.conv(x) + + if not self.nchw_attn: + x = x.permute(0, 2, 3, 1) # to NHWC (channels-last) + if self.attn_block is not None: + x = self.attn_block(x) + x = self.attn_grid(x) + if not self.nchw_attn: + x = x.permute(0, 3, 1, 2) # back to NCHW + return x + + +class ParallelMaxxVitBlock(nn.Module): + """ MaxVit block with parallel cat(window + grid), one FF + Experimental timm block. 
+ """ + + def __init__( + self, + dim, + dim_out, + stride=1, + num_conv=2, + conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), + transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), + drop_path=0., + ): + super().__init__() + + conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock + if num_conv > 1: + convs = [conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)] + convs += [conv_cls(dim_out, dim_out, cfg=conv_cfg, drop_path=drop_path)] * (num_conv - 1) + self.conv = nn.Sequential(*convs) + else: + self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path) + self.attn = ParallelPartitionAttention(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path) + + def init_weights(self, scheme=''): + named_apply(partial(_init_transformer, scheme=scheme), self.attn) + named_apply(partial(_init_conv, scheme=scheme), self.conv) + + def forward(self, x): + x = self.conv(x) + x = x.permute(0, 2, 3, 1) + x = self.attn(x) + x = x.permute(0, 3, 1, 2) + return x + + +class MaxxVitStage(nn.Module): + def __init__( + self, + in_chs: int, + out_chs: int, + stride: int = 2, + depth: int = 4, + feat_size: Tuple[int, int] = (14, 14), + block_types: Union[str, Tuple[str]] = 'C', + transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), + conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), + drop_path: Union[float, List[float]] = 0., + ): + super().__init__() + self.grad_checkpointing = False + + block_types = extend_tuple(block_types, depth) + blocks = [] + for i, t in enumerate(block_types): + block_stride = stride if i == 0 else 1 + assert t in ('C', 'T', 'M', 'PM') + if t == 'C': + conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock + blocks += [conv_cls( + in_chs, + out_chs, + stride=block_stride, + cfg=conv_cfg, + drop_path=drop_path[i], + )] + elif t == 'T': + rel_pos_cls = get_rel_pos_cls(transformer_cfg, feat_size) + blocks += [TransformerBlock2d( + in_chs, + out_chs, + stride=block_stride, + rel_pos_cls=rel_pos_cls, + cfg=transformer_cfg, + drop_path=drop_path[i], + )] + elif t == 'M': + blocks += [MaxxVitBlock( + in_chs, + out_chs, + stride=block_stride, + conv_cfg=conv_cfg, + transformer_cfg=transformer_cfg, + drop_path=drop_path[i], + )] + elif t == 'PM': + blocks += [ParallelMaxxVitBlock( + in_chs, + out_chs, + stride=block_stride, + conv_cfg=conv_cfg, + transformer_cfg=transformer_cfg, + drop_path=drop_path[i], + )] + in_chs = out_chs + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class Stem(nn.Module): + + def __init__( + self, + in_chs: int, + out_chs: int, + kernel_size: int = 3, + padding: str = '', + bias: bool = False, + act_layer: str = 'gelu', + norm_layer: str = 'batchnorm2d', + norm_eps: float = 1e-5, + ): + super().__init__() + if not isinstance(out_chs, (list, tuple)): + out_chs = to_2tuple(out_chs) + + norm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps) + self.out_chs = out_chs[-1] + self.stride = 2 + + self.conv1 = create_conv2d(in_chs, out_chs[0], kernel_size, stride=2, padding=padding, bias=bias) + self.norm1 = norm_act_layer(out_chs[0]) + self.conv2 = create_conv2d(out_chs[0], out_chs[1], kernel_size, stride=1, padding=padding, bias=bias) + + def init_weights(self, scheme=''): + named_apply(partial(_init_conv, scheme=scheme), self) + + def forward(self, x): + x = self.conv1(x) + x = 
self.norm1(x) + x = self.conv2(x) + return x + + +def cfg_window_size(cfg: MaxxVitTransformerCfg, img_size: Tuple[int, int]): + if cfg.window_size is not None: + assert cfg.grid_size + return cfg + partition_size = img_size[0] // cfg.partition_ratio, img_size[1] // cfg.partition_ratio + cfg = replace(cfg, window_size=partition_size, grid_size=partition_size) + return cfg + + +def _overlay_kwargs(cfg: MaxxVitCfg, **kwargs): + transformer_kwargs = {} + conv_kwargs = {} + base_kwargs = {} + for k, v in kwargs.items(): + if k.startswith('transformer_'): + transformer_kwargs[k.replace('transformer_', '')] = v + elif k.startswith('conv_'): + conv_kwargs[k.replace('conv_', '')] = v + else: + base_kwargs[k] = v + cfg = replace( + cfg, + transformer_cfg=replace(cfg.transformer_cfg, **transformer_kwargs), + conv_cfg=replace(cfg.conv_cfg, **conv_kwargs), + **base_kwargs + ) + return cfg + + +class MaxxVit(nn.Module): + """ CoaTNet + MaxVit base model. + + Highly configurable for different block compositions, tensor layouts, pooling types. + """ + + def __init__( + self, + cfg: MaxxVitCfg, + img_size: Union[int, Tuple[int, int]] = 224, + in_chans: int = 3, + num_classes: int = 1000, + global_pool: str = 'avg', + drop_rate: float = 0., + drop_path_rate: float = 0., + **kwargs, + ): + super().__init__() + img_size = to_2tuple(img_size) + if kwargs: + cfg = _overlay_kwargs(cfg, **kwargs) + transformer_cfg = cfg_window_size(cfg.transformer_cfg, img_size) + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.embed_dim = cfg.embed_dim[-1] + self.drop_rate = drop_rate + self.grad_checkpointing = False + self.feature_info = [] + + self.stem = Stem( + in_chs=in_chans, + out_chs=cfg.stem_width, + padding=cfg.conv_cfg.padding, + bias=cfg.stem_bias, + act_layer=cfg.conv_cfg.act_layer, + norm_layer=cfg.conv_cfg.norm_layer, + norm_eps=cfg.conv_cfg.norm_eps, + ) + stride = self.stem.stride + self.feature_info += [dict(num_chs=self.stem.out_chs, reduction=2, module='stem')] + feat_size = tuple([i // s for i, s in zip(img_size, to_2tuple(stride))]) + + num_stages = len(cfg.embed_dim) + assert len(cfg.depths) == num_stages + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] + in_chs = self.stem.out_chs + stages = [] + for i in range(num_stages): + stage_stride = 2 + out_chs = cfg.embed_dim[i] + feat_size = tuple([(r - 1) // stage_stride + 1 for r in feat_size]) + stages += [MaxxVitStage( + in_chs, + out_chs, + depth=cfg.depths[i], + block_types=cfg.block_type[i], + conv_cfg=cfg.conv_cfg, + transformer_cfg=transformer_cfg, + feat_size=feat_size, + drop_path=dpr[i], + )] + stride *= stage_stride + in_chs = out_chs + self.feature_info += [dict(num_chs=out_chs, reduction=stride, module=f'stages.{i}')] + self.stages = nn.Sequential(*stages) + + final_norm_layer = partial(get_norm_layer(cfg.transformer_cfg.norm_layer), eps=cfg.transformer_cfg.norm_eps) + if cfg.head_hidden_size: + self.norm = nn.Identity() + self.head_hidden_size = cfg.head_hidden_size + self.head = NormMlpClassifierHead( + self.num_features, + num_classes, + hidden_size=self.head_hidden_size, + pool_type=global_pool, + drop_rate=drop_rate, + norm_layer=final_norm_layer, + ) + else: + # standard classifier head w/ norm, pooling, fc classifier + self.head_hidden_size = self.num_features + self.norm = final_norm_layer(self.num_features) + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + # Weight init (default PyTorch 
init works well for AdamW if scheme not set) + assert cfg.weight_init in ('', 'normal', 'trunc_normal', 'xavier_normal', 'vit_eff') + if cfg.weight_init: + named_apply(partial(self._init_weights, scheme=cfg.weight_init), self) + + def _init_weights(self, module, name, scheme=''): + if hasattr(module, 'init_weights'): + try: + module.init_weights(scheme=scheme) + except TypeError: + module.init_weights() + + @torch.jit.ignore + def no_weight_decay(self): + return { + k for k, _ in self.named_parameters() + if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', # stem and embed + blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, global_pool) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to compatible intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.stages) + 1, indices) + + # forward pass + feat_idx = 0 # stem is index 0 + x = self.stem(x) + if feat_idx in take_indices: + intermediates.append(x) + + last_idx = len(self.stages) + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + stages = self.stages + else: + stages = self.stages[:max_index] + for stage in stages: + feat_idx += 1 + x = stage(x) + if feat_idx in take_indices: + if norm and feat_idx == last_idx: + x_inter = self.norm(x) # applying final norm to last intermediate + else: + x_inter = x + intermediates.append(x_inter) + + if intermediates_only: + return intermediates + + x = self.norm(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
+ """ + take_indices, max_index = feature_take_indices(len(self.stages) + 1, indices) + self.stages = self.stages[:max_index] # truncate blocks w/ stem as idx 0 + if prune_norm: + self.norm = nn.Identity() + if prune_head: + self.head = self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _rw_coat_cfg( + stride_mode='pool', + pool_type='avg2', + conv_output_bias=False, + conv_attn_early=False, + conv_attn_act_layer='relu', + conv_norm_layer='', + transformer_shortcut_bias=True, + transformer_norm_layer='layernorm2d', + transformer_norm_layer_cl='layernorm', + init_values=None, + rel_pos_type='bias', + rel_pos_dim=512, +): + # 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit + # Common differences for initial timm models: + # - pre-norm layer in MZBConv included an activation after norm + # - mbconv expansion calculated from input instead of output chs + # - mbconv shortcut and final 1x1 conv did not have a bias + # - SE act layer was relu, not silu + # - mbconv uses silu in timm, not gelu + # - expansion in attention block done via output proj, not input proj + # Variable differences (evolved over training initial models): + # - avg pool with kernel_size=2 favoured downsampling (instead of maxpool for coat) + # - SE attention was between conv2 and norm/act + # - default to avg pool for mbconv downsample instead of 1x1 or dw conv + # - transformer block shortcut has no bias + return dict( + conv_cfg=MaxxVitConvCfg( + stride_mode=stride_mode, + pool_type=pool_type, + pre_norm_act=True, + expand_output=False, + output_bias=conv_output_bias, + attn_early=conv_attn_early, + attn_act_layer=conv_attn_act_layer, + act_layer='silu', + norm_layer=conv_norm_layer, + ), + transformer_cfg=MaxxVitTransformerCfg( + expand_first=False, + shortcut_bias=transformer_shortcut_bias, + pool_type=pool_type, + init_values=init_values, + norm_layer=transformer_norm_layer, + norm_layer_cl=transformer_norm_layer_cl, + rel_pos_type=rel_pos_type, + rel_pos_dim=rel_pos_dim, + ), + ) + + +def _rw_max_cfg( + stride_mode='dw', + pool_type='avg2', + conv_output_bias=False, + conv_attn_ratio=1 / 16, + conv_norm_layer='', + transformer_norm_layer='layernorm2d', + transformer_norm_layer_cl='layernorm', + window_size=None, + dim_head=32, + init_values=None, + rel_pos_type='bias', + rel_pos_dim=512, +): + # 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit + # Differences of initial timm models: + # - mbconv expansion calculated from input instead of output chs + # - mbconv shortcut and final 1x1 conv did not have a bias + # - mbconv uses silu in timm, not gelu + # - expansion in attention block done via output proj, not input proj + return dict( + conv_cfg=MaxxVitConvCfg( + stride_mode=stride_mode, + pool_type=pool_type, + expand_output=False, + output_bias=conv_output_bias, + attn_ratio=conv_attn_ratio, + act_layer='silu', + norm_layer=conv_norm_layer, + ), + transformer_cfg=MaxxVitTransformerCfg( + expand_first=False, + pool_type=pool_type, + dim_head=dim_head, + window_size=window_size, + init_values=init_values, + norm_layer=transformer_norm_layer, + 
norm_layer_cl=transformer_norm_layer_cl, + rel_pos_type=rel_pos_type, + rel_pos_dim=rel_pos_dim, + ), + ) + + +def _next_cfg( + stride_mode='dw', + pool_type='avg2', + conv_norm_layer='layernorm2d', + conv_norm_layer_cl='layernorm', + transformer_norm_layer='layernorm2d', + transformer_norm_layer_cl='layernorm', + window_size=None, + no_block_attn=False, + init_values=1e-6, + rel_pos_type='mlp', # MLP by default for maxxvit + rel_pos_dim=512, +): + # For experimental models with convnext instead of mbconv + init_values = to_2tuple(init_values) + return dict( + conv_cfg=MaxxVitConvCfg( + block_type='convnext', + stride_mode=stride_mode, + pool_type=pool_type, + expand_output=False, + init_values=init_values[0], + norm_layer=conv_norm_layer, + norm_layer_cl=conv_norm_layer_cl, + ), + transformer_cfg=MaxxVitTransformerCfg( + expand_first=False, + pool_type=pool_type, + window_size=window_size, + no_block_attn=no_block_attn, # enabled for MaxxViT-V2 + init_values=init_values[1], + norm_layer=transformer_norm_layer, + norm_layer_cl=transformer_norm_layer_cl, + rel_pos_type=rel_pos_type, + rel_pos_dim=rel_pos_dim, + ), + ) + + +def _tf_cfg(): + return dict( + conv_cfg=MaxxVitConvCfg( + norm_eps=1e-3, + act_layer='gelu_tanh', + padding='same', + ), + transformer_cfg=MaxxVitTransformerCfg( + norm_eps=1e-5, + act_layer='gelu_tanh', + head_first=False, # heads are interleaved (q_nh, q_hdim, k_nh, q_hdim, ....) + rel_pos_type='bias_tf', + ), + ) + + +model_cfgs = dict( + # timm specific CoAtNet configs + coatnet_pico_rw=MaxxVitCfg( + embed_dim=(64, 128, 256, 512), + depths=(2, 3, 5, 2), + stem_width=(32, 64), + **_rw_max_cfg( # using newer max defaults here + conv_output_bias=True, + conv_attn_ratio=0.25, + ), + ), + coatnet_nano_rw=MaxxVitCfg( + embed_dim=(64, 128, 256, 512), + depths=(3, 4, 6, 3), + stem_width=(32, 64), + **_rw_max_cfg( # using newer max defaults here + stride_mode='pool', + conv_output_bias=True, + conv_attn_ratio=0.25, + ), + ), + coatnet_0_rw=MaxxVitCfg( + embed_dim=(96, 192, 384, 768), + depths=(2, 3, 7, 2), # deeper than paper '0' model + stem_width=(32, 64), + **_rw_coat_cfg( + conv_attn_early=True, + transformer_shortcut_bias=False, + ), + ), + coatnet_1_rw=MaxxVitCfg( + embed_dim=(96, 192, 384, 768), + depths=(2, 6, 14, 2), + stem_width=(32, 64), + **_rw_coat_cfg( + stride_mode='dw', + conv_attn_early=True, + transformer_shortcut_bias=False, + ) + ), + coatnet_2_rw=MaxxVitCfg( + embed_dim=(128, 256, 512, 1024), + depths=(2, 6, 14, 2), + stem_width=(64, 128), + **_rw_coat_cfg( + stride_mode='dw', + conv_attn_act_layer='silu', + #init_values=1e-6, + ), + ), + coatnet_3_rw=MaxxVitCfg( + embed_dim=(192, 384, 768, 1536), + depths=(2, 6, 14, 2), + stem_width=(96, 192), + **_rw_coat_cfg( + stride_mode='dw', + conv_attn_act_layer='silu', + init_values=1e-6, + ), + ), + + # Experimental CoAtNet configs w/ ImageNet-1k train (different norm layers, MLP rel-pos) + coatnet_bn_0_rw=MaxxVitCfg( + embed_dim=(96, 192, 384, 768), + depths=(2, 3, 7, 2), # deeper than paper '0' model + stem_width=(32, 64), + **_rw_coat_cfg( + stride_mode='dw', + conv_attn_early=True, + transformer_shortcut_bias=False, + transformer_norm_layer='batchnorm2d', + ) + ), + coatnet_rmlp_nano_rw=MaxxVitCfg( + embed_dim=(64, 128, 256, 512), + depths=(3, 4, 6, 3), + stem_width=(32, 64), + **_rw_max_cfg( + conv_output_bias=True, + conv_attn_ratio=0.25, + rel_pos_type='mlp', + rel_pos_dim=384, + ), + ), + coatnet_rmlp_0_rw=MaxxVitCfg( + embed_dim=(96, 192, 384, 768), + depths=(2, 3, 7, 2), # deeper than paper '0' model + 
stem_width=(32, 64), + **_rw_coat_cfg( + stride_mode='dw', + rel_pos_type='mlp', + ), + ), + coatnet_rmlp_1_rw=MaxxVitCfg( + embed_dim=(96, 192, 384, 768), + depths=(2, 6, 14, 2), + stem_width=(32, 64), + **_rw_coat_cfg( + pool_type='max', + conv_attn_early=True, + transformer_shortcut_bias=False, + rel_pos_type='mlp', + rel_pos_dim=384, # was supposed to be 512, woops + ), + ), + coatnet_rmlp_1_rw2=MaxxVitCfg( + embed_dim=(96, 192, 384, 768), + depths=(2, 6, 14, 2), + stem_width=(32, 64), + **_rw_coat_cfg( + stride_mode='dw', + rel_pos_type='mlp', + rel_pos_dim=512, # was supposed to be 512, woops + ), + ), + coatnet_rmlp_2_rw=MaxxVitCfg( + embed_dim=(128, 256, 512, 1024), + depths=(2, 6, 14, 2), + stem_width=(64, 128), + **_rw_coat_cfg( + stride_mode='dw', + conv_attn_act_layer='silu', + init_values=1e-6, + rel_pos_type='mlp' + ), + ), + coatnet_rmlp_3_rw=MaxxVitCfg( + embed_dim=(192, 384, 768, 1536), + depths=(2, 6, 14, 2), + stem_width=(96, 192), + **_rw_coat_cfg( + stride_mode='dw', + conv_attn_act_layer='silu', + init_values=1e-6, + rel_pos_type='mlp' + ), + ), + + coatnet_nano_cc=MaxxVitCfg( + embed_dim=(64, 128, 256, 512), + depths=(3, 4, 6, 3), + stem_width=(32, 64), + block_type=('C', 'C', ('C', 'T'), ('C', 'T')), + **_rw_coat_cfg(), + ), + coatnext_nano_rw=MaxxVitCfg( + embed_dim=(64, 128, 256, 512), + depths=(3, 4, 6, 3), + stem_width=(32, 64), + weight_init='normal', + **_next_cfg( + rel_pos_type='bias', + init_values=(1e-5, None) + ), + ), + + # Trying to be like the CoAtNet paper configs + coatnet_0=MaxxVitCfg( + embed_dim=(96, 192, 384, 768), + depths=(2, 3, 5, 2), + stem_width=64, + head_hidden_size=768, + ), + coatnet_1=MaxxVitCfg( + embed_dim=(96, 192, 384, 768), + depths=(2, 6, 14, 2), + stem_width=64, + head_hidden_size=768, + ), + coatnet_2=MaxxVitCfg( + embed_dim=(128, 256, 512, 1024), + depths=(2, 6, 14, 2), + stem_width=128, + head_hidden_size=1024, + ), + coatnet_3=MaxxVitCfg( + embed_dim=(192, 384, 768, 1536), + depths=(2, 6, 14, 2), + stem_width=192, + head_hidden_size=1536, + ), + coatnet_4=MaxxVitCfg( + embed_dim=(192, 384, 768, 1536), + depths=(2, 12, 28, 2), + stem_width=192, + head_hidden_size=1536, + ), + coatnet_5=MaxxVitCfg( + embed_dim=(256, 512, 1280, 2048), + depths=(2, 12, 28, 2), + stem_width=192, + head_hidden_size=2048, + ), + + # Experimental MaxVit configs + maxvit_pico_rw=MaxxVitCfg( + embed_dim=(32, 64, 128, 256), + depths=(2, 2, 5, 2), + block_type=('M',) * 4, + stem_width=(24, 32), + **_rw_max_cfg(), + ), + maxvit_nano_rw=MaxxVitCfg( + embed_dim=(64, 128, 256, 512), + depths=(1, 2, 3, 1), + block_type=('M',) * 4, + stem_width=(32, 64), + **_rw_max_cfg(), + ), + maxvit_tiny_rw=MaxxVitCfg( + embed_dim=(64, 128, 256, 512), + depths=(2, 2, 5, 2), + block_type=('M',) * 4, + stem_width=(32, 64), + **_rw_max_cfg(), + ), + maxvit_tiny_pm=MaxxVitCfg( + embed_dim=(64, 128, 256, 512), + depths=(2, 2, 5, 2), + block_type=('PM',) * 4, + stem_width=(32, 64), + **_rw_max_cfg(), + ), + + maxvit_rmlp_pico_rw=MaxxVitCfg( + embed_dim=(32, 64, 128, 256), + depths=(2, 2, 5, 2), + block_type=('M',) * 4, + stem_width=(24, 32), + **_rw_max_cfg(rel_pos_type='mlp'), + ), + maxvit_rmlp_nano_rw=MaxxVitCfg( + embed_dim=(64, 128, 256, 512), + depths=(1, 2, 3, 1), + block_type=('M',) * 4, + stem_width=(32, 64), + **_rw_max_cfg(rel_pos_type='mlp'), + ), + maxvit_rmlp_tiny_rw=MaxxVitCfg( + embed_dim=(64, 128, 256, 512), + depths=(2, 2, 5, 2), + block_type=('M',) * 4, + stem_width=(32, 64), + **_rw_max_cfg(rel_pos_type='mlp'), + ), + maxvit_rmlp_small_rw=MaxxVitCfg( + 
embed_dim=(96, 192, 384, 768), + depths=(2, 2, 5, 2), + block_type=('M',) * 4, + stem_width=(32, 64), + **_rw_max_cfg( + rel_pos_type='mlp', + init_values=1e-6, + ), + ), + maxvit_rmlp_base_rw=MaxxVitCfg( + embed_dim=(96, 192, 384, 768), + depths=(2, 6, 14, 2), + block_type=('M',) * 4, + stem_width=(32, 64), + head_hidden_size=768, + **_rw_max_cfg( + rel_pos_type='mlp', + ), + ), + + maxxvit_rmlp_nano_rw=MaxxVitCfg( + embed_dim=(64, 128, 256, 512), + depths=(1, 2, 3, 1), + block_type=('M',) * 4, + stem_width=(32, 64), + weight_init='normal', + **_next_cfg(), + ), + maxxvit_rmlp_tiny_rw=MaxxVitCfg( + embed_dim=(64, 128, 256, 512), + depths=(2, 2, 5, 2), + block_type=('M',) * 4, + stem_width=(32, 64), + **_next_cfg(), + ), + maxxvit_rmlp_small_rw=MaxxVitCfg( + embed_dim=(96, 192, 384, 768), + depths=(2, 2, 5, 2), + block_type=('M',) * 4, + stem_width=(48, 96), + **_next_cfg(), + ), + + maxxvitv2_nano_rw=MaxxVitCfg( + embed_dim=(96, 192, 384, 768), + depths=(1, 2, 3, 1), + block_type=('M',) * 4, + stem_width=(48, 96), + weight_init='normal', + **_next_cfg( + no_block_attn=True, + rel_pos_type='bias', + ), + ), + maxxvitv2_rmlp_base_rw=MaxxVitCfg( + embed_dim=(128, 256, 512, 1024), + depths=(2, 6, 12, 2), + block_type=('M',) * 4, + stem_width=(64, 128), + **_next_cfg( + no_block_attn=True, + ), + ), + maxxvitv2_rmlp_large_rw=MaxxVitCfg( + embed_dim=(160, 320, 640, 1280), + depths=(2, 6, 16, 2), + block_type=('M',) * 4, + stem_width=(80, 160), + head_hidden_size=1280, + **_next_cfg( + no_block_attn=True, + ), + ), + + # Trying to be like the MaxViT paper configs + maxvit_tiny_tf=MaxxVitCfg( + embed_dim=(64, 128, 256, 512), + depths=(2, 2, 5, 2), + block_type=('M',) * 4, + stem_width=64, + stem_bias=True, + head_hidden_size=512, + **_tf_cfg(), + ), + maxvit_small_tf=MaxxVitCfg( + embed_dim=(96, 192, 384, 768), + depths=(2, 2, 5, 2), + block_type=('M',) * 4, + stem_width=64, + stem_bias=True, + head_hidden_size=768, + **_tf_cfg(), + ), + maxvit_base_tf=MaxxVitCfg( + embed_dim=(96, 192, 384, 768), + depths=(2, 6, 14, 2), + block_type=('M',) * 4, + stem_width=64, + stem_bias=True, + head_hidden_size=768, + **_tf_cfg(), + ), + maxvit_large_tf=MaxxVitCfg( + embed_dim=(128, 256, 512, 1024), + depths=(2, 6, 14, 2), + block_type=('M',) * 4, + stem_width=128, + stem_bias=True, + head_hidden_size=1024, + **_tf_cfg(), + ), + maxvit_xlarge_tf=MaxxVitCfg( + embed_dim=(192, 384, 768, 1536), + depths=(2, 6, 14, 2), + block_type=('M',) * 4, + stem_width=192, + stem_bias=True, + head_hidden_size=1536, + **_tf_cfg(), + ), +) + + +def checkpoint_filter_fn(state_dict, model: nn.Module): + model_state_dict = model.state_dict() + out_dict = {} + for k, v in state_dict.items(): + if k.endswith('relative_position_bias_table'): + m = model.get_submodule(k[:-29]) + if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]: + v = resize_rel_pos_bias_table( + v, + new_window_size=m.window_size, + new_bias_shape=m.relative_position_bias_table.shape, + ) + + if k in model_state_dict and v.ndim != model_state_dict[k].ndim and v.numel() == model_state_dict[k].numel(): + # adapt between conv2d / linear layers + assert v.ndim in (2, 4) + v = v.reshape(model_state_dict[k].shape) + out_dict[k] = v + return out_dict + + +def _create_maxxvit(variant, cfg_variant=None, pretrained=False, **kwargs): + if cfg_variant is None: + if variant in model_cfgs: + cfg_variant = variant + else: + cfg_variant = '_'.join(variant.split('_')[:-1]) + return build_model_with_cfg( + MaxxVit, variant, pretrained, + 
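+        # model_cfg selects the architecture definition resolved from cfg_variant above;
+        # checkpoint_filter_fn remaps/reshapes original-format weights to this implementation's layout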
model_cfg=model_cfgs[cfg_variant], + feature_cfg=dict(flatten_sequential=True), + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.95, 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'stem.conv1', 'classifier': 'head.fc', + 'fixed_input_size': True, + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + # timm specific CoAtNet configs, ImageNet-1k pretrain, fixed rel-pos + 'coatnet_pico_rw_224.untrained': _cfg(url=''), + 'coatnet_nano_rw_224.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_nano_rw_224_sw-f53093b4.pth', + crop_pct=0.9), + 'coatnet_0_rw_224.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_0_rw_224_sw-a6439706.pth'), + 'coatnet_1_rw_224.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_1_rw_224_sw-5cae1ea8.pth' + ), + + # timm specific CoAtNet configs, ImageNet-12k pretrain w/ 1k fine-tune, fixed rel-pos + 'coatnet_2_rw_224.sw_in12k_ft_in1k': _cfg( + hf_hub_id='timm/'), + #'coatnet_3_rw_224.untrained': _cfg(url=''), + + # Experimental CoAtNet configs w/ ImageNet-12k pretrain -> 1k fine-tune (different norm layers, MLP rel-pos) + 'coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k': _cfg( + hf_hub_id='timm/'), + 'coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k': _cfg( + hf_hub_id='timm/'), + 'coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + + # Experimental CoAtNet configs w/ ImageNet-1k train (different norm layers, MLP rel-pos) + 'coatnet_bn_0_rw_224.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_bn_0_rw_224_sw-c228e218.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, + crop_pct=0.95), + 'coatnet_rmlp_nano_rw_224.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_nano_rw_224_sw-bd1d51b3.pth', + crop_pct=0.9), + 'coatnet_rmlp_0_rw_224.untrained': _cfg(url=''), + 'coatnet_rmlp_1_rw_224.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_1_rw_224_sw-9051e6c3.pth'), + 'coatnet_rmlp_2_rw_224.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_2_rw_224_sw-5ccfac55.pth'), + 'coatnet_rmlp_3_rw_224.untrained': _cfg(url=''), + 'coatnet_nano_cc_224.untrained': _cfg(url=''), + 'coatnext_nano_rw_224.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnext_nano_rw_224_ad-22cb71c2.pth', + crop_pct=0.9), + + # ImagenNet-12k pretrain CoAtNet + 'coatnet_2_rw_224.sw_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821), + 'coatnet_3_rw_224.sw_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821), + 'coatnet_rmlp_1_rw2_224.sw_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821), + 'coatnet_rmlp_2_rw_224.sw_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821), + + # Trying to be like the 
CoAtNet paper configs (will adapt if 'tf' weights are ever released) + 'coatnet_0_224.untrained': _cfg(url=''), + 'coatnet_1_224.untrained': _cfg(url=''), + 'coatnet_2_224.untrained': _cfg(url=''), + 'coatnet_3_224.untrained': _cfg(url=''), + 'coatnet_4_224.untrained': _cfg(url=''), + 'coatnet_5_224.untrained': _cfg(url=''), + + # timm specific MaxVit configs, ImageNet-1k pretrain or untrained + 'maxvit_pico_rw_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), + 'maxvit_nano_rw_256.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_nano_rw_256_sw-fb127241.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + 'maxvit_tiny_rw_224.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_tiny_rw_224_sw-7d0dffeb.pth'), + 'maxvit_tiny_rw_256.untrained': _cfg( + url='', + input_size=(3, 256, 256), pool_size=(8, 8)), + 'maxvit_tiny_pm_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), + + # timm specific MaxVit w/ MLP rel-pos, ImageNet-1k pretrain + 'maxvit_rmlp_pico_rw_256.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_pico_rw_256_sw-8d82f2c6.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + 'maxvit_rmlp_nano_rw_256.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_nano_rw_256_sw-c17bb0d6.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + 'maxvit_rmlp_tiny_rw_256.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_tiny_rw_256_sw-bbef0ff5.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + 'maxvit_rmlp_small_rw_224.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth', + crop_pct=0.9, + ), + 'maxvit_rmlp_small_rw_256.untrained': _cfg( + url='', + input_size=(3, 256, 256), pool_size=(8, 8)), + + # timm specific MaxVit w/ ImageNet-12k pretrain and 1k fine-tune + 'maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + ), + 'maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + + # timm specific MaxVit w/ ImageNet-12k pretrain + 'maxvit_rmlp_base_rw_224.sw_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + ), + + # timm MaxxViT configs (ConvNeXt conv blocks mixed with MaxVit transformer blocks) + 'maxxvit_rmlp_nano_rw_256.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_nano_rw_256_sw-0325d459.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + 'maxxvit_rmlp_tiny_rw_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), + 'maxxvit_rmlp_small_rw_256.sw_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + + # timm MaxxViT-V2 configs (ConvNeXt conv blocks mixed with MaxVit transformer blocks, more width, no block attn) + 'maxxvitv2_nano_rw_256.sw_in1k': _cfg( + hf_hub_id='timm/', + 
input_size=(3, 256, 256), pool_size=(8, 8)), + 'maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k': _cfg( + hf_hub_id='timm/'), + 'maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'maxxvitv2_rmlp_large_rw_224.untrained': _cfg(url=''), + + 'maxxvitv2_rmlp_base_rw_224.sw_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821), + + # MaxViT models ported from official Tensorflow impl + 'maxvit_tiny_tf_224.in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'maxvit_tiny_tf_384.in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'maxvit_tiny_tf_512.in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), + 'maxvit_small_tf_224.in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'maxvit_small_tf_384.in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'maxvit_small_tf_512.in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), + 'maxvit_base_tf_224.in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'maxvit_base_tf_384.in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'maxvit_base_tf_512.in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), + 'maxvit_large_tf_224.in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'maxvit_large_tf_384.in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'maxvit_large_tf_512.in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), + + 'maxvit_base_tf_224.in21k': _cfg( + hf_hub_id='timm/', + num_classes=21843), + 'maxvit_base_tf_384.in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'maxvit_base_tf_512.in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), + 'maxvit_large_tf_224.in21k': _cfg( + hf_hub_id='timm/', + num_classes=21843), + 'maxvit_large_tf_384.in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'maxvit_large_tf_512.in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 512, 512), crop_pct=1.0, crop_mode='squash'), + 'maxvit_xlarge_tf_224.in21k': _cfg( + hf_hub_id='timm/', + num_classes=21843), + 'maxvit_xlarge_tf_384.in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), + 'maxvit_xlarge_tf_512.in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), +}) + + +@register_model +def coatnet_pico_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_pico_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_nano_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def 
coatnet_0_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_0_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_1_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_1_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_2_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_2_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_3_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_3_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_bn_0_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_bn_0_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_rmlp_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_rmlp_nano_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_rmlp_0_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_rmlp_0_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_rmlp_1_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_rmlp_1_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_rmlp_1_rw2_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_rmlp_1_rw2_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_rmlp_2_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_rmlp_2_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_rmlp_2_rw_384(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_rmlp_2_rw_384', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_rmlp_3_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_rmlp_3_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_nano_cc_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_nano_cc_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnext_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnext_nano_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_0_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_0_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_1_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_1_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_2_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_2_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_3_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_3_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_4_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_4_224', pretrained=pretrained, **kwargs) + + +@register_model +def coatnet_5_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('coatnet_5_224', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_pico_rw_256(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_pico_rw_256', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_nano_rw_256', 
pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_tiny_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_tiny_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_tiny_rw_256', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_rmlp_pico_rw_256(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_rmlp_pico_rw_256', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_rmlp_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_rmlp_nano_rw_256', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_rmlp_small_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_rmlp_small_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_rmlp_small_rw_256(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_rmlp_base_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_rmlp_base_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_rmlp_base_rw_384(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_rmlp_base_rw_384', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_tiny_pm_256(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_tiny_pm_256', pretrained=pretrained, **kwargs) + + +@register_model +def maxxvit_rmlp_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxxvit_rmlp_nano_rw_256', pretrained=pretrained, **kwargs) + + +@register_model +def maxxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs) + + +@register_model +def maxxvit_rmlp_small_rw_256(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs) + + +@register_model +def maxxvitv2_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxxvitv2_nano_rw_256', pretrained=pretrained, **kwargs) + + +@register_model +def maxxvitv2_rmlp_base_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxxvitv2_rmlp_base_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def maxxvitv2_rmlp_base_rw_384(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxxvitv2_rmlp_base_rw_384', pretrained=pretrained, **kwargs) + + +@register_model +def maxxvitv2_rmlp_large_rw_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxxvitv2_rmlp_large_rw_224', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_tiny_tf_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_tiny_tf_224', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_tiny_tf_384(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_tiny_tf_384', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_tiny_tf_512(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_tiny_tf_512', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs) 
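+
+# Usage sketch (illustrative only, assuming the standard timm create_model factory): any variant
+# registered in this file can be instantiated by name, and the forward_intermediates() API defined
+# on MaxxVit above returns per-stage feature maps alongside the final (normed) features, e.g.:
+#
+#   import timm, torch
+#   model = timm.create_model('maxvit_nano_rw_256', pretrained=False)
+#   x = torch.randn(1, 3, 256, 256)
+#   final, feats = model.forward_intermediates(x, indices=[1, 2, 3, 4])
+#   # feats: NCHW tensors at strides 4/8/16/32 (the stem, stride 2, is index 0)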
+ + +@register_model +def maxvit_small_tf_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_small_tf_224', 'maxvit_small_tf', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_small_tf_384(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_small_tf_384', 'maxvit_small_tf', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_small_tf_512(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_small_tf_512', 'maxvit_small_tf', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_base_tf_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_base_tf_224', 'maxvit_base_tf', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_base_tf_384(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_base_tf_384', 'maxvit_base_tf', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_base_tf_512(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_base_tf_512', 'maxvit_base_tf', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_large_tf_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_large_tf_224', 'maxvit_large_tf', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_large_tf_384(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_large_tf_384', 'maxvit_large_tf', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_large_tf_512(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_large_tf_512', 'maxvit_large_tf', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_xlarge_tf_224(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_xlarge_tf_224', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_xlarge_tf_384(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_xlarge_tf_384', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs) + + +@register_model +def maxvit_xlarge_tf_512(pretrained=False, **kwargs) -> MaxxVit: + return _create_maxxvit('maxvit_xlarge_tf_512', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs) diff --git a/pytorch-image-models/timm/models/metaformer.py b/pytorch-image-models/timm/models/metaformer.py new file mode 100644 index 0000000000000000000000000000000000000000..be68b2ba5ee9c6f0c6bf0671789bab511468eded --- /dev/null +++ b/pytorch-image-models/timm/models/metaformer.py @@ -0,0 +1,1059 @@ +""" +Poolformer from MetaFormer is Actually What You Need for Vision https://arxiv.org/abs/2111.11418 + +IdentityFormer, RandFormer, PoolFormerV2, ConvFormer, and CAFormer +from MetaFormer Baselines for Vision https://arxiv.org/abs/2210.13452 + +All implemented models support feature extraction and variable input resolution. + +Original implementation by Weihao Yu et al., +adapted for timm by Fredo Guan and Ross Wightman. + +Adapted from https://github.com/sail-sg/metaformer, original copyright below +""" + +# Copyright 2022 Garena Online Private Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import OrderedDict +from functools import partial +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from torch.jit import Final + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import trunc_normal_, DropPath, SelectAdaptivePool2d, GroupNorm1, LayerNorm, LayerNorm2d, Mlp, \ + use_fused_attn +from ._builder import build_model_with_cfg +from ._manipulate import checkpoint_seq +from ._registry import generate_default_cfgs, register_model + +__all__ = ['MetaFormer'] + + +class Stem(nn.Module): + """ + Stem implemented by a layer of convolution. + Conv2d params constant across all models. + """ + + def __init__( + self, + in_channels, + out_channels, + norm_layer=None, + ): + super().__init__() + self.conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size=7, + stride=4, + padding=2 + ) + self.norm = norm_layer(out_channels) if norm_layer else nn.Identity() + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + return x + + +class Downsampling(nn.Module): + """ + Downsampling implemented by a layer of convolution. + """ + + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + norm_layer=None, + ): + super().__init__() + self.norm = norm_layer(in_channels) if norm_layer else nn.Identity() + self.conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding + ) + + def forward(self, x): + x = self.norm(x) + x = self.conv(x) + return x + + +class Scale(nn.Module): + """ + Scale vector by element multiplications. + """ + + def __init__(self, dim, init_value=1.0, trainable=True, use_nchw=True): + super().__init__() + self.shape = (dim, 1, 1) if use_nchw else (dim,) + self.scale = nn.Parameter(init_value * torch.ones(dim), requires_grad=trainable) + + def forward(self, x): + return x * self.scale.view(self.shape) + + +class SquaredReLU(nn.Module): + """ + Squared ReLU: https://arxiv.org/abs/2109.08668 + """ + + def __init__(self, inplace=False): + super().__init__() + self.relu = nn.ReLU(inplace=inplace) + + def forward(self, x): + return torch.square(self.relu(x)) + + +class StarReLU(nn.Module): + """ + StarReLU: s * relu(x) ** 2 + b + """ + + def __init__( + self, + scale_value=1.0, + bias_value=0.0, + scale_learnable=True, + bias_learnable=True, + mode=None, + inplace=False + ): + super().__init__() + self.inplace = inplace + self.relu = nn.ReLU(inplace=inplace) + self.scale = nn.Parameter(scale_value * torch.ones(1), requires_grad=scale_learnable) + self.bias = nn.Parameter(bias_value * torch.ones(1), requires_grad=bias_learnable) + + def forward(self, x): + return self.scale * self.relu(x) ** 2 + self.bias + + +class Attention(nn.Module): + """ + Vanilla self-attention from Transformer: https://arxiv.org/abs/1706.03762. + Modified from timm. 
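+    Uses F.scaled_dot_product_attention when timm's use_fused_attn() enables the fused path,
+    otherwise falls back to the explicit softmax attention implemented below.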
+ """ + fused_attn: Final[bool] + + def __init__( + self, + dim, + head_dim=32, + num_heads=None, + qkv_bias=False, + attn_drop=0., + proj_drop=0., + proj_bias=False, + **kwargs + ): + super().__init__() + + self.head_dim = head_dim + self.scale = head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.num_heads = num_heads if num_heads else dim // head_dim + if self.num_heads == 0: + self.num_heads = 1 + + self.attention_dim = self.num_heads * self.head_dim + + self.qkv = nn.Linear(dim, self.attention_dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(self.attention_dim, dim, bias=proj_bias) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + + if self.fused_attn: + x = F.scaled_dot_product_attention( + q, k, v, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +# custom norm modules that disable the bias term, since the original models defs +# used a custom norm with a weight term but no bias term. + +class GroupNorm1NoBias(GroupNorm1): + def __init__(self, num_channels, **kwargs): + super().__init__(num_channels, **kwargs) + self.eps = kwargs.get('eps', 1e-6) + self.bias = None + + +class LayerNorm2dNoBias(LayerNorm2d): + def __init__(self, num_channels, **kwargs): + super().__init__(num_channels, **kwargs) + self.eps = kwargs.get('eps', 1e-6) + self.bias = None + + +class LayerNormNoBias(nn.LayerNorm): + def __init__(self, num_channels, **kwargs): + super().__init__(num_channels, **kwargs) + self.eps = kwargs.get('eps', 1e-6) + self.bias = None + + +class SepConv(nn.Module): + r""" + Inverted separable convolution from MobileNetV2: https://arxiv.org/abs/1801.04381. 
+ """ + + def __init__( + self, + dim, + expansion_ratio=2, + act1_layer=StarReLU, + act2_layer=nn.Identity, + bias=False, + kernel_size=7, + padding=3, + **kwargs + ): + super().__init__() + mid_channels = int(expansion_ratio * dim) + self.pwconv1 = nn.Conv2d(dim, mid_channels, kernel_size=1, bias=bias) + self.act1 = act1_layer() + self.dwconv = nn.Conv2d( + mid_channels, mid_channels, kernel_size=kernel_size, + padding=padding, groups=mid_channels, bias=bias) # depthwise conv + self.act2 = act2_layer() + self.pwconv2 = nn.Conv2d(mid_channels, dim, kernel_size=1, bias=bias) + + def forward(self, x): + x = self.pwconv1(x) + x = self.act1(x) + x = self.dwconv(x) + x = self.act2(x) + x = self.pwconv2(x) + return x + + +class Pooling(nn.Module): + """ + Implementation of pooling for PoolFormer: https://arxiv.org/abs/2111.11418 + """ + + def __init__(self, pool_size=3, **kwargs): + super().__init__() + self.pool = nn.AvgPool2d( + pool_size, stride=1, padding=pool_size // 2, count_include_pad=False) + + def forward(self, x): + y = self.pool(x) + return y - x + + +class MlpHead(nn.Module): + """ MLP classification head + """ + + def __init__( + self, + dim, + num_classes=1000, + mlp_ratio=4, + act_layer=SquaredReLU, + norm_layer=LayerNorm, + drop_rate=0., + bias=True + ): + super().__init__() + hidden_features = int(mlp_ratio * dim) + self.fc1 = nn.Linear(dim, hidden_features, bias=bias) + self.act = act_layer() + self.norm = norm_layer(hidden_features) + self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias) + self.head_drop = nn.Dropout(drop_rate) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.norm(x) + x = self.head_drop(x) + x = self.fc2(x) + return x + + +class MetaFormerBlock(nn.Module): + """ + Implementation of one MetaFormer block. + """ + + def __init__( + self, + dim, + token_mixer=Pooling, + mlp_act=StarReLU, + mlp_bias=False, + norm_layer=LayerNorm2d, + proj_drop=0., + drop_path=0., + use_nchw=True, + layer_scale_init_value=None, + res_scale_init_value=None, + **kwargs + ): + super().__init__() + ls_layer = partial(Scale, dim=dim, init_value=layer_scale_init_value, use_nchw=use_nchw) + rs_layer = partial(Scale, dim=dim, init_value=res_scale_init_value, use_nchw=use_nchw) + + self.norm1 = norm_layer(dim) + self.token_mixer = token_mixer(dim=dim, proj_drop=proj_drop, **kwargs) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.layer_scale1 = ls_layer() if layer_scale_init_value is not None else nn.Identity() + self.res_scale1 = rs_layer() if res_scale_init_value is not None else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp( + dim, + int(4 * dim), + act_layer=mlp_act, + bias=mlp_bias, + drop=proj_drop, + use_conv=use_nchw, + ) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.layer_scale2 = ls_layer() if layer_scale_init_value is not None else nn.Identity() + self.res_scale2 = rs_layer() if res_scale_init_value is not None else nn.Identity() + + def forward(self, x): + x = self.res_scale1(x) + \ + self.layer_scale1( + self.drop_path1( + self.token_mixer(self.norm1(x)) + ) + ) + x = self.res_scale2(x) + \ + self.layer_scale2( + self.drop_path2( + self.mlp(self.norm2(x)) + ) + ) + return x + + +class MetaFormerStage(nn.Module): + + def __init__( + self, + in_chs, + out_chs, + depth=2, + token_mixer=nn.Identity, + mlp_act=StarReLU, + mlp_bias=False, + downsample_norm=LayerNorm2d, + norm_layer=LayerNorm2d, + proj_drop=0., + dp_rates=[0.] 
* 2, + layer_scale_init_value=None, + res_scale_init_value=None, + **kwargs, + ): + super().__init__() + + self.grad_checkpointing = False + self.use_nchw = not issubclass(token_mixer, Attention) + + # don't downsample if in_chs and out_chs are the same + self.downsample = nn.Identity() if in_chs == out_chs else Downsampling( + in_chs, + out_chs, + kernel_size=3, + stride=2, + padding=1, + norm_layer=downsample_norm, + ) + + self.blocks = nn.Sequential(*[MetaFormerBlock( + dim=out_chs, + token_mixer=token_mixer, + mlp_act=mlp_act, + mlp_bias=mlp_bias, + norm_layer=norm_layer, + proj_drop=proj_drop, + drop_path=dp_rates[i], + layer_scale_init_value=layer_scale_init_value, + res_scale_init_value=res_scale_init_value, + use_nchw=self.use_nchw, + **kwargs, + ) for i in range(depth)]) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + def forward(self, x: Tensor): + x = self.downsample(x) + B, C, H, W = x.shape + + if not self.use_nchw: + x = x.reshape(B, C, -1).transpose(1, 2) + + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + + if not self.use_nchw: + x = x.transpose(1, 2).reshape(B, C, H, W) + + return x + + +class MetaFormer(nn.Module): + r""" MetaFormer + A PyTorch impl of : `MetaFormer Baselines for Vision` - + https://arxiv.org/abs/2210.13452 + + Args: + in_chans (int): Number of input image channels. + num_classes (int): Number of classes for classification head. + global_pool: Pooling for classifier head. + depths (list or tuple): Number of blocks at each stage. + dims (list or tuple): Feature dimension at each stage. + token_mixers (list, tuple or token_fcn): Token mixer for each stage. + mlp_act: Activation layer for MLP. + mlp_bias (boolean): Enable or disable mlp bias term. + drop_path_rate (float): Stochastic depth rate. + drop_rate (float): Dropout rate. + layer_scale_init_values (list, tuple, float or None): Init value for Layer Scale. + None means not use the layer scale. Form: https://arxiv.org/abs/2103.17239. + res_scale_init_values (list, tuple, float or None): Init value for res Scale on residual connections. + None means not use the res scale. From: https://arxiv.org/abs/2110.09456. + downsample_norm (nn.Module): Norm layer used in stem and downsampling layers. + norm_layers (list, tuple or norm_fcn): Norm layers for each stage. + output_norm: Norm layer before classifier head. + use_mlp_head: Use MLP classification head. 
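+
+    Example (illustrative sketch only, using the constructor defaults which give a small
+    Pooling-mixer model):
+        >>> model = MetaFormer(depths=(2, 2, 6, 2), dims=(64, 128, 320, 512))
+        >>> logits = model(torch.randn(1, 3, 224, 224))   # shape (1, 1000)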
+ """ + + def __init__( + self, + in_chans=3, + num_classes=1000, + global_pool='avg', + depths=(2, 2, 6, 2), + dims=(64, 128, 320, 512), + token_mixers=Pooling, + mlp_act=StarReLU, + mlp_bias=False, + drop_path_rate=0., + proj_drop_rate=0., + drop_rate=0.0, + layer_scale_init_values=None, + res_scale_init_values=(None, None, 1.0, 1.0), + downsample_norm=LayerNorm2dNoBias, + norm_layers=LayerNorm2dNoBias, + output_norm=LayerNorm2d, + use_mlp_head=True, + **kwargs, + ): + super().__init__() + self.num_classes = num_classes + self.num_features = dims[-1] + self.drop_rate = drop_rate + self.use_mlp_head = use_mlp_head + self.num_stages = len(depths) + + # convert everything to lists if they aren't indexable + if not isinstance(depths, (list, tuple)): + depths = [depths] # it means the model has only one stage + if not isinstance(dims, (list, tuple)): + dims = [dims] + if not isinstance(token_mixers, (list, tuple)): + token_mixers = [token_mixers] * self.num_stages + if not isinstance(norm_layers, (list, tuple)): + norm_layers = [norm_layers] * self.num_stages + if not isinstance(layer_scale_init_values, (list, tuple)): + layer_scale_init_values = [layer_scale_init_values] * self.num_stages + if not isinstance(res_scale_init_values, (list, tuple)): + res_scale_init_values = [res_scale_init_values] * self.num_stages + + self.grad_checkpointing = False + self.feature_info = [] + + self.stem = Stem( + in_chans, + dims[0], + norm_layer=downsample_norm + ) + + stages = [] + prev_dim = dims[0] + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + for i in range(self.num_stages): + stages += [MetaFormerStage( + prev_dim, + dims[i], + depth=depths[i], + token_mixer=token_mixers[i], + mlp_act=mlp_act, + mlp_bias=mlp_bias, + proj_drop=proj_drop_rate, + dp_rates=dp_rates[i], + layer_scale_init_value=layer_scale_init_values[i], + res_scale_init_value=res_scale_init_values[i], + downsample_norm=downsample_norm, + norm_layer=norm_layers[i], + **kwargs, + )] + prev_dim = dims[i] + self.feature_info += [dict(num_chs=dims[i], reduction=2**(i+2), module=f'stages.{i}')] + + self.stages = nn.Sequential(*stages) + + # if using MlpHead, dropout is handled by MlpHead + if num_classes > 0: + if self.use_mlp_head: + # FIXME not actually returning mlp hidden state right now as pre-logits. 
+ final = MlpHead(self.num_features, num_classes, drop_rate=self.drop_rate) + self.head_hidden_size = self.num_features + else: + final = nn.Linear(self.num_features, num_classes) + self.head_hidden_size = self.num_features + else: + final = nn.Identity() + + self.head = nn.Sequential(OrderedDict([ + ('global_pool', SelectAdaptivePool2d(pool_type=global_pool)), + ('norm', output_norm(self.num_features)), + ('flatten', nn.Flatten(1) if global_pool else nn.Identity()), + ('drop', nn.Dropout(drop_rate) if self.use_mlp_head else nn.Identity()), + ('fc', final) + ])) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, (nn.Conv2d, nn.Linear)): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + for stage in self.stages: + stage.set_grad_checkpointing(enable=enable) + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + if global_pool is not None: + self.head.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.head.flatten = nn.Flatten(1) if global_pool else nn.Identity() + if num_classes > 0: + if self.use_mlp_head: + final = MlpHead(self.num_features, num_classes, drop_rate=self.drop_rate) + else: + final = nn.Linear(self.num_features, num_classes) + else: + final = nn.Identity() + self.head.fc = final + + def forward_head(self, x: Tensor, pre_logits: bool = False): + # NOTE nn.Sequential in head broken down since can't call head[:-1](x) in torchscript :( + x = self.head.global_pool(x) + x = self.head.norm(x) + x = self.head.flatten(x) + x = self.head.drop(x) + return x if pre_logits else self.head.fc(x) + + def forward_features(self, x: Tensor): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x) + else: + x = self.stages(x) + return x + + def forward(self, x: Tensor): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +# this works but it's long and breaks backwards compatability with weights from the poolformer-only impl +def checkpoint_filter_fn(state_dict, model): + if 'stem.conv.weight' in state_dict: + return state_dict + + import re + out_dict = {} + is_poolformerv1 = 'network.0.0.mlp.fc1.weight' in state_dict + model_state_dict = model.state_dict() + for k, v in state_dict.items(): + if is_poolformerv1: + k = re.sub(r'layer_scale_([0-9]+)', r'layer_scale\1.scale', k) + k = k.replace('network.1', 'downsample_layers.1') + k = k.replace('network.3', 'downsample_layers.2') + k = k.replace('network.5', 'downsample_layers.3') + k = k.replace('network.2', 'network.1') + k = k.replace('network.4', 'network.2') + k = k.replace('network.6', 'network.3') + k = k.replace('network', 'stages') + + k = re.sub(r'downsample_layers.([0-9]+)', r'stages.\1.downsample', k) + k = k.replace('downsample.proj', 'downsample.conv') + k = k.replace('patch_embed.proj', 'patch_embed.conv') + k = re.sub(r'([0-9]+).([0-9]+)', r'\1.blocks.\2', k) + k = k.replace('stages.0.downsample', 'patch_embed') + k = k.replace('patch_embed', 'stem') + k = k.replace('post_norm', 'norm') + k = k.replace('pre_norm', 'norm') + k = re.sub(r'^head', 'head.fc', k) + k = re.sub(r'^norm', 'head.norm', k) + + if v.shape != model_state_dict[k] and v.numel() == model_state_dict[k].numel(): + v = v.reshape(model_state_dict[k].shape) + + out_dict[k] = 
v + return out_dict + + +def _create_metaformer(variant, pretrained=False, **kwargs): + default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (2, 2, 6, 2)))) + out_indices = kwargs.pop('out_indices', default_out_indices) + + model = build_model_with_cfg( + MetaFormer, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs, + ) + + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 1.0, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'classifier': 'head.fc', 'first_conv': 'stem.conv', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'poolformer_s12.sail_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.9), + 'poolformer_s24.sail_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.9), + 'poolformer_s36.sail_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.9), + 'poolformer_m36.sail_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95), + 'poolformer_m48.sail_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95), + + 'poolformerv2_s12.sail_in1k': _cfg(hf_hub_id='timm/'), + 'poolformerv2_s24.sail_in1k': _cfg(hf_hub_id='timm/'), + 'poolformerv2_s36.sail_in1k': _cfg(hf_hub_id='timm/'), + 'poolformerv2_m36.sail_in1k': _cfg(hf_hub_id='timm/'), + 'poolformerv2_m48.sail_in1k': _cfg(hf_hub_id='timm/'), + + 'convformer_s18.sail_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'convformer_s18.sail_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'convformer_s18.sail_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'convformer_s18.sail_in22k_ft_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'convformer_s18.sail_in22k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', num_classes=21841), + + 'convformer_s36.sail_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'convformer_s36.sail_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'convformer_s36.sail_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'convformer_s36.sail_in22k_ft_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'convformer_s36.sail_in22k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', num_classes=21841), + + 'convformer_m36.sail_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'convformer_m36.sail_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'convformer_m36.sail_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'convformer_m36.sail_in22k_ft_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'convformer_m36.sail_in22k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', num_classes=21841), + + 'convformer_b36.sail_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'convformer_b36.sail_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'convformer_b36.sail_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'convformer_b36.sail_in22k_ft_in1k_384': _cfg( + 
hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'convformer_b36.sail_in22k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', num_classes=21841), + + 'caformer_s18.sail_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'caformer_s18.sail_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'caformer_s18.sail_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'caformer_s18.sail_in22k_ft_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'caformer_s18.sail_in22k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', num_classes=21841), + + 'caformer_s36.sail_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'caformer_s36.sail_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'caformer_s36.sail_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'caformer_s36.sail_in22k_ft_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'caformer_s36.sail_in22k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', num_classes=21841), + + 'caformer_m36.sail_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'caformer_m36.sail_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'caformer_m36.sail_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'caformer_m36.sail_in22k_ft_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'caformer_m36.sail_in22k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', num_classes=21841), + + 'caformer_b36.sail_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'caformer_b36.sail_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'caformer_b36.sail_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2'), + 'caformer_b36.sail_in22k_ft_in1k_384': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), + 'caformer_b36.sail_in22k': _cfg( + hf_hub_id='timm/', + classifier='head.fc.fc2', num_classes=21841), +}) + + +@register_model +def poolformer_s12(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[2, 2, 6, 2], + dims=[64, 128, 320, 512], + downsample_norm=None, + mlp_act=nn.GELU, + mlp_bias=True, + norm_layers=GroupNorm1, + layer_scale_init_values=1e-5, + res_scale_init_values=None, + use_mlp_head=False, + **kwargs) + return _create_metaformer('poolformer_s12', pretrained=pretrained, **model_kwargs) + + +@register_model +def poolformer_s24(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[4, 4, 12, 4], + dims=[64, 128, 320, 512], + downsample_norm=None, + mlp_act=nn.GELU, + mlp_bias=True, + norm_layers=GroupNorm1, + layer_scale_init_values=1e-5, + res_scale_init_values=None, + use_mlp_head=False, + **kwargs) + return _create_metaformer('poolformer_s24', pretrained=pretrained, **model_kwargs) + + +@register_model +def poolformer_s36(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[6, 6, 18, 6], + dims=[64, 128, 320, 512], + downsample_norm=None, + mlp_act=nn.GELU, + mlp_bias=True, + norm_layers=GroupNorm1, + 
layer_scale_init_values=1e-6, + res_scale_init_values=None, + use_mlp_head=False, + **kwargs) + return _create_metaformer('poolformer_s36', pretrained=pretrained, **model_kwargs) + + +@register_model +def poolformer_m36(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[6, 6, 18, 6], + dims=[96, 192, 384, 768], + downsample_norm=None, + mlp_act=nn.GELU, + mlp_bias=True, + norm_layers=GroupNorm1, + layer_scale_init_values=1e-6, + res_scale_init_values=None, + use_mlp_head=False, + **kwargs) + return _create_metaformer('poolformer_m36', pretrained=pretrained, **model_kwargs) + + +@register_model +def poolformer_m48(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[8, 8, 24, 8], + dims=[96, 192, 384, 768], + downsample_norm=None, + mlp_act=nn.GELU, + mlp_bias=True, + norm_layers=GroupNorm1, + layer_scale_init_values=1e-6, + res_scale_init_values=None, + use_mlp_head=False, + **kwargs) + return _create_metaformer('poolformer_m48', pretrained=pretrained, **model_kwargs) + + +@register_model +def poolformerv2_s12(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[2, 2, 6, 2], + dims=[64, 128, 320, 512], + norm_layers=GroupNorm1NoBias, + use_mlp_head=False, + **kwargs) + return _create_metaformer('poolformerv2_s12', pretrained=pretrained, **model_kwargs) + + +@register_model +def poolformerv2_s24(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[4, 4, 12, 4], + dims=[64, 128, 320, 512], + norm_layers=GroupNorm1NoBias, + use_mlp_head=False, + **kwargs) + return _create_metaformer('poolformerv2_s24', pretrained=pretrained, **model_kwargs) + + +@register_model +def poolformerv2_s36(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[6, 6, 18, 6], + dims=[64, 128, 320, 512], + norm_layers=GroupNorm1NoBias, + use_mlp_head=False, + **kwargs) + return _create_metaformer('poolformerv2_s36', pretrained=pretrained, **model_kwargs) + + +@register_model +def poolformerv2_m36(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[6, 6, 18, 6], + dims=[96, 192, 384, 768], + norm_layers=GroupNorm1NoBias, + use_mlp_head=False, + **kwargs) + return _create_metaformer('poolformerv2_m36', pretrained=pretrained, **model_kwargs) + + +@register_model +def poolformerv2_m48(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[8, 8, 24, 8], + dims=[96, 192, 384, 768], + norm_layers=GroupNorm1NoBias, + use_mlp_head=False, + **kwargs) + return _create_metaformer('poolformerv2_m48', pretrained=pretrained, **model_kwargs) + + +@register_model +def convformer_s18(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[3, 3, 9, 3], + dims=[64, 128, 320, 512], + token_mixers=SepConv, + norm_layers=LayerNorm2dNoBias, + **kwargs) + return _create_metaformer('convformer_s18', pretrained=pretrained, **model_kwargs) + + +@register_model +def convformer_s36(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[3, 12, 18, 3], + dims=[64, 128, 320, 512], + token_mixers=SepConv, + norm_layers=LayerNorm2dNoBias, + **kwargs) + return _create_metaformer('convformer_s36', pretrained=pretrained, **model_kwargs) + + +@register_model +def convformer_m36(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[3, 12, 18, 3], + dims=[96, 192, 384, 576], + token_mixers=SepConv, + norm_layers=LayerNorm2dNoBias, + **kwargs) + return _create_metaformer('convformer_m36', pretrained=pretrained, **model_kwargs) + + 
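+# Usage sketch (illustrative): the entrypoints registered in this module are intended to be built
+# through timm's model factory rather than instantiated directly. Assuming a standard timm install,
+# something like the following should work for any of the variants defined in this file, e.g. the
+# convformer_s18 entrypoint above:
+#
+#   import timm, torch
+#   model = timm.create_model('convformer_s18', pretrained=False, num_classes=10)
+#   feats = timm.create_model('convformer_s18', pretrained=False, features_only=True)
+#   x = torch.randn(1, 3, 224, 224)
+#   logits = model(x)    # shape (1, 10)
+#   fmaps = feats(x)     # list of NCHW feature maps, one per stage
+
+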
+@register_model +def convformer_b36(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[3, 12, 18, 3], + dims=[128, 256, 512, 768], + token_mixers=SepConv, + norm_layers=LayerNorm2dNoBias, + **kwargs) + return _create_metaformer('convformer_b36', pretrained=pretrained, **model_kwargs) + + +@register_model +def caformer_s18(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[3, 3, 9, 3], + dims=[64, 128, 320, 512], + token_mixers=[SepConv, SepConv, Attention, Attention], + norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2, + **kwargs) + return _create_metaformer('caformer_s18', pretrained=pretrained, **model_kwargs) + + +@register_model +def caformer_s36(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[3, 12, 18, 3], + dims=[64, 128, 320, 512], + token_mixers=[SepConv, SepConv, Attention, Attention], + norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2, + **kwargs) + return _create_metaformer('caformer_s36', pretrained=pretrained, **model_kwargs) + + +@register_model +def caformer_m36(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[3, 12, 18, 3], + dims=[96, 192, 384, 576], + token_mixers=[SepConv, SepConv, Attention, Attention], + norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2, + **kwargs) + return _create_metaformer('caformer_m36', pretrained=pretrained, **model_kwargs) + + +@register_model +def caformer_b36(pretrained=False, **kwargs) -> MetaFormer: + model_kwargs = dict( + depths=[3, 12, 18, 3], + dims=[128, 256, 512, 768], + token_mixers=[SepConv, SepConv, Attention, Attention], + norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2, + **kwargs) + return _create_metaformer('caformer_b36', pretrained=pretrained, **model_kwargs) diff --git a/pytorch-image-models/timm/models/mlp_mixer.py b/pytorch-image-models/timm/models/mlp_mixer.py new file mode 100644 index 0000000000000000000000000000000000000000..25cde6a67c86ec8a434789b43752955906093168 --- /dev/null +++ b/pytorch-image-models/timm/models/mlp_mixer.py @@ -0,0 +1,710 @@ +""" MLP-Mixer, ResMLP, and gMLP in PyTorch + +This impl originally based on MLP-Mixer paper. + +Official JAX impl: https://github.com/google-research/vision_transformer/blob/linen/vit_jax/models_mixer.py + +Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + +@article{tolstikhin2021, + title={MLP-Mixer: An all-MLP Architecture for Vision}, + author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner, + Thomas and Yung, Jessica and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey}, + journal={arXiv preprint arXiv:2105.01601}, + year={2021} +} + +Also supporting ResMlp, and a preliminary (not verified) implementations of gMLP + +Code: https://github.com/facebookresearch/deit +Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 +@misc{touvron2021resmlp, + title={ResMLP: Feedforward networks for image classification with data-efficient training}, + author={Hugo Touvron and Piotr Bojanowski and Mathilde Caron and Matthieu Cord and Alaaeldin El-Nouby and + Edouard Grave and Armand Joulin and Gabriel Synnaeve and Jakob Verbeek and Hervé Jégou}, + year={2021}, + eprint={2105.03404}, +} + +Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 +@misc{liu2021pay, + title={Pay Attention to MLPs}, + author={Hanxiao Liu and Zihang Dai and David R. 
So and Quoc V. Le}, + year={2021}, + eprint={2105.08050}, +} + +A thank you to paper authors for releasing code and weights. + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +from functools import partial +from typing import List, Optional, Union, Tuple + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import PatchEmbed, Mlp, GluMlp, GatedMlp, DropPath, lecun_normal_, to_2tuple +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._manipulate import named_apply, checkpoint_seq +from ._registry import generate_default_cfgs, register_model, register_model_deprecations + +__all__ = ['MixerBlock', 'MlpMixer'] # model_registry will add each entrypoint fn to this + + +class MixerBlock(nn.Module): + """ Residual Block w/ token mixing and channel MLPs + Based on: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + def __init__( + self, + dim, + seq_len, + mlp_ratio=(0.5, 4.0), + mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + drop=0., + drop_path=0., + ): + super().__init__() + tokens_dim, channels_dim = [int(x * dim) for x in to_2tuple(mlp_ratio)] + self.norm1 = norm_layer(dim) + self.mlp_tokens = mlp_layer(seq_len, tokens_dim, act_layer=act_layer, drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.mlp_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) + x = x + self.drop_path(self.mlp_channels(self.norm2(x))) + return x + + +class Affine(nn.Module): + def __init__(self, dim): + super().__init__() + self.alpha = nn.Parameter(torch.ones((1, 1, dim))) + self.beta = nn.Parameter(torch.zeros((1, 1, dim))) + + def forward(self, x): + return torch.addcmul(self.beta, self.alpha, x) + + +class ResBlock(nn.Module): + """ Residual MLP block w/ LayerScale and Affine 'norm' + + Based on: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + def __init__( + self, + dim, + seq_len, + mlp_ratio=4, + mlp_layer=Mlp, + norm_layer=Affine, + act_layer=nn.GELU, + init_values=1e-4, + drop=0., + drop_path=0., + ): + super().__init__() + channel_dim = int(dim * mlp_ratio) + self.norm1 = norm_layer(dim) + self.linear_tokens = nn.Linear(seq_len, seq_len) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, drop=drop) + self.ls1 = nn.Parameter(init_values * torch.ones(dim)) + self.ls2 = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + x = x + self.drop_path(self.ls1 * self.linear_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) + x = x + self.drop_path(self.ls2 * self.mlp_channels(self.norm2(x))) + return x + + +class SpatialGatingUnit(nn.Module): + """ Spatial Gating Unit + + Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + def __init__(self, dim, seq_len, norm_layer=nn.LayerNorm): + super().__init__() + gate_dim = dim // 2 + self.norm = norm_layer(gate_dim) + self.proj = nn.Linear(seq_len, seq_len) + + def init_weights(self): + # special init for the projection gate, called as override by base model init + nn.init.normal_(self.proj.weight, std=1e-6) + nn.init.ones_(self.proj.bias) + + def forward(self, x): + u, v = x.chunk(2, dim=-1) + v = self.norm(v) + v = self.proj(v.transpose(-1, -2)) + return u * v.transpose(-1, -2) + + +class SpatialGatingBlock(nn.Module): + """ Residual Block w/ Spatial Gating + + Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + def __init__( + self, + dim, + seq_len, + mlp_ratio=4, + mlp_layer=GatedMlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + drop=0., + drop_path=0., + ): + super().__init__() + channel_dim = int(dim * mlp_ratio) + self.norm = norm_layer(dim) + sgu = partial(SpatialGatingUnit, seq_len=seq_len) + self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, gate_layer=sgu, drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + x = x + self.drop_path(self.mlp_channels(self.norm(x))) + return x + + +class MlpMixer(nn.Module): + + def __init__( + self, + num_classes=1000, + img_size=224, + in_chans=3, + patch_size=16, + num_blocks=8, + embed_dim=512, + mlp_ratio=(0.5, 4.0), + block_layer=MixerBlock, + mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + drop_rate=0., + proj_drop_rate=0., + drop_path_rate=0., + nlhb=False, + stem_norm=False, + global_pool='avg', + ): + super().__init__() + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models + self.grad_checkpointing = False + + self.stem = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + norm_layer=norm_layer if stem_norm else None, + ) + reduction = self.stem.feat_ratio() if hasattr(self.stem, 'feat_ratio') else patch_size + # FIXME drop_path (stochastic depth scaling rule or all the same?) + self.blocks = nn.Sequential(*[ + block_layer( + embed_dim, + self.stem.num_patches, + mlp_ratio, + mlp_layer=mlp_layer, + norm_layer=norm_layer, + act_layer=act_layer, + drop=proj_drop_rate, + drop_path=drop_path_rate, + ) + for _ in range(num_blocks)]) + self.feature_info = [ + dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=reduction) for i in range(num_blocks)] + self.norm = norm_layer(embed_dim) + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + self.init_weights(nlhb=nlhb) + + @torch.jit.ignore + def init_weights(self, nlhb=False): + head_bias = -math.log(self.num_classes) if nlhb else 0. 
+ named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'avg') + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to all intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + A list of intermediate features if intermediates_only is True, otherwise a tuple of (final features, intermediates) + """ + assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' + reshape = output_fmt == 'NCHW' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + + # forward pass + B, _, height, width = x.shape + x = self.stem(x) + + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + blocks = self.blocks + else: + blocks = self.blocks[:max_index + 1] + for i, blk in enumerate(blocks): + x = blk(x) + if i in take_indices: + # normalize intermediates with final norm layer if enabled + intermediates.append(self.norm(x) if norm else x) + + # process intermediates + if reshape: + # reshape to BCHW output format + H, W = self.stem.dynamic_feat_size((height, width)) + intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] + + if intermediates_only: + return intermediates + + x = self.norm(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates.
+ """ + take_indices, max_index = feature_take_indices(len(self.blocks), indices) + self.blocks = self.blocks[:max_index + 1] # truncate blocks + if prune_norm: + self.norm = nn.Identity() + if prune_head: + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + x = x.mean(dim=1) + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): + """ Mixer weight initialization (trying to match Flax defaults) + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + else: + if flax: + # Flax defaults + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + else: + # like MLP init in vit (my original init) + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + # NOTE if a parent module contains init_weights method, it can override the init of the + # child modules as this will be called in depth-first order. 
+ module.init_weights() + + +def checkpoint_filter_fn(state_dict, model): + """ Remap checkpoints if needed """ + if 'patch_embed.proj.weight' in state_dict: + # Remap FB ResMlp models -> timm + out_dict = {} + for k, v in state_dict.items(): + k = k.replace('patch_embed.', 'stem.') + k = k.replace('attn.', 'linear_tokens.') + k = k.replace('mlp.', 'mlp_channels.') + k = k.replace('gamma_', 'ls') + if k.endswith('.alpha') or k.endswith('.beta'): + v = v.reshape(1, 1, -1) + out_dict[k] = v + return out_dict + return state_dict + + +def _create_mixer(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', 3) + model = build_model_with_cfg( + MlpMixer, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': 0.875, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'stem.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'mixer_s32_224.untrained': _cfg(), + 'mixer_s16_224.untrained': _cfg(), + 'mixer_b32_224.untrained': _cfg(), + 'mixer_b16_224.goog_in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth', + ), + 'mixer_b16_224.goog_in21k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth', + num_classes=21843 + ), + 'mixer_l32_224.untrained': _cfg(), + 'mixer_l16_224.goog_in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224-92f9adc4.pth', + ), + 'mixer_l16_224.goog_in21k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224_in21k-846aa33c.pth', + num_classes=21843 + ), + + # Mixer ImageNet-21K-P pretraining + 'mixer_b16_224.miil_in21k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mixer_b16_224_miil_in21k-2a558a71.pth', + mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', num_classes=11221, + ), + 'mixer_b16_224.miil_in21k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mixer_b16_224_miil-9229a591.pth', + mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', + ), + + 'gmixer_12_224.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'gmixer_24_224.ra3_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmixer_24_224_raa-7daf7ae6.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + 'resmlp_12_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'resmlp_24_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_no_dist.pth', + #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resmlp_24_224_raa-a8256759.pth', + mean=IMAGENET_DEFAULT_MEAN, 
std=IMAGENET_DEFAULT_STD), + 'resmlp_36_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/resmlp_36_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'resmlp_big_24_224.fb_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + 'resmlp_12_224.fb_distilled_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'resmlp_24_224.fb_distilled_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'resmlp_36_224.fb_distilled_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/resmlp_36_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'resmlp_big_24_224.fb_distilled_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + 'resmlp_big_24_224.fb_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_22k.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + 'resmlp_12_224.fb_dino': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dino.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'resmlp_24_224.fb_dino': _cfg( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dino.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + 'gmlp_ti16_224.untrained': _cfg(), + 'gmlp_s16_224.ra3_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmlp_s16_224_raa-10536d42.pth', + ), + 'gmlp_b16_224.untrained': _cfg(), +}) + + +@register_model +def mixer_s32_224(pretrained=False, **kwargs) -> MlpMixer: + """ Mixer-S/32 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=8, embed_dim=512, **kwargs) + model = _create_mixer('mixer_s32_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_s16_224(pretrained=False, **kwargs) -> MlpMixer: + """ Mixer-S/16 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=8, embed_dim=512, **kwargs) + model = _create_mixer('mixer_s16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b32_224(pretrained=False, **kwargs) -> MlpMixer: + """ Mixer-B/32 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b32_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b16_224(pretrained=False, **kwargs) -> MlpMixer: + """ Mixer-B/16 224x224. ImageNet-1k pretrained weights. 
+ Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_l32_224(pretrained=False, **kwargs) -> MlpMixer: + """ Mixer-L/32 224x224. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=24, embed_dim=1024, **kwargs) + model = _create_mixer('mixer_l32_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_l16_224(pretrained=False, **kwargs) -> MlpMixer: + """ Mixer-L/16 224x224. ImageNet-1k pretrained weights. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs) + model = _create_mixer('mixer_l16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmixer_12_224(pretrained=False, **kwargs) -> MlpMixer: + """ Glu-Mixer-12 224x224 + Experiment by Ross Wightman, adding SwiGLU to MLP-Mixer + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=(1.0, 4.0), + mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) + model = _create_mixer('gmixer_12_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmixer_24_224(pretrained=False, **kwargs) -> MlpMixer: + """ Glu-Mixer-24 224x224 + Experiment by Ross Wightman, adding SwiGLU to MLP-Mixer + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=(1.0, 4.0), + mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) + model = _create_mixer('gmixer_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_12_224(pretrained=False, **kwargs) -> MlpMixer: + """ ResMLP-12 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_12_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_24_224(pretrained=False, **kwargs) -> MlpMixer: + """ ResMLP-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_36_224(pretrained=False, **kwargs) -> MlpMixer: + """ ResMLP-36 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_36_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_224(pretrained=False, **kwargs) -> MlpMixer: + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), 
norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_ti16_224(pretrained=False, **kwargs) -> MlpMixer: + """ gMLP-Tiny + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=128, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_ti16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_s16_224(pretrained=False, **kwargs) -> MlpMixer: + """ gMLP-Small + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=256, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_s16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_b16_224(pretrained=False, **kwargs) -> MlpMixer: + """ gMLP-Base + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=512, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_b16_224', pretrained=pretrained, **model_args) + return model + + +register_model_deprecations(__name__, { + 'mixer_b16_224_in21k': 'mixer_b16_224.goog_in21k_ft_in1k', + 'mixer_l16_224_in21k': 'mixer_l16_224.goog_in21k_ft_in1k', + 'mixer_b16_224_miil': 'mixer_b16_224.miil_in21k_ft_in1k', + 'mixer_b16_224_miil_in21k': 'mixer_b16_224.miil_in21k', + 'resmlp_12_distilled_224': 'resmlp_12_224.fb_distilled_in1k', + 'resmlp_24_distilled_224': 'resmlp_24_224.fb_distilled_in1k', + 'resmlp_36_distilled_224': 'resmlp_36_224.fb_distilled_in1k', + 'resmlp_big_24_distilled_224': 'resmlp_big_24_224.fb_distilled_in1k', + 'resmlp_big_24_224_in22ft1k': 'resmlp_big_24_224.fb_in22k_ft_in1k', + 'resmlp_12_224_dino': 'resmlp_12_224', + 'resmlp_24_224_dino': 'resmlp_24_224', +}) diff --git a/pytorch-image-models/timm/models/mobilenetv3.py b/pytorch-image-models/timm/models/mobilenetv3.py new file mode 100644 index 0000000000000000000000000000000000000000..79a51f7729b0ecdc085b9c39f016616eec5b87e1 --- /dev/null +++ b/pytorch-image-models/timm/models/mobilenetv3.py @@ -0,0 +1,1376 @@ +""" MobileNet V3 + +A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl. 
+ +Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244 + +Hacked together by / Copyright 2019, Ross Wightman +""" +from functools import partial +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.checkpoint import checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from timm.layers import SelectAdaptivePool2d, Linear, LayerType, PadType, create_conv2d, get_norm_act_layer +from ._builder import build_model_with_cfg, pretrained_cfg_for_features +from ._efficientnet_blocks import SqueezeExcite +from ._efficientnet_builder import BlockArgs, EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, \ + round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT +from ._features import FeatureInfo, FeatureHooks, feature_take_indices +from ._manipulate import checkpoint_seq +from ._registry import generate_default_cfgs, register_model, register_model_deprecations + +__all__ = ['MobileNetV3', 'MobileNetV3Features'] + + +class MobileNetV3(nn.Module): + """ MobileNet-V3 + + Based on my EfficientNet implementation and building blocks, this model utilizes the MobileNet-v3 specific + 'efficient head', where global pooling is done before the head convolution without a final batch-norm + layer before the classifier. + + Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244 + + Other architectures utilizing the MobileNet-V3 efficient head that are supported by this impl include: + * HardCoRe-NAS - https://arxiv.org/abs/2102.11646 (defn in hardcorenas.py uses this class) + * FBNet-V3 - https://arxiv.org/abs/2006.02049 + * LCNet - https://arxiv.org/abs/2109.15099 + * MobileNet-V4 - https://arxiv.org/abs/2404.10518 + """ + + def __init__( + self, + block_args: BlockArgs, + num_classes: int = 1000, + in_chans: int = 3, + stem_size: int = 16, + fix_stem: bool = False, + num_features: int = 1280, + head_bias: bool = True, + head_norm: bool = False, + pad_type: str = '', + act_layer: Optional[LayerType] = None, + norm_layer: Optional[LayerType] = None, + aa_layer: Optional[LayerType] = None, + se_layer: Optional[LayerType] = None, + se_from_exp: bool = True, + round_chs_fn: Callable = round_channels, + drop_rate: float = 0., + drop_path_rate: float = 0., + layer_scale_init_value: Optional[float] = None, + global_pool: str = 'avg', + ): + """ + Args: + block_args: Arguments for blocks of the network. + num_classes: Number of classes for classification head. + in_chans: Number of input image channels. + stem_size: Number of output channels of the initial stem convolution. + fix_stem: If True, don't scale stem by round_chs_fn. + num_features: Number of output channels of the conv head layer. + head_bias: If True, add a learnable bias to the conv head layer. + head_norm: If True, follow the conv head layer with a norm + act layer instead of bias + act (MobileNet-V4 style). + pad_type: Type of padding to use for convolution layers. + act_layer: Type of activation layer. + norm_layer: Type of normalization layer. + aa_layer: Type of anti-aliasing layer. + se_layer: Type of Squeeze-and-Excite layer. + se_from_exp: If True, calculate SE channel reduction from expanded mid channels. + round_chs_fn: Callable to round number of filters based on depth multiplier. + drop_rate: Dropout rate. + drop_path_rate: Stochastic depth rate. + layer_scale_init_value: Enable layer scale on compatible blocks if not None. + global_pool: Type of pooling to use for global pooling features of the FC head. 
+ """ + super(MobileNetV3, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + se_layer = se_layer or SqueezeExcite + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_act_layer(stem_size, inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=32, + pad_type=pad_type, + round_chs_fn=round_chs_fn, + se_from_exp=se_from_exp, + act_layer=act_layer, + norm_layer=norm_layer, + aa_layer=aa_layer, + se_layer=se_layer, + drop_path_rate=drop_path_rate, + layer_scale_init_value=layer_scale_init_value, + ) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = builder.features + self.stage_ends = [f['stage'] for f in self.feature_info] + self.num_features = builder.in_chs # features of last stage, output of forward_features() + self.head_hidden_size = num_features # features of conv_head, pre_logits output + + # Head + Pooling + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + num_pooled_chs = self.num_features * self.global_pool.feat_mult() + if head_norm: + # mobilenet-v4 post-pooling PW conv is followed by a norm+act layer + self.conv_head = create_conv2d(num_pooled_chs, self.head_hidden_size, 1, padding=pad_type) # never bias + self.norm_head = norm_act_layer(self.head_hidden_size) + self.act2 = nn.Identity() + else: + # mobilenet-v3 and others only have an activation after final PW conv + self.conv_head = create_conv2d(num_pooled_chs, self.head_hidden_size, 1, padding=pad_type, bias=head_bias) + self.norm_head = nn.Identity() + self.act2 = act_layer(inplace=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity() + + efficientnet_init_weights(self) + + def as_sequential(self): + layers = [self.conv_stem, self.bn1] + layers.extend(self.blocks) + layers.extend([self.global_pool, self.conv_head, self.norm_head, self.act2]) + layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier]) + return nn.Sequential(*layers) + + @torch.jit.ignore + def group_matcher(self, coarse: bool = False): + return dict( + stem=r'^conv_stem|bn1', + blocks=r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)' + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable: bool = True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.classifier + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + # NOTE: cannot meaningfully change pooling of efficient head after creation + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity() + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + extra_blocks: bool = False, + ) -> Union[List[torch.Tensor], 
Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to compatible intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + extra_blocks: Include outputs of all blocks and head conv in output, does not align with feature_info + Returns: + + """ + assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' + if stop_early: + assert intermediates_only, 'Must use intermediates_only for early stopping.' + intermediates = [] + if extra_blocks: + take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices) + else: + take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) + take_indices = [self.stage_ends[i] for i in take_indices] + max_index = self.stage_ends[max_index] + + # forward pass + feat_idx = 0 # stem is index 0 + x = self.conv_stem(x) + x = self.bn1(x) + if feat_idx in take_indices: + intermediates.append(x) + + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + blocks = self.blocks + else: + blocks = self.blocks[:max_index] + for blk in blocks: + feat_idx += 1 + x = blk(x) + if feat_idx in take_indices: + intermediates.append(x) + + if intermediates_only: + return intermediates + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + extra_blocks: bool = False, + ): + """ Prune layers not required for specified intermediates. + """ + if extra_blocks: + take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices) + else: + take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) + max_index = self.stage_ends[max_index] + self.blocks = self.blocks[:max_index] # truncate blocks w/ stem as idx 0 + if max_index < len(self.blocks): + self.conv_head = nn.Identity() + self.norm_head = nn.Identity() + if prune_head: + self.conv_head = nn.Identity() + self.norm_head = nn.Identity() + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + x = self.conv_stem(x) + x = self.bn1(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x, flatten=True) + else: + x = self.blocks(x) + return x + + def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor: + x = self.global_pool(x) + x = self.conv_head(x) + x = self.norm_head(x) + x = self.act2(x) + x = self.flatten(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + if pre_logits: + return x + return self.classifier(x) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +class MobileNetV3Features(nn.Module): + """ MobileNetV3 Feature Extractor + + A work-in-progress feature extraction module for MobileNet-V3 to use as a backbone for segmentation + and object detection models. + """ + + def __init__( + self, + block_args: BlockArgs, + out_indices: Tuple[int, ...] 
= (0, 1, 2, 3, 4), + feature_location: str = 'bottleneck', + in_chans: int = 3, + stem_size: int = 16, + fix_stem: bool = False, + output_stride: int = 32, + pad_type: PadType = '', + round_chs_fn: Callable = round_channels, + se_from_exp: bool = True, + act_layer: Optional[LayerType] = None, + norm_layer: Optional[LayerType] = None, + aa_layer: Optional[LayerType] = None, + se_layer: Optional[LayerType] = None, + drop_rate: float = 0., + drop_path_rate: float = 0., + layer_scale_init_value: Optional[float] = None, + ): + """ + Args: + block_args: Arguments for blocks of the network. + out_indices: Output from stages at indices. + feature_location: Location of feature before/after each block, must be in ['bottleneck', 'expansion'] + in_chans: Number of input image channels. + stem_size: Number of output channels of the initial stem convolution. + fix_stem: If True, don't scale stem by round_chs_fn. + output_stride: Output stride of the network. + pad_type: Type of padding to use for convolution layers. + round_chs_fn: Callable to round number of filters based on depth multiplier. + se_from_exp: If True, calculate SE channel reduction from expanded mid channels. + act_layer: Type of activation layer. + norm_layer: Type of normalization layer. + se_layer: Type of Squeeze-and-Excite layer. + drop_rate: Dropout rate. + drop_path_rate: Stochastic depth rate. + layer_scale_init_value: Enable layer scale on compatible blocks if not None. + """ + super(MobileNetV3Features, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.drop_rate = drop_rate + self.grad_checkpointing = False + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, + pad_type=pad_type, + round_chs_fn=round_chs_fn, + se_from_exp=se_from_exp, + act_layer=act_layer, + norm_layer=norm_layer, + aa_layer=aa_layer, + se_layer=se_layer, + drop_path_rate=drop_path_rate, + layer_scale_init_value=layer_scale_init_value, + feature_location=feature_location, + ) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = FeatureInfo(builder.features, out_indices) + self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()} + + efficientnet_init_weights(self) + + # Register feature extraction hooks with FeatureHooks helper + self.feature_hooks = None + if feature_location != 'bottleneck': + hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) + self.feature_hooks = FeatureHooks(hooks, self.named_modules()) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable: bool = True): + self.grad_checkpointing = enable + + def forward(self, x: torch.Tensor) -> List[torch.Tensor]: + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if self.feature_hooks is None: + features = [] + if 0 in self._stage_out_idx: + features.append(x) # add stem out + for i, b in enumerate(self.blocks): + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(b, x) + else: + x = b(x) + if i + 1 in self._stage_out_idx: + features.append(x) + return features + else: + self.blocks(x) + out = self.feature_hooks.get_output(x.device) + return list(out.values()) + + +def _create_mnv3(variant: str, pretrained: bool = 
False, **kwargs) -> MobileNetV3: + features_mode = '' + model_cls = MobileNetV3 + kwargs_filter = None + if kwargs.pop('features_only', False): + if 'feature_cfg' in kwargs or 'feature_cls' in kwargs: + features_mode = 'cfg' + else: + kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'head_bias', 'head_norm', 'global_pool') + model_cls = MobileNetV3Features + features_mode = 'cls' + + model = build_model_with_cfg( + model_cls, + variant, + pretrained, + features_only=features_mode == 'cfg', + pretrained_strict=features_mode != 'cls', + kwargs_filter=kwargs_filter, + **kwargs, + ) + if features_mode == 'cls': + model.default_cfg = pretrained_cfg_for_features(model.default_cfg) + return model + + +def _gen_mobilenet_v3_rw( + variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs +) -> MobileNetV3: + """Creates a MobileNet-V3 model. + + Ref impl: ? + Paper: https://arxiv.org/abs/1905.02244 + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_nre_noskip'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], # hard-swish + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + head_bias=False, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid'), + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_v3( + variant: str, channel_multiplier: float = 1.0, depth_multiplier: float = 1.0, + group_size=None, pretrained: bool = False, **kwargs +) -> MobileNetV3: + """Creates a MobileNet-V3 model. + + Ref impl: ? + Paper: https://arxiv.org/abs/1905.02244 + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + if 'small' in variant: + num_features = 1024 + if 'minimal' in variant: + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s2_e1_c16'], + # stage 1, 56x56 in + ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'], + # stage 2, 28x28 in + ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'], + # stage 3, 14x14 in + ['ir_r2_k3_s1_e3_c48'], + # stage 4, 14x14in + ['ir_r3_k3_s2_e6_c96'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c576'], + ] + else: + act_layer = resolve_act_layer(kwargs, 'hard_swish') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s2_e1_c16_se0.25_nre'], # relu + # stage 1, 56x56 in + ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], # relu + # stage 2, 28x28 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], # hard-swish + # stage 3, 14x14 in + ['ir_r2_k5_s1_e3_c48_se0.25'], # hard-swish + # stage 4, 14x14in + ['ir_r3_k5_s2_e6_c96_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c576'], # hard-swish + ] + else: + num_features = 1280 + if 'minimal' in variant: + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'], + # stage 2, 56x56 in + ['ir_r3_k3_s2_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112'], + # stage 5, 14x14in + ['ir_r3_k3_s2_e6_c160'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], + ] + else: + act_layer = resolve_act_layer(kwargs, 'hard_swish') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_nre'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], # hard-swish + ] + se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, group_size=group_size), + num_features=num_features, + stem_size=16, + fix_stem=channel_multiplier < 0.75, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=act_layer, + se_layer=se_layer, + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _gen_fbnetv3(variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs): + """ FBNetV3 + Paper: `FBNetV3: Joint Architecture-Recipe Search using Predictor Pretraining` + - https://arxiv.org/abs/2006.02049 + FIXME untested, this is a preliminary impl of some FBNet-V3 variants. 
+ """ + vl = variant.split('_')[-1] + if vl in ('a', 'b'): + stem_size = 16 + arch_def = [ + ['ds_r2_k3_s1_e1_c16'], + ['ir_r1_k5_s2_e4_c24', 'ir_r3_k5_s1_e2_c24'], + ['ir_r1_k5_s2_e5_c40_se0.25', 'ir_r4_k5_s1_e3_c40_se0.25'], + ['ir_r1_k5_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'], + ['ir_r1_k3_s1_e5_c120_se0.25', 'ir_r5_k5_s1_e3_c120_se0.25'], + ['ir_r1_k3_s2_e6_c184_se0.25', 'ir_r5_k5_s1_e4_c184_se0.25', 'ir_r1_k5_s1_e6_c224_se0.25'], + ['cn_r1_k1_s1_c1344'], + ] + elif vl == 'd': + stem_size = 24 + arch_def = [ + ['ds_r2_k3_s1_e1_c16'], + ['ir_r1_k3_s2_e5_c24', 'ir_r5_k3_s1_e2_c24'], + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r4_k3_s1_e3_c40_se0.25'], + ['ir_r1_k3_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'], + ['ir_r1_k3_s1_e5_c128_se0.25', 'ir_r6_k5_s1_e3_c128_se0.25'], + ['ir_r1_k3_s2_e6_c208_se0.25', 'ir_r5_k5_s1_e5_c208_se0.25', 'ir_r1_k5_s1_e6_c240_se0.25'], + ['cn_r1_k1_s1_c1440'], + ] + elif vl == 'g': + stem_size = 32 + arch_def = [ + ['ds_r3_k3_s1_e1_c24'], + ['ir_r1_k5_s2_e4_c40', 'ir_r4_k5_s1_e2_c40'], + ['ir_r1_k5_s2_e4_c56_se0.25', 'ir_r4_k5_s1_e3_c56_se0.25'], + ['ir_r1_k5_s2_e5_c104', 'ir_r4_k3_s1_e3_c104'], + ['ir_r1_k3_s1_e5_c160_se0.25', 'ir_r8_k5_s1_e3_c160_se0.25'], + ['ir_r1_k3_s2_e6_c264_se0.25', 'ir_r6_k5_s1_e5_c264_se0.25', 'ir_r2_k5_s1_e6_c288_se0.25'], + ['cn_r1_k1_s1_c1728'], + ] + else: + raise NotImplementedError(f'Unknown variant {variant}.') + round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.95) + se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=round_chs_fn) + act_layer = resolve_act_layer(kwargs, 'hard_swish') + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=1984, + head_bias=False, + stem_size=stem_size, + round_chs_fn=round_chs_fn, + se_from_exp=False, + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=act_layer, + se_layer=se_layer, + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _gen_lcnet(variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs): + """ LCNet + Essentially a MobileNet-V3 crossed with a MobileNet-V1 + + Paper: `PP-LCNet: A Lightweight CPU Convolutional Neural Network` - https://arxiv.org/abs/2109.15099 + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['dsa_r1_k3_s1_c32'], + # stage 1, 112x112 in + ['dsa_r2_k3_s2_c64'], + # stage 2, 56x56 in + ['dsa_r2_k3_s2_c128'], + # stage 3, 28x28 in + ['dsa_r1_k3_s2_c256', 'dsa_r1_k5_s1_c256'], + # stage 4, 14x14in + ['dsa_r4_k5_s1_c256'], + # stage 5, 14x14in + ['dsa_r2_k5_s2_c512_se0.25'], + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=16, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU), + num_features=1280, + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_v4( + variant: str, channel_multiplier: float = 1.0, group_size=None, pretrained: bool = False, **kwargs, +) -> MobileNetV3: + """Creates a MobileNet-V4 model. + + Ref impl: ? + Paper: https://arxiv.org/abs/2404.10518 + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + num_features = 1280 + if 'hybrid' in variant: + layer_scale_init_value = 1e-5 + if 'medium' in variant: + stem_size = 32 + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + [ + 'er_r1_k3_s2_e4_c48' # FusedIB (EdgeResidual) + ], + # stage 1, 56x56 in + [ + 'uir_r1_a3_k5_s2_e4_c80', # ExtraDW + 'uir_r1_a3_k3_s1_e2_c80', # ExtraDW + ], + # stage 2, 28x28 in + [ + 'uir_r1_a3_k5_s2_e6_c160', # ExtraDW + 'uir_r1_a0_k0_s1_e2_c160', # FFN + 'uir_r1_a3_k3_s1_e4_c160', # ExtraDW + 'uir_r1_a3_k5_s1_e4_c160', # ExtraDW + 'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample + 'uir_r1_a3_k3_s1_e4_c160', # ExtraDW + 'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample + 'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt + 'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample + 'uir_r1_a3_k3_s1_e4_c160', # ExtraDW + 'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample + 'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt + ], + # stage 3, 14x14in + [ + 'uir_r1_a5_k5_s2_e6_c256', # ExtraDW + 'uir_r1_a5_k5_s1_e4_c256', # ExtraDW + 'uir_r2_a3_k5_s1_e4_c256', # ExtraDW + 'uir_r1_a0_k0_s1_e2_c256', # FFN + 'uir_r1_a3_k5_s1_e2_c256', # ExtraDW + 'uir_r1_a0_k0_s1_e2_c256', # FFN + 'uir_r1_a0_k0_s1_e4_c256', # FFN + 'mqa_r1_k3_h4_s1_d64_c256', # MQA + 'uir_r1_a3_k0_s1_e4_c256', # ConvNeXt + 'mqa_r1_k3_h4_s1_d64_c256', # MQA + 'uir_r1_a5_k5_s1_e4_c256', # ExtraDW + 'mqa_r1_k3_h4_s1_d64_c256', # MQA + 'uir_r1_a5_k0_s1_e4_c256', # ConvNeXt + 'mqa_r1_k3_h4_s1_d64_c256', # MQA + 'uir_r1_a5_k0_s1_e4_c256', # ConvNeXt + ], + # stage 4, 7x7 in + [ + 'cn_r1_k1_s1_c960' # Conv + ], + ] + elif 'large' in variant: + stem_size = 24 + act_layer = resolve_act_layer(kwargs, 'gelu') + arch_def = [ + # stage 0, 112x112 in + [ + 'er_r1_k3_s2_e4_c48', # FusedIB (EdgeResidual) + ], + # stage 1, 56x56 in + [ + 'uir_r1_a3_k5_s2_e4_c96', # ExtraDW + 'uir_r1_a3_k3_s1_e4_c96', # ExtraDW + ], + # stage 2, 28x28 in + [ + 'uir_r1_a3_k5_s2_e4_c192', # ExtraDW + 'uir_r3_a3_k3_s1_e4_c192', # ExtraDW + 'uir_r1_a3_k5_s1_e4_c192', # ExtraDW + 'uir_r2_a5_k3_s1_e4_c192', # ExtraDW + 'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample + 'uir_r1_a5_k3_s1_e4_c192', # ExtraDW + 'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample + 'uir_r1_a5_k3_s1_e4_c192', # ExtraDW + 'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample + 'uir_r1_a5_k3_s1_e4_c192', # ExtraDW + 'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample + 'uir_r1_a3_k0_s1_e4_c192', # ConvNeXt + ], + # stage 3, 14x14in + [ + 'uir_r4_a5_k5_s2_e4_c512', # ExtraDW + 'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt + 'uir_r1_a5_k3_s1_e4_c512', # ExtraDW + 'uir_r2_a5_k0_s1_e4_c512', # ConvNeXt + 'uir_r1_a5_k3_s1_e4_c512', # ExtraDW + 'uir_r1_a5_k5_s1_e4_c512', # ExtraDW + 'mqa_r1_k3_h8_s1_d64_c512', # MQA + 'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt + 'mqa_r1_k3_h8_s1_d64_c512', # MQA + 'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt + 'mqa_r1_k3_h8_s1_d64_c512', # MQA + 'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt + 'mqa_r1_k3_h8_s1_d64_c512', # MQA + 'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt + ], + # stage 4, 7x7 in + [ + 'cn_r1_k1_s1_c960', # Conv + ], + ] + else: + assert False, f'Unknown variant {variant}.' 
+ else: + layer_scale_init_value = None + if 'small' in variant: + stem_size = 32 + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + [ + 'cn_r1_k3_s2_e1_c32', # Conv + 'cn_r1_k1_s1_e1_c32', # Conv + ], + # stage 1, 56x56 in + [ + 'cn_r1_k3_s2_e1_c96', # Conv + 'cn_r1_k1_s1_e1_c64', # Conv + ], + # stage 2, 28x28 in + [ + 'uir_r1_a5_k5_s2_e3_c96', # ExtraDW + 'uir_r4_a0_k3_s1_e2_c96', # IR + 'uir_r1_a3_k0_s1_e4_c96', # ConvNeXt + ], + # stage 3, 14x14 in + [ + 'uir_r1_a3_k3_s2_e6_c128', # ExtraDW + 'uir_r1_a5_k5_s1_e4_c128', # ExtraDW + 'uir_r1_a0_k5_s1_e4_c128', # IR + 'uir_r1_a0_k5_s1_e3_c128', # IR + 'uir_r2_a0_k3_s1_e4_c128', # IR + ], + # stage 4, 7x7 in + [ + 'cn_r1_k1_s1_c960', # Conv + ], + ] + elif 'medium' in variant: + stem_size = 32 + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + [ + 'er_r1_k3_s2_e4_c48', # FusedIB (EdgeResidual) + ], + # stage 1, 56x56 in + [ + 'uir_r1_a3_k5_s2_e4_c80', # ExtraDW + 'uir_r1_a3_k3_s1_e2_c80', # ExtraDW + ], + # stage 2, 28x28 in + [ + 'uir_r1_a3_k5_s2_e6_c160', # ExtraDW + 'uir_r2_a3_k3_s1_e4_c160', # ExtraDW + 'uir_r1_a3_k5_s1_e4_c160', # ExtraDW + 'uir_r1_a3_k3_s1_e4_c160', # ExtraDW + 'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt + 'uir_r1_a0_k0_s1_e2_c160', # ExtraDW + 'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt + ], + # stage 3, 14x14in + [ + 'uir_r1_a5_k5_s2_e6_c256', # ExtraDW + 'uir_r1_a5_k5_s1_e4_c256', # ExtraDW + 'uir_r2_a3_k5_s1_e4_c256', # ExtraDW + 'uir_r1_a0_k0_s1_e4_c256', # FFN + 'uir_r1_a3_k0_s1_e4_c256', # ConvNeXt + 'uir_r1_a3_k5_s1_e2_c256', # ExtraDW + 'uir_r1_a5_k5_s1_e4_c256', # ExtraDW + 'uir_r2_a0_k0_s1_e4_c256', # FFN + 'uir_r1_a5_k0_s1_e2_c256', # ConvNeXt + ], + # stage 4, 7x7 in + [ + 'cn_r1_k1_s1_c960', # Conv + ], + ] + elif 'large' in variant: + stem_size = 24 + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + [ + 'er_r1_k3_s2_e4_c48', # FusedIB (EdgeResidual) + ], + # stage 1, 56x56 in + [ + 'uir_r1_a3_k5_s2_e4_c96', # ExtraDW + 'uir_r1_a3_k3_s1_e4_c96', # ExtraDW + ], + # stage 2, 28x28 in + [ + 'uir_r1_a3_k5_s2_e4_c192', # ExtraDW + 'uir_r3_a3_k3_s1_e4_c192', # ExtraDW + 'uir_r1_a3_k5_s1_e4_c192', # ExtraDW + 'uir_r5_a5_k3_s1_e4_c192', # ExtraDW + 'uir_r1_a3_k0_s1_e4_c192', # ConvNeXt + ], + # stage 3, 14x14in + [ + 'uir_r4_a5_k5_s2_e4_c512', # ExtraDW + 'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt + 'uir_r1_a5_k3_s1_e4_c512', # ExtraDW + 'uir_r2_a5_k0_s1_e4_c512', # ConvNeXt + 'uir_r1_a5_k3_s1_e4_c512', # ExtraDW + 'uir_r1_a5_k5_s1_e4_c512', # ExtraDW + 'uir_r3_a5_k0_s1_e4_c512', # ConvNeXt + + ], + # stage 4, 7x7 in + [ + 'cn_r1_k1_s1_c960', # Conv + ], + ] + else: + assert False, f'Unknown variant {variant}.' 
+ + model_kwargs = dict( + block_args=decode_arch_def(arch_def, group_size=group_size), + head_bias=False, + head_norm=True, + num_features=num_features, + stem_size=stem_size, + fix_stem=channel_multiplier < 1.0, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=act_layer, + layer_scale_init_value=layer_scale_init_value, + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _cfg(url: str = '', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'mobilenetv3_large_075.untrained': _cfg(url=''), + 'mobilenetv3_large_100.ra_in1k': _cfg( + interpolation='bicubic', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth', + hf_hub_id='timm/'), + 'mobilenetv3_large_100.ra4_e3600_r224_in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0), + 'mobilenetv3_large_100.miil_in21k_ft_in1k': _cfg( + interpolation='bilinear', mean=(0., 0., 0.), std=(1., 1., 1.), + origin_url='https://github.com/Alibaba-MIIL/ImageNet21K', + paper_ids='arXiv:2104.10972v4', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_1k_miil_78_0-66471c13.pth', + hf_hub_id='timm/'), + 'mobilenetv3_large_100.miil_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_in21k_miil-d71cc17b.pth', + hf_hub_id='timm/', + origin_url='https://github.com/Alibaba-MIIL/ImageNet21K', + paper_ids='arXiv:2104.10972v4', + interpolation='bilinear', mean=(0., 0., 0.), std=(1., 1., 1.), num_classes=11221), + 'mobilenetv3_large_150d.ra4_e3600_r256_in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 256, 256), crop_pct=0.95, pool_size=(8, 8), test_input_size=(3, 320, 320), test_crop_pct=1.0), + + 'mobilenetv3_small_050.lamb_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_050_lambc-4b7bbe87.pth', + hf_hub_id='timm/', + interpolation='bicubic'), + 'mobilenetv3_small_075.lamb_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_075_lambc-384766db.pth', + hf_hub_id='timm/', + interpolation='bicubic'), + 'mobilenetv3_small_100.lamb_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_100_lamb-266a294c.pth', + hf_hub_id='timm/', + interpolation='bicubic'), + + 'mobilenetv3_rw.rmsp_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth', + hf_hub_id='timm/', + interpolation='bicubic'), + + 'tf_mobilenetv3_large_075.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_100.in1k': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_minimal_100.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_075.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_100.in1k': _cfg( + url= 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_minimal_100.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth', + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + + 'fbnetv3_b.ra2_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_b_224-ead5d2a1.pth', + hf_hub_id='timm/', + test_input_size=(3, 256, 256), crop_pct=0.95), + 'fbnetv3_d.ra2_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_d_224-c98bce42.pth', + hf_hub_id='timm/', + test_input_size=(3, 256, 256), crop_pct=0.95), + 'fbnetv3_g.ra2_in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_g_240-0b1df83b.pth', + hf_hub_id='timm/', + input_size=(3, 240, 240), test_input_size=(3, 288, 288), crop_pct=0.95, pool_size=(8, 8)), + + "lcnet_035.untrained": _cfg(), + "lcnet_050.ra2_in1k": _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_050-f447553b.pth', + hf_hub_id='timm/', + interpolation='bicubic', + ), + "lcnet_075.ra2_in1k": _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_075-318cad2c.pth', + hf_hub_id='timm/', + interpolation='bicubic', + ), + "lcnet_100.ra2_in1k": _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_100-a929038c.pth', + hf_hub_id='timm/', + interpolation='bicubic', + ), + "lcnet_150.untrained": _cfg(), + + 'mobilenetv4_conv_small_035.untrained': _cfg( + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'), + 'mobilenetv4_conv_small_050.e3000_r224_in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'), + 'mobilenetv4_conv_small.e2400_r224_in1k': _cfg( + hf_hub_id='timm/', + test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'), + 'mobilenetv4_conv_small.e1200_r224_in1k': _cfg( + hf_hub_id='timm/', + test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'), + 'mobilenetv4_conv_small.e3600_r256_in1k': _cfg( + hf_hub_id='timm/', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, + test_input_size=(3, 320, 320), test_crop_pct=1.0, 
interpolation='bicubic'), + 'mobilenetv4_conv_medium.e500_r256_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_conv_medium.e500_r224_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'), + + 'mobilenetv4_conv_medium.e250_r384_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), + crop_pct=0.95, interpolation='bicubic'), + 'mobilenetv4_conv_medium.e180_r384_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + input_size=(3, 384, 384), pool_size=(12, 12), + crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_conv_medium.e180_ad_r384_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + input_size=(3, 384, 384), pool_size=(12, 12), + crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_conv_medium.e250_r384_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + input_size=(3, 384, 384), pool_size=(12, 12), + crop_pct=1.0, interpolation='bicubic'), + + 'mobilenetv4_conv_large.e600_r384_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), + crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_conv_large.e500_r256_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), + + 'mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_hybrid_medium.ix_e550_r256_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_hybrid_medium.ix_e550_r384_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), + crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_hybrid_medium.e500_r224_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_hybrid_medium.e200_r256_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_hybrid_large.ix_e600_r384_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), + crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_hybrid_large.e600_r384_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), + crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), + + # experimental + 'mobilenetv4_conv_aa_medium.untrained': _cfg( + # hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, interpolation='bicubic'), + 'mobilenetv4_conv_blur_medium.e500_r224_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 448, 448), pool_size=(14, 14), + crop_pct=0.95, test_input_size=(3, 544, 544), 
test_crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), + crop_pct=0.95, test_input_size=(3, 480, 480), test_crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_conv_aa_large.e600_r384_in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), + crop_pct=0.95, test_input_size=(3, 480, 480), test_crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_conv_aa_large.e230_r384_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821, + input_size=(3, 384, 384), pool_size=(12, 12), + crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'), + 'mobilenetv4_hybrid_medium_075.untrained': _cfg( + # hf_hub_id='timm/', + crop_pct=0.95, interpolation='bicubic'), + 'mobilenetv4_hybrid_large_075.untrained': _cfg( + # hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, interpolation='bicubic'), +}) + + +@register_model +def mobilenetv3_large_075(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_large_100(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) + return model + +@register_model +def mobilenetv3_large_150d(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_large_150d', 1.5, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + +@register_model +def mobilenetv3_small_050(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_050', 0.50, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_small_075(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_small_100(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_rw(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V3 """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_075(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V3 """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_100(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V3 """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_minimal_100(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V3 """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = 
_gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_075(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V3 """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_100(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V3 """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_minimal_100(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V3 """ + kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) + kwargs.setdefault('pad_type', 'same') + model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_b(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ FBNetV3-B """ + model = _gen_fbnetv3('fbnetv3_b', pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_d(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ FBNetV3-D """ + model = _gen_fbnetv3('fbnetv3_d', pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_g(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ FBNetV3-G """ + model = _gen_fbnetv3('fbnetv3_g', pretrained=pretrained, **kwargs) + return model + + +@register_model +def lcnet_035(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ PP-LCNet 0.35""" + model = _gen_lcnet('lcnet_035', 0.35, pretrained=pretrained, **kwargs) + return model + + +@register_model +def lcnet_050(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ PP-LCNet 0.5""" + model = _gen_lcnet('lcnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def lcnet_075(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ PP-LCNet 1.0""" + model = _gen_lcnet('lcnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def lcnet_100(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ PP-LCNet 1.0""" + model = _gen_lcnet('lcnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def lcnet_150(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ PP-LCNet 1.5""" + model = _gen_lcnet('lcnet_150', 1.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv4_conv_small_035(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V4 """ + model = _gen_mobilenet_v4('mobilenetv4_conv_small_035', 0.35, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv4_conv_small_050(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V4 """ + model = _gen_mobilenet_v4('mobilenetv4_conv_small_050', 0.50, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv4_conv_small(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V4 """ + model = _gen_mobilenet_v4('mobilenetv4_conv_small', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv4_conv_medium(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V4 """ + model = _gen_mobilenet_v4('mobilenetv4_conv_medium', 
1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv4_conv_large(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V4 """ + model = _gen_mobilenet_v4('mobilenetv4_conv_large', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv4_hybrid_medium(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V4 Hybrid """ + model = _gen_mobilenet_v4('mobilenetv4_hybrid_medium', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv4_hybrid_large(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V4 Hybrid""" + model = _gen_mobilenet_v4('mobilenetv4_hybrid_large', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv4_conv_aa_medium(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V4 w/ AvgPool AA """ + model = _gen_mobilenet_v4('mobilenetv4_conv_aa_medium', 1.0, pretrained=pretrained, aa_layer='avg', **kwargs) + return model + + +@register_model +def mobilenetv4_conv_blur_medium(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V4 Conv w/ Blur AA """ + model = _gen_mobilenet_v4('mobilenetv4_conv_blur_medium', 1.0, pretrained=pretrained, aa_layer='blurpc', **kwargs) + return model + + +@register_model +def mobilenetv4_conv_aa_large(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V4 w/ AvgPool AA """ + model = _gen_mobilenet_v4('mobilenetv4_conv_aa_large', 1.0, pretrained=pretrained, aa_layer='avg', **kwargs) + return model + + +@register_model +def mobilenetv4_hybrid_medium_075(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V4 Hybrid """ + model = _gen_mobilenet_v4('mobilenetv4_hybrid_medium_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv4_hybrid_large_075(pretrained: bool = False, **kwargs) -> MobileNetV3: + """ MobileNet V4 Hybrid""" + model = _gen_mobilenet_v4('mobilenetv4_hybrid_large_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +register_model_deprecations(__name__, { + 'mobilenetv3_large_100_miil': 'mobilenetv3_large_100.miil_in21k_ft_in1k', + 'mobilenetv3_large_100_miil_in21k': 'mobilenetv3_large_100.miil_in21k', +}) diff --git a/pytorch-image-models/timm/models/mobilevit.py b/pytorch-image-models/timm/models/mobilevit.py new file mode 100644 index 0000000000000000000000000000000000000000..9c84871e6daeca044865574437f9e0294a6f3e7d --- /dev/null +++ b/pytorch-image-models/timm/models/mobilevit.py @@ -0,0 +1,681 @@ +""" MobileViT + +Paper: +V1: `MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer` - https://arxiv.org/abs/2110.02178 +V2: `Separable Self-attention for Mobile Vision Transformers` - https://arxiv.org/abs/2206.02680 + +MobileVitBlock and checkpoints adapted from https://github.com/apple/ml-cvnets (original copyright below) +License: https://github.com/apple/ml-cvnets/blob/main/LICENSE (Apple open source) + +Rest of code, ByobNet, and Transformer block hacked together by / Copyright 2022, Ross Wightman +""" +# +# For licensing see accompanying LICENSE file. +# Copyright (C) 2020 Apple Inc. All Rights Reserved. 
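Before the body of mobilevit.py begins, it is worth noting how the entrypoints and default_cfgs registered at the end of mobilenetv3.py above are normally consumed: timm.create_model resolves the dotted suffix (for example .e500_r256_in1k) to one of the pretrained cfgs listed earlier. A minimal usage sketch follows, assuming only a standard timm install; the shapes in the comments follow from the cfg entries above, not from anything new.

# Usage sketch (not part of the patch); assumes timm is installed with this module on the path.
import torch
import timm

# The dotted tag selects one of the pretrained cfgs registered above; pretrained=False
# builds the architecture without downloading weights.
model = timm.create_model('mobilenetv4_conv_medium.e500_r256_in1k', pretrained=False)
model.eval()

with torch.no_grad():
    logits = model(torch.randn(1, 3, 256, 256))
print(logits.shape)  # torch.Size([1, 1000]) for the in1k cfgs

# features_only=True returns the intermediate feature maps (one per stride) instead of logits.
backbone = timm.create_model('mobilenetv4_conv_medium', features_only=True)
with torch.no_grad():
    fmaps = backbone(torch.randn(1, 3, 256, 256))
print([tuple(f.shape) for f in fmaps])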
+# +import math +from typing import Callable, Tuple, Optional + +import torch +import torch.nn.functional as F +from torch import nn + +from timm.layers import to_2tuple, make_divisible, GroupNorm1, ConvMlp, DropPath, is_exportable +from ._builder import build_model_with_cfg +from ._features_fx import register_notrace_module +from ._registry import register_model, generate_default_cfgs, register_model_deprecations +from .byobnet import register_block, ByoBlockCfg, ByoModelCfg, ByobNet, LayerFn, num_groups +from .vision_transformer import Block as TransformerBlock + +__all__ = [] + + +def _inverted_residual_block(d, c, s, br=4.0): + # inverted residual is a bottleneck block with bottle_ratio > 1 applied to in_chs, linear output, gs=1 (depthwise) + return ByoBlockCfg( + type='bottle', d=d, c=c, s=s, gs=1, br=br, + block_kwargs=dict(bottle_in=True, linear_out=True)) + + +def _mobilevit_block(d, c, s, transformer_dim, transformer_depth, patch_size=4, br=4.0): + # inverted residual + mobilevit blocks as per MobileViT network + return ( + _inverted_residual_block(d=d, c=c, s=s, br=br), + ByoBlockCfg( + type='mobilevit', d=1, c=c, s=1, + block_kwargs=dict( + transformer_dim=transformer_dim, + transformer_depth=transformer_depth, + patch_size=patch_size) + ) + ) + + +def _mobilevitv2_block(d, c, s, transformer_depth, patch_size=2, br=2.0, transformer_br=0.5): + # inverted residual + mobilevit blocks as per MobileViT network + return ( + _inverted_residual_block(d=d, c=c, s=s, br=br), + ByoBlockCfg( + type='mobilevit2', d=1, c=c, s=1, br=transformer_br, gs=1, + block_kwargs=dict( + transformer_depth=transformer_depth, + patch_size=patch_size) + ) + ) + + +def _mobilevitv2_cfg(multiplier=1.0): + chs = (64, 128, 256, 384, 512) + if multiplier != 1.0: + chs = tuple([int(c * multiplier) for c in chs]) + cfg = ByoModelCfg( + blocks=( + _inverted_residual_block(d=1, c=chs[0], s=1, br=2.0), + _inverted_residual_block(d=2, c=chs[1], s=2, br=2.0), + _mobilevitv2_block(d=1, c=chs[2], s=2, transformer_depth=2), + _mobilevitv2_block(d=1, c=chs[3], s=2, transformer_depth=4), + _mobilevitv2_block(d=1, c=chs[4], s=2, transformer_depth=3), + ), + stem_chs=int(32 * multiplier), + stem_type='3x3', + stem_pool='', + downsample='', + act_layer='silu', + ) + return cfg + + +model_cfgs = dict( + mobilevit_xxs=ByoModelCfg( + blocks=( + _inverted_residual_block(d=1, c=16, s=1, br=2.0), + _inverted_residual_block(d=3, c=24, s=2, br=2.0), + _mobilevit_block(d=1, c=48, s=2, transformer_dim=64, transformer_depth=2, patch_size=2, br=2.0), + _mobilevit_block(d=1, c=64, s=2, transformer_dim=80, transformer_depth=4, patch_size=2, br=2.0), + _mobilevit_block(d=1, c=80, s=2, transformer_dim=96, transformer_depth=3, patch_size=2, br=2.0), + ), + stem_chs=16, + stem_type='3x3', + stem_pool='', + downsample='', + act_layer='silu', + num_features=320, + ), + + mobilevit_xs=ByoModelCfg( + blocks=( + _inverted_residual_block(d=1, c=32, s=1), + _inverted_residual_block(d=3, c=48, s=2), + _mobilevit_block(d=1, c=64, s=2, transformer_dim=96, transformer_depth=2, patch_size=2), + _mobilevit_block(d=1, c=80, s=2, transformer_dim=120, transformer_depth=4, patch_size=2), + _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=3, patch_size=2), + ), + stem_chs=16, + stem_type='3x3', + stem_pool='', + downsample='', + act_layer='silu', + num_features=384, + ), + + mobilevit_s=ByoModelCfg( + blocks=( + _inverted_residual_block(d=1, c=32, s=1), + _inverted_residual_block(d=3, c=64, s=2), + _mobilevit_block(d=1, c=96, s=2, 
transformer_dim=144, transformer_depth=2, patch_size=2), + _mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2), + _mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2), + ), + stem_chs=16, + stem_type='3x3', + stem_pool='', + downsample='', + act_layer='silu', + num_features=640, + ), + + semobilevit_s=ByoModelCfg( + blocks=( + _inverted_residual_block(d=1, c=32, s=1), + _inverted_residual_block(d=3, c=64, s=2), + _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2), + _mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2), + _mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2), + ), + stem_chs=16, + stem_type='3x3', + stem_pool='', + downsample='', + attn_layer='se', + attn_kwargs=dict(rd_ratio=1/8), + num_features=640, + ), + + mobilevitv2_050=_mobilevitv2_cfg(.50), + mobilevitv2_075=_mobilevitv2_cfg(.75), + mobilevitv2_125=_mobilevitv2_cfg(1.25), + mobilevitv2_100=_mobilevitv2_cfg(1.0), + mobilevitv2_150=_mobilevitv2_cfg(1.5), + mobilevitv2_175=_mobilevitv2_cfg(1.75), + mobilevitv2_200=_mobilevitv2_cfg(2.0), +) + + +@register_notrace_module +class MobileVitBlock(nn.Module): + """ MobileViT block + Paper: https://arxiv.org/abs/2110.02178?context=cs.LG + """ + def __init__( + self, + in_chs: int, + out_chs: Optional[int] = None, + kernel_size: int = 3, + stride: int = 1, + bottle_ratio: float = 1.0, + group_size: Optional[int] = None, + dilation: Tuple[int, int] = (1, 1), + mlp_ratio: float = 2.0, + transformer_dim: Optional[int] = None, + transformer_depth: int = 2, + patch_size: int = 8, + num_heads: int = 4, + attn_drop: float = 0., + drop: int = 0., + no_fusion: bool = False, + drop_path_rate: float = 0., + layers: LayerFn = None, + transformer_norm_layer: Callable = nn.LayerNorm, + **kwargs, # eat unused args + ): + super(MobileVitBlock, self).__init__() + + layers = layers or LayerFn() + groups = num_groups(group_size, in_chs) + out_chs = out_chs or in_chs + transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs) + + self.conv_kxk = layers.conv_norm_act( + in_chs, in_chs, kernel_size=kernel_size, + stride=stride, groups=groups, dilation=dilation[0]) + self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False) + + self.transformer = nn.Sequential(*[ + TransformerBlock( + transformer_dim, + mlp_ratio=mlp_ratio, + num_heads=num_heads, + qkv_bias=True, + attn_drop=attn_drop, + proj_drop=drop, + drop_path=drop_path_rate, + act_layer=layers.act, + norm_layer=transformer_norm_layer, + ) + for _ in range(transformer_depth) + ]) + self.norm = transformer_norm_layer(transformer_dim) + + self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1) + + if no_fusion: + self.conv_fusion = None + else: + self.conv_fusion = layers.conv_norm_act(in_chs + out_chs, out_chs, kernel_size=kernel_size, stride=1) + + self.patch_size = to_2tuple(patch_size) + self.patch_area = self.patch_size[0] * self.patch_size[1] + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x + + # Local representation + x = self.conv_kxk(x) + x = self.conv_1x1(x) + + # Unfold (feature map -> patches) + patch_h, patch_w = self.patch_size + B, C, H, W = x.shape + new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w + num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w # n_h, n_w + num_patches = num_patch_h * num_patch_w # N + 
interpolate = False + if new_h != H or new_w != W: + # Note: Padding can be done, but then it needs to be handled in attention function. + x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=False) + interpolate = True + + # [B, C, H, W] --> [B * C * n_h, n_w, p_h, p_w] + x = x.reshape(B * C * num_patch_h, patch_h, num_patch_w, patch_w).transpose(1, 2) + # [B * C * n_h, n_w, p_h, p_w] --> [BP, N, C] where P = p_h * p_w and N = n_h * n_w + x = x.reshape(B, C, num_patches, self.patch_area).transpose(1, 3).reshape(B * self.patch_area, num_patches, -1) + + # Global representations + x = self.transformer(x) + x = self.norm(x) + + # Fold (patch -> feature map) + # [B, P, N, C] --> [B*C*n_h, n_w, p_h, p_w] + x = x.contiguous().view(B, self.patch_area, num_patches, -1) + x = x.transpose(1, 3).reshape(B * C * num_patch_h, num_patch_w, patch_h, patch_w) + # [B*C*n_h, n_w, p_h, p_w] --> [B*C*n_h, p_h, n_w, p_w] --> [B, C, H, W] + x = x.transpose(1, 2).reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w) + if interpolate: + x = F.interpolate(x, size=(H, W), mode="bilinear", align_corners=False) + + x = self.conv_proj(x) + if self.conv_fusion is not None: + x = self.conv_fusion(torch.cat((shortcut, x), dim=1)) + return x + + +class LinearSelfAttention(nn.Module): + """ + This layer applies a self-attention with linear complexity, as described in `https://arxiv.org/abs/2206.02680` + This layer can be used for self- as well as cross-attention. + Args: + embed_dim (int): :math:`C` from an expected input of size :math:`(N, C, H, W)` + attn_drop (float): Dropout value for context scores. Default: 0.0 + bias (bool): Use bias in learnable layers. Default: True + Shape: + - Input: :math:`(N, C, P, N)` where :math:`N` is the batch size, :math:`C` is the input channels, + :math:`P` is the number of pixels in the patch, and :math:`N` is the number of patches + - Output: same as the input + .. note:: + For MobileViTv2, we unfold the feature map [B, C, H, W] into [B, C, P, N] where P is the number of pixels + in a patch and N is the number of patches. Because channel is the first dimension in this unfolded tensor, + we use point-wise convolution (instead of a linear layer). This avoids a transpose operation (which may be + expensive on resource-constrained devices) that may be required to convert the unfolded tensor from + channel-first to channel-last format in case of a linear layer. 
+ """ + + def __init__( + self, + embed_dim: int, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + bias: bool = True, + ) -> None: + super().__init__() + self.embed_dim = embed_dim + + self.qkv_proj = nn.Conv2d( + in_channels=embed_dim, + out_channels=1 + (2 * embed_dim), + bias=bias, + kernel_size=1, + ) + self.attn_drop = nn.Dropout(attn_drop) + self.out_proj = nn.Conv2d( + in_channels=embed_dim, + out_channels=embed_dim, + bias=bias, + kernel_size=1, + ) + self.out_drop = nn.Dropout(proj_drop) + + def _forward_self_attn(self, x: torch.Tensor) -> torch.Tensor: + # [B, C, P, N] --> [B, h + 2d, P, N] + qkv = self.qkv_proj(x) + + # Project x into query, key and value + # Query --> [B, 1, P, N] + # value, key --> [B, d, P, N] + query, key, value = qkv.split([1, self.embed_dim, self.embed_dim], dim=1) + + # apply softmax along N dimension + context_scores = F.softmax(query, dim=-1) + context_scores = self.attn_drop(context_scores) + + # Compute context vector + # [B, d, P, N] x [B, 1, P, N] -> [B, d, P, N] --> [B, d, P, 1] + context_vector = (key * context_scores).sum(dim=-1, keepdim=True) + + # combine context vector with values + # [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N] + out = F.relu(value) * context_vector.expand_as(value) + out = self.out_proj(out) + out = self.out_drop(out) + return out + + @torch.jit.ignore() + def _forward_cross_attn(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: + # x --> [B, C, P, N] + # x_prev = [B, C, P, M] + batch_size, in_dim, kv_patch_area, kv_num_patches = x.shape + q_patch_area, q_num_patches = x.shape[-2:] + + assert ( + kv_patch_area == q_patch_area + ), "The number of pixels in a patch for query and key_value should be the same" + + # compute query, key, and value + # [B, C, P, M] --> [B, 1 + d, P, M] + qk = F.conv2d( + x_prev, + weight=self.qkv_proj.weight[:self.embed_dim + 1], + bias=self.qkv_proj.bias[:self.embed_dim + 1], + ) + + # [B, 1 + d, P, M] --> [B, 1, P, M], [B, d, P, M] + query, key = qk.split([1, self.embed_dim], dim=1) + # [B, C, P, N] --> [B, d, P, N] + value = F.conv2d( + x, + weight=self.qkv_proj.weight[self.embed_dim + 1], + bias=self.qkv_proj.bias[self.embed_dim + 1] if self.qkv_proj.bias is not None else None, + ) + + # apply softmax along M dimension + context_scores = F.softmax(query, dim=-1) + context_scores = self.attn_drop(context_scores) + + # compute context vector + # [B, d, P, M] * [B, 1, P, M] -> [B, d, P, M] --> [B, d, P, 1] + context_vector = (key * context_scores).sum(dim=-1, keepdim=True) + + # combine context vector with values + # [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N] + out = F.relu(value) * context_vector.expand_as(value) + out = self.out_proj(out) + out = self.out_drop(out) + return out + + def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: + if x_prev is None: + return self._forward_self_attn(x) + else: + return self._forward_cross_attn(x, x_prev=x_prev) + + +class LinearTransformerBlock(nn.Module): + """ + This class defines the pre-norm transformer encoder with linear self-attention in `MobileViTv2 paper <>`_ + Args: + embed_dim (int): :math:`C_{in}` from an expected input of size :math:`(B, C_{in}, P, N)` + mlp_ratio (float): Inner dimension ratio of the FFN relative to embed_dim + drop (float): Dropout rate. Default: 0.0 + attn_drop (float): Dropout rate for attention in multi-head attention. Default: 0.0 + drop_path (float): Stochastic depth rate Default: 0.0 + norm_layer (Callable): Normalization layer. 
Default: layer_norm_2d + Shape: + - Input: :math:`(B, C_{in}, P, N)` where :math:`B` is batch size, :math:`C_{in}` is input embedding dim, + :math:`P` is number of pixels in a patch, and :math:`N` is number of patches, + - Output: same shape as the input + """ + + def __init__( + self, + embed_dim: int, + mlp_ratio: float = 2.0, + drop: float = 0.0, + attn_drop: float = 0.0, + drop_path: float = 0.0, + act_layer=None, + norm_layer=None, + ) -> None: + super().__init__() + act_layer = act_layer or nn.SiLU + norm_layer = norm_layer or GroupNorm1 + + self.norm1 = norm_layer(embed_dim) + self.attn = LinearSelfAttention(embed_dim=embed_dim, attn_drop=attn_drop, proj_drop=drop) + self.drop_path1 = DropPath(drop_path) + + self.norm2 = norm_layer(embed_dim) + self.mlp = ConvMlp( + in_features=embed_dim, + hidden_features=int(embed_dim * mlp_ratio), + act_layer=act_layer, + drop=drop) + self.drop_path2 = DropPath(drop_path) + + def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: + if x_prev is None: + # self-attention + x = x + self.drop_path1(self.attn(self.norm1(x))) + else: + # cross-attention + res = x + x = self.norm1(x) # norm + x = self.attn(x, x_prev) # attn + x = self.drop_path1(x) + res # residual + + # Feed forward network + x = x + self.drop_path2(self.mlp(self.norm2(x))) + return x + + +@register_notrace_module +class MobileVitV2Block(nn.Module): + """ + This class defines the `MobileViTv2 block <>`_ + """ + + def __init__( + self, + in_chs: int, + out_chs: Optional[int] = None, + kernel_size: int = 3, + bottle_ratio: float = 1.0, + group_size: Optional[int] = 1, + dilation: Tuple[int, int] = (1, 1), + mlp_ratio: float = 2.0, + transformer_dim: Optional[int] = None, + transformer_depth: int = 2, + patch_size: int = 8, + attn_drop: float = 0., + drop: int = 0., + drop_path_rate: float = 0., + layers: LayerFn = None, + transformer_norm_layer: Callable = GroupNorm1, + **kwargs, # eat unused args + ): + super(MobileVitV2Block, self).__init__() + layers = layers or LayerFn() + groups = num_groups(group_size, in_chs) + out_chs = out_chs or in_chs + transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs) + + self.conv_kxk = layers.conv_norm_act( + in_chs, in_chs, kernel_size=kernel_size, + stride=1, groups=groups, dilation=dilation[0]) + self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False) + + self.transformer = nn.Sequential(*[ + LinearTransformerBlock( + transformer_dim, + mlp_ratio=mlp_ratio, + attn_drop=attn_drop, + drop=drop, + drop_path=drop_path_rate, + act_layer=layers.act, + norm_layer=transformer_norm_layer + ) + for _ in range(transformer_depth) + ]) + self.norm = transformer_norm_layer(transformer_dim) + + self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1, apply_act=False) + + self.patch_size = to_2tuple(patch_size) + self.patch_area = self.patch_size[0] * self.patch_size[1] + self.coreml_exportable = is_exportable() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, C, H, W = x.shape + patch_h, patch_w = self.patch_size + new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w + num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w # n_h, n_w + num_patches = num_patch_h * num_patch_w # N + if new_h != H or new_w != W: + x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=True) + + # Local representation + x = self.conv_kxk(x) + x = self.conv_1x1(x) + + # Unfold (feature map -> patches), [B, C, H, 
W] -> [B, C, P, N] + C = x.shape[1] + if self.coreml_exportable: + x = F.unfold(x, kernel_size=(patch_h, patch_w), stride=(patch_h, patch_w)) + else: + x = x.reshape(B, C, num_patch_h, patch_h, num_patch_w, patch_w).permute(0, 1, 3, 5, 2, 4) + x = x.reshape(B, C, -1, num_patches) + + # Global representations + x = self.transformer(x) + x = self.norm(x) + + # Fold (patches -> feature map), [B, C, P, N] --> [B, C, H, W] + if self.coreml_exportable: + # adopted from https://github.com/apple/ml-cvnets/blob/main/cvnets/modules/mobilevit_block.py#L609-L624 + x = x.reshape(B, C * patch_h * patch_w, num_patch_h, num_patch_w) + x = F.pixel_shuffle(x, upscale_factor=patch_h) + else: + x = x.reshape(B, C, patch_h, patch_w, num_patch_h, num_patch_w).permute(0, 1, 4, 2, 5, 3) + x = x.reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w) + + x = self.conv_proj(x) + return x + + +register_block('mobilevit', MobileVitBlock) +register_block('mobilevit2', MobileVitV2Block) + + +def _create_mobilevit(variant, cfg_variant=None, pretrained=False, **kwargs): + return build_model_with_cfg( + ByobNet, variant, pretrained, + model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +def _create_mobilevit2(variant, cfg_variant=None, pretrained=False, **kwargs): + return build_model_with_cfg( + ByobNet, variant, pretrained, + model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': (0., 0., 0.), 'std': (1., 1., 1.), + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + 'fixed_input_size': False, + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'mobilevit_xxs.cvnets_in1k': _cfg(hf_hub_id='timm/'), + 'mobilevit_xs.cvnets_in1k': _cfg(hf_hub_id='timm/'), + 'mobilevit_s.cvnets_in1k': _cfg(hf_hub_id='timm/'), + + 'mobilevitv2_050.cvnets_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.888), + 'mobilevitv2_075.cvnets_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.888), + 'mobilevitv2_100.cvnets_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.888), + 'mobilevitv2_125.cvnets_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.888), + 'mobilevitv2_150.cvnets_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.888), + 'mobilevitv2_175.cvnets_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.888), + 'mobilevitv2_200.cvnets_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.888), + + 'mobilevitv2_150.cvnets_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.888), + 'mobilevitv2_175.cvnets_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.888), + 'mobilevitv2_200.cvnets_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + crop_pct=0.888), + + 'mobilevitv2_150.cvnets_in22k_ft_in1k_384': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + 'mobilevitv2_175.cvnets_in22k_ft_in1k_384': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + 'mobilevitv2_200.cvnets_in22k_ft_in1k_384': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), +}) + + +@register_model +def mobilevit_xxs(pretrained=False, **kwargs) -> ByobNet: + return _create_mobilevit('mobilevit_xxs', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevit_xs(pretrained=False, **kwargs) -> ByobNet: + return 
_create_mobilevit('mobilevit_xs', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevit_s(pretrained=False, **kwargs) -> ByobNet: + return _create_mobilevit('mobilevit_s', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_050(pretrained=False, **kwargs) -> ByobNet: + return _create_mobilevit('mobilevitv2_050', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_075(pretrained=False, **kwargs) -> ByobNet: + return _create_mobilevit('mobilevitv2_075', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_100(pretrained=False, **kwargs) -> ByobNet: + return _create_mobilevit('mobilevitv2_100', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_125(pretrained=False, **kwargs) -> ByobNet: + return _create_mobilevit('mobilevitv2_125', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_150(pretrained=False, **kwargs) -> ByobNet: + return _create_mobilevit('mobilevitv2_150', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_175(pretrained=False, **kwargs) -> ByobNet: + return _create_mobilevit('mobilevitv2_175', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_200(pretrained=False, **kwargs) -> ByobNet: + return _create_mobilevit('mobilevitv2_200', pretrained=pretrained, **kwargs) + + +register_model_deprecations(__name__, { + 'mobilevitv2_150_in22ft1k': 'mobilevitv2_150.cvnets_in22k_ft_in1k', + 'mobilevitv2_175_in22ft1k': 'mobilevitv2_175.cvnets_in22k_ft_in1k', + 'mobilevitv2_200_in22ft1k': 'mobilevitv2_200.cvnets_in22k_ft_in1k', + + 'mobilevitv2_150_384_in22ft1k': 'mobilevitv2_150.cvnets_in22k_ft_in1k_384', + 'mobilevitv2_175_384_in22ft1k': 'mobilevitv2_175.cvnets_in22k_ft_in1k_384', + 'mobilevitv2_200_384_in22ft1k': 'mobilevitv2_200.cvnets_in22k_ft_in1k_384', +}) \ No newline at end of file diff --git a/pytorch-image-models/timm/models/mvitv2.py b/pytorch-image-models/timm/models/mvitv2.py new file mode 100644 index 0000000000000000000000000000000000000000..167ebb9e823b52f0a76d63f99dd8fc89d1ef9d5f --- /dev/null +++ b/pytorch-image-models/timm/models/mvitv2.py @@ -0,0 +1,1120 @@ +""" Multi-Scale Vision Transformer v2 + +@inproceedings{li2021improved, + title={MViTv2: Improved multiscale vision transformers for classification and detection}, + author={Li, Yanghao and Wu, Chao-Yuan and Fan, Haoqi and Mangalam, Karttikeya and Xiong, Bo and Malik, Jitendra and Feichtenhofer, Christoph}, + booktitle={CVPR}, + year={2022} +} + +Code adapted from original Apache 2.0 licensed impl at https://github.com/facebookresearch/mvit +Original copyright below. + +Modifications and timm support by / Copyright 2022, Ross Wightman +""" +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved. All Rights Reserved. 
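With mobilevit.py complete above, one detail worth restating before the MViTv2 code starts is the separable ("linear") self-attention used by LinearSelfAttention._forward_self_attn earlier in that file: a single-channel query score is softmaxed over the N patches, folded into the keys to form one context vector per pixel position, then broadcast back over ReLU-gated values, so the cost grows linearly in N. The function below is a plain-tensor restatement of that forward pass with random weights, kept only to make the shapes explicit; it is not part of the patch.

# Plain-tensor restatement of the separable self-attention above (shapes only, random weights).
import torch
import torch.nn.functional as F

def separable_self_attention(x, w_qkv, b_qkv, w_out, b_out):
    """x: (B, C, P, N) unfolded feature map; w_qkv: (1 + 2C, C, 1, 1) point-wise conv weights."""
    C = x.shape[1]
    qkv = F.conv2d(x, w_qkv, b_qkv)                      # (B, 1 + 2C, P, N)
    query, key, value = qkv.split([1, C, C], dim=1)      # (B, 1, P, N), (B, C, P, N), (B, C, P, N)
    scores = F.softmax(query, dim=-1)                    # softmax over the N patches -> linear, not N^2
    context = (key * scores).sum(dim=-1, keepdim=True)   # (B, C, P, 1) global context per pixel position
    out = F.relu(value) * context.expand_as(value)       # broadcast context back over all patches
    return F.conv2d(out, w_out, b_out)                   # (B, C, P, N)

if __name__ == '__main__':
    B, C, P, N = 2, 64, 4, 49
    x = torch.randn(B, C, P, N)
    w_qkv, b_qkv = torch.randn(1 + 2 * C, C, 1, 1), torch.randn(1 + 2 * C)
    w_out, b_out = torch.randn(C, C, 1, 1), torch.randn(C)
    print(separable_self_attention(x, w_qkv, b_qkv, w_out, b_out).shape)  # torch.Size([2, 64, 4, 49])

Keeping the unfolded tensor in (B, C, P, N) order is what lets both projections stay point-wise convolutions, which is exactly the transpose-avoidance the LinearSelfAttention docstring above calls out.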
+import operator +from collections import OrderedDict +from dataclasses import dataclass +from functools import partial, reduce +from typing import Union, List, Tuple, Optional + +import torch +import torch.utils.checkpoint as checkpoint +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import Mlp, DropPath, trunc_normal_tf_, get_norm_layer, to_2tuple +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._features_fx import register_notrace_function +from ._registry import register_model, register_model_deprecations, generate_default_cfgs + +__all__ = ['MultiScaleVit', 'MultiScaleVitCfg'] # model_registry will add each entrypoint fn to this + + +@dataclass +class MultiScaleVitCfg: + depths: Tuple[int, ...] = (2, 3, 16, 3) + embed_dim: Union[int, Tuple[int, ...]] = 96 + num_heads: Union[int, Tuple[int, ...]] = 1 + mlp_ratio: float = 4. + pool_first: bool = False + expand_attn: bool = True + qkv_bias: bool = True + use_cls_token: bool = False + use_abs_pos: bool = False + residual_pooling: bool = True + mode: str = 'conv' + kernel_qkv: Tuple[int, int] = (3, 3) + stride_q: Optional[Tuple[Tuple[int, int]]] = ((1, 1), (2, 2), (2, 2), (2, 2)) + stride_kv: Optional[Tuple[Tuple[int, int]]] = None + stride_kv_adaptive: Optional[Tuple[int, int]] = (4, 4) + patch_kernel: Tuple[int, int] = (7, 7) + patch_stride: Tuple[int, int] = (4, 4) + patch_padding: Tuple[int, int] = (3, 3) + pool_type: str = 'max' + rel_pos_type: str = 'spatial' + act_layer: Union[str, Tuple[str, str]] = 'gelu' + norm_layer: Union[str, Tuple[str, str]] = 'layernorm' + norm_eps: float = 1e-6 + + def __post_init__(self): + num_stages = len(self.depths) + if not isinstance(self.embed_dim, (tuple, list)): + self.embed_dim = tuple(self.embed_dim * 2 ** i for i in range(num_stages)) + assert len(self.embed_dim) == num_stages + + if not isinstance(self.num_heads, (tuple, list)): + self.num_heads = tuple(self.num_heads * 2 ** i for i in range(num_stages)) + assert len(self.num_heads) == num_stages + + if self.stride_kv_adaptive is not None and self.stride_kv is None: + _stride_kv = self.stride_kv_adaptive + pool_kv_stride = [] + for i in range(num_stages): + if min(self.stride_q[i]) > 1: + _stride_kv = [ + max(_stride_kv[d] // self.stride_q[i][d], 1) + for d in range(len(_stride_kv)) + ] + pool_kv_stride.append(tuple(_stride_kv)) + self.stride_kv = tuple(pool_kv_stride) + + +def prod(iterable): + return reduce(operator.mul, iterable, 1) + + +class PatchEmbed(nn.Module): + """ + PatchEmbed. 
+ """ + + def __init__( + self, + dim_in=3, + dim_out=768, + kernel=(7, 7), + stride=(4, 4), + padding=(3, 3), + ): + super().__init__() + + self.proj = nn.Conv2d( + dim_in, + dim_out, + kernel_size=kernel, + stride=stride, + padding=padding, + ) + + def forward(self, x) -> Tuple[torch.Tensor, List[int]]: + x = self.proj(x) + # B C H W -> B HW C + return x.flatten(2).transpose(1, 2), x.shape[-2:] + + +@register_notrace_function +def reshape_pre_pool( + x, + feat_size: List[int], + has_cls_token: bool = True +) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + H, W = feat_size + if has_cls_token: + cls_tok, x = x[:, :, :1, :], x[:, :, 1:, :] + else: + cls_tok = None + x = x.reshape(-1, H, W, x.shape[-1]).permute(0, 3, 1, 2).contiguous() + return x, cls_tok + + +@register_notrace_function +def reshape_post_pool( + x, + num_heads: int, + cls_tok: Optional[torch.Tensor] = None +) -> Tuple[torch.Tensor, List[int]]: + feat_size = [x.shape[2], x.shape[3]] + L_pooled = x.shape[2] * x.shape[3] + x = x.reshape(-1, num_heads, x.shape[1], L_pooled).transpose(2, 3) + if cls_tok is not None: + x = torch.cat((cls_tok, x), dim=2) + return x, feat_size + + +@register_notrace_function +def cal_rel_pos_type( + attn: torch.Tensor, + q: torch.Tensor, + has_cls_token: bool, + q_size: List[int], + k_size: List[int], + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, +): + """ + Spatial Relative Positional Embeddings. + """ + sp_idx = 1 if has_cls_token else 0 + q_h, q_w = q_size + k_h, k_w = k_size + + # Scale up rel pos if shapes for q and k are different. + q_h_ratio = max(k_h / q_h, 1.0) + k_h_ratio = max(q_h / k_h, 1.0) + dist_h = ( + torch.arange(q_h, device=q.device).unsqueeze(-1) * q_h_ratio - + torch.arange(k_h, device=q.device).unsqueeze(0) * k_h_ratio + ) + dist_h += (k_h - 1) * k_h_ratio + q_w_ratio = max(k_w / q_w, 1.0) + k_w_ratio = max(q_w / k_w, 1.0) + dist_w = ( + torch.arange(q_w, device=q.device).unsqueeze(-1) * q_w_ratio - + torch.arange(k_w, device=q.device).unsqueeze(0) * k_w_ratio + ) + dist_w += (k_w - 1) * k_w_ratio + + rel_h = rel_pos_h[dist_h.long()] + rel_w = rel_pos_w[dist_w.long()] + + B, n_head, q_N, dim = q.shape + + r_q = q[:, :, sp_idx:].reshape(B, n_head, q_h, q_w, dim) + rel_h = torch.einsum("byhwc,hkc->byhwk", r_q, rel_h) + rel_w = torch.einsum("byhwc,wkc->byhwk", r_q, rel_w) + + attn[:, :, sp_idx:, sp_idx:] = ( + attn[:, :, sp_idx:, sp_idx:].view(B, -1, q_h, q_w, k_h, k_w) + + rel_h.unsqueeze(-1) + + rel_w.unsqueeze(-2) + ).view(B, -1, q_h * q_w, k_h * k_w) + + return attn + + +class MultiScaleAttentionPoolFirst(nn.Module): + def __init__( + self, + dim, + dim_out, + feat_size, + num_heads=8, + qkv_bias=True, + mode="conv", + kernel_q=(1, 1), + kernel_kv=(1, 1), + stride_q=(1, 1), + stride_kv=(1, 1), + has_cls_token=True, + rel_pos_type='spatial', + residual_pooling=True, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.num_heads = num_heads + self.dim_out = dim_out + self.head_dim = dim_out // num_heads + self.scale = self.head_dim ** -0.5 + self.has_cls_token = has_cls_token + padding_q = tuple([int(q // 2) for q in kernel_q]) + padding_kv = tuple([int(kv // 2) for kv in kernel_kv]) + + self.q = nn.Linear(dim, dim_out, bias=qkv_bias) + self.k = nn.Linear(dim, dim_out, bias=qkv_bias) + self.v = nn.Linear(dim, dim_out, bias=qkv_bias) + self.proj = nn.Linear(dim_out, dim_out) + + # Skip pooling with kernel and stride size of (1, 1, 1). 
+ if prod(kernel_q) == 1 and prod(stride_q) == 1: + kernel_q = None + if prod(kernel_kv) == 1 and prod(stride_kv) == 1: + kernel_kv = None + self.mode = mode + self.unshared = mode == 'conv_unshared' + self.pool_q, self.pool_k, self.pool_v = None, None, None + self.norm_q, self.norm_k, self.norm_v = None, None, None + if mode in ("avg", "max"): + pool_op = nn.MaxPool2d if mode == "max" else nn.AvgPool2d + if kernel_q: + self.pool_q = pool_op(kernel_q, stride_q, padding_q) + if kernel_kv: + self.pool_k = pool_op(kernel_kv, stride_kv, padding_kv) + self.pool_v = pool_op(kernel_kv, stride_kv, padding_kv) + elif mode == "conv" or mode == "conv_unshared": + dim_conv = dim // num_heads if mode == "conv" else dim + if kernel_q: + self.pool_q = nn.Conv2d( + dim_conv, + dim_conv, + kernel_q, + stride=stride_q, + padding=padding_q, + groups=dim_conv, + bias=False, + ) + self.norm_q = norm_layer(dim_conv) + if kernel_kv: + self.pool_k = nn.Conv2d( + dim_conv, + dim_conv, + kernel_kv, + stride=stride_kv, + padding=padding_kv, + groups=dim_conv, + bias=False, + ) + self.norm_k = norm_layer(dim_conv) + self.pool_v = nn.Conv2d( + dim_conv, + dim_conv, + kernel_kv, + stride=stride_kv, + padding=padding_kv, + groups=dim_conv, + bias=False, + ) + self.norm_v = norm_layer(dim_conv) + else: + raise NotImplementedError(f"Unsupported model {mode}") + + # relative pos embedding + self.rel_pos_type = rel_pos_type + if self.rel_pos_type == 'spatial': + assert feat_size[0] == feat_size[1] + size = feat_size[0] + q_size = size // stride_q[1] if len(stride_q) > 0 else size + kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size + rel_sp_dim = 2 * max(q_size, kv_size) - 1 + + self.rel_pos_h = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) + trunc_normal_tf_(self.rel_pos_h, std=0.02) + trunc_normal_tf_(self.rel_pos_w, std=0.02) + + self.residual_pooling = residual_pooling + + def forward(self, x, feat_size: List[int]): + B, N, _ = x.shape + + fold_dim = 1 if self.unshared else self.num_heads + x = x.reshape(B, N, fold_dim, -1).permute(0, 2, 1, 3) + q = k = v = x + + if self.pool_q is not None: + q, q_tok = reshape_pre_pool(q, feat_size, self.has_cls_token) + q = self.pool_q(q) + q, q_size = reshape_post_pool(q, self.num_heads, q_tok) + else: + q_size = feat_size + if self.norm_q is not None: + q = self.norm_q(q) + + if self.pool_k is not None: + k, k_tok = reshape_pre_pool(k, feat_size, self.has_cls_token) + k = self.pool_k(k) + k, k_size = reshape_post_pool(k, self.num_heads, k_tok) + else: + k_size = feat_size + if self.norm_k is not None: + k = self.norm_k(k) + + if self.pool_v is not None: + v, v_tok = reshape_pre_pool(v, feat_size, self.has_cls_token) + v = self.pool_v(v) + v, v_size = reshape_post_pool(v, self.num_heads, v_tok) + else: + v_size = feat_size + if self.norm_v is not None: + v = self.norm_v(v) + + q_N = q_size[0] * q_size[1] + int(self.has_cls_token) + q = q.transpose(1, 2).reshape(B, q_N, -1) + q = self.q(q).reshape(B, q_N, self.num_heads, -1).transpose(1, 2) + + k_N = k_size[0] * k_size[1] + int(self.has_cls_token) + k = k.transpose(1, 2).reshape(B, k_N, -1) + k = self.k(k).reshape(B, k_N, self.num_heads, -1) + + v_N = v_size[0] * v_size[1] + int(self.has_cls_token) + v = v.transpose(1, 2).reshape(B, v_N, -1) + v = self.v(v).reshape(B, v_N, self.num_heads, -1).transpose(1, 2) + + attn = (q * self.scale) @ k + if self.rel_pos_type == 'spatial': + attn = cal_rel_pos_type( + attn, + q, + self.has_cls_token, + 
q_size, + k_size, + self.rel_pos_h, + self.rel_pos_w, + ) + attn = attn.softmax(dim=-1) + x = attn @ v + + if self.residual_pooling: + x = x + q + + x = x.transpose(1, 2).reshape(B, -1, self.dim_out) + x = self.proj(x) + + return x, q_size + + +class MultiScaleAttention(nn.Module): + def __init__( + self, + dim, + dim_out, + feat_size, + num_heads=8, + qkv_bias=True, + mode="conv", + kernel_q=(1, 1), + kernel_kv=(1, 1), + stride_q=(1, 1), + stride_kv=(1, 1), + has_cls_token=True, + rel_pos_type='spatial', + residual_pooling=True, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.num_heads = num_heads + self.dim_out = dim_out + self.head_dim = dim_out // num_heads + self.scale = self.head_dim ** -0.5 + self.has_cls_token = has_cls_token + padding_q = tuple([int(q // 2) for q in kernel_q]) + padding_kv = tuple([int(kv // 2) for kv in kernel_kv]) + + self.qkv = nn.Linear(dim, dim_out * 3, bias=qkv_bias) + self.proj = nn.Linear(dim_out, dim_out) + + # Skip pooling with kernel and stride size of (1, 1, 1). + if prod(kernel_q) == 1 and prod(stride_q) == 1: + kernel_q = None + if prod(kernel_kv) == 1 and prod(stride_kv) == 1: + kernel_kv = None + self.mode = mode + self.unshared = mode == 'conv_unshared' + self.norm_q, self.norm_k, self.norm_v = None, None, None + self.pool_q, self.pool_k, self.pool_v = None, None, None + if mode in ("avg", "max"): + pool_op = nn.MaxPool2d if mode == "max" else nn.AvgPool2d + if kernel_q: + self.pool_q = pool_op(kernel_q, stride_q, padding_q) + if kernel_kv: + self.pool_k = pool_op(kernel_kv, stride_kv, padding_kv) + self.pool_v = pool_op(kernel_kv, stride_kv, padding_kv) + elif mode == "conv" or mode == "conv_unshared": + dim_conv = dim_out // num_heads if mode == "conv" else dim_out + if kernel_q: + self.pool_q = nn.Conv2d( + dim_conv, + dim_conv, + kernel_q, + stride=stride_q, + padding=padding_q, + groups=dim_conv, + bias=False, + ) + self.norm_q = norm_layer(dim_conv) + if kernel_kv: + self.pool_k = nn.Conv2d( + dim_conv, + dim_conv, + kernel_kv, + stride=stride_kv, + padding=padding_kv, + groups=dim_conv, + bias=False, + ) + self.norm_k = norm_layer(dim_conv) + self.pool_v = nn.Conv2d( + dim_conv, + dim_conv, + kernel_kv, + stride=stride_kv, + padding=padding_kv, + groups=dim_conv, + bias=False, + ) + self.norm_v = norm_layer(dim_conv) + else: + raise NotImplementedError(f"Unsupported model {mode}") + + # relative pos embedding + self.rel_pos_type = rel_pos_type + if self.rel_pos_type == 'spatial': + assert feat_size[0] == feat_size[1] + size = feat_size[0] + q_size = size // stride_q[1] if len(stride_q) > 0 else size + kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size + rel_sp_dim = 2 * max(q_size, kv_size) - 1 + + self.rel_pos_h = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) + trunc_normal_tf_(self.rel_pos_h, std=0.02) + trunc_normal_tf_(self.rel_pos_w, std=0.02) + + self.residual_pooling = residual_pooling + + def forward(self, x, feat_size: List[int]): + B, N, _ = x.shape + + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(dim=0) + + if self.pool_q is not None: + q, q_tok = reshape_pre_pool(q, feat_size, self.has_cls_token) + q = self.pool_q(q) + q, q_size = reshape_post_pool(q, self.num_heads, q_tok) + else: + q_size = feat_size + if self.norm_q is not None: + q = self.norm_q(q) + + if self.pool_k is not None: + k, k_tok = reshape_pre_pool(k, feat_size, self.has_cls_token) + k = self.pool_k(k) + k, 
k_size = reshape_post_pool(k, self.num_heads, k_tok) + else: + k_size = feat_size + if self.norm_k is not None: + k = self.norm_k(k) + + if self.pool_v is not None: + v, v_tok = reshape_pre_pool(v, feat_size, self.has_cls_token) + v = self.pool_v(v) + v, _ = reshape_post_pool(v, self.num_heads, v_tok) + if self.norm_v is not None: + v = self.norm_v(v) + + attn = (q * self.scale) @ k.transpose(-2, -1) + if self.rel_pos_type == 'spatial': + attn = cal_rel_pos_type( + attn, + q, + self.has_cls_token, + q_size, + k_size, + self.rel_pos_h, + self.rel_pos_w, + ) + attn = attn.softmax(dim=-1) + x = attn @ v + + if self.residual_pooling: + x = x + q + + x = x.transpose(1, 2).reshape(B, -1, self.dim_out) + x = self.proj(x) + + return x, q_size + + +class MultiScaleBlock(nn.Module): + def __init__( + self, + dim, + dim_out, + num_heads, + feat_size, + mlp_ratio=4.0, + qkv_bias=True, + drop_path=0.0, + norm_layer=nn.LayerNorm, + kernel_q=(1, 1), + kernel_kv=(1, 1), + stride_q=(1, 1), + stride_kv=(1, 1), + mode="conv", + has_cls_token=True, + expand_attn=False, + pool_first=False, + rel_pos_type='spatial', + residual_pooling=True, + ): + super().__init__() + proj_needed = dim != dim_out + self.dim = dim + self.dim_out = dim_out + self.has_cls_token = has_cls_token + + self.norm1 = norm_layer(dim) + + self.shortcut_proj_attn = nn.Linear(dim, dim_out) if proj_needed and expand_attn else None + if stride_q and prod(stride_q) > 1: + kernel_skip = [s + 1 if s > 1 else s for s in stride_q] + stride_skip = stride_q + padding_skip = [int(skip // 2) for skip in kernel_skip] + self.shortcut_pool_attn = nn.MaxPool2d(kernel_skip, stride_skip, padding_skip) + else: + self.shortcut_pool_attn = None + + att_dim = dim_out if expand_attn else dim + attn_layer = MultiScaleAttentionPoolFirst if pool_first else MultiScaleAttention + self.attn = attn_layer( + dim, + att_dim, + num_heads=num_heads, + feat_size=feat_size, + qkv_bias=qkv_bias, + kernel_q=kernel_q, + kernel_kv=kernel_kv, + stride_q=stride_q, + stride_kv=stride_kv, + norm_layer=norm_layer, + has_cls_token=has_cls_token, + mode=mode, + rel_pos_type=rel_pos_type, + residual_pooling=residual_pooling, + ) + self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.norm2 = norm_layer(att_dim) + mlp_dim_out = dim_out + self.shortcut_proj_mlp = nn.Linear(dim, dim_out) if proj_needed and not expand_attn else None + self.mlp = Mlp( + in_features=att_dim, + hidden_features=int(att_dim * mlp_ratio), + out_features=mlp_dim_out, + ) + self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + def _shortcut_pool(self, x, feat_size: List[int]): + if self.shortcut_pool_attn is None: + return x + if self.has_cls_token: + cls_tok, x = x[:, :1, :], x[:, 1:, :] + else: + cls_tok = None + B, L, C = x.shape + H, W = feat_size + x = x.reshape(B, H, W, C).permute(0, 3, 1, 2).contiguous() + x = self.shortcut_pool_attn(x) + x = x.reshape(B, C, -1).transpose(1, 2) + if cls_tok is not None: + x = torch.cat((cls_tok, x), dim=1) + return x + + def forward(self, x, feat_size: List[int]): + x_norm = self.norm1(x) + # NOTE as per the original impl, this seems odd, but shortcut uses un-normalized input if no proj + x_shortcut = x if self.shortcut_proj_attn is None else self.shortcut_proj_attn(x_norm) + x_shortcut = self._shortcut_pool(x_shortcut, feat_size) + x, feat_size_new = self.attn(x_norm, feat_size) + x = x_shortcut + self.drop_path1(x) + + x_norm = self.norm2(x) + x_shortcut = x if self.shortcut_proj_mlp is None else 
self.shortcut_proj_mlp(x_norm) + x = x_shortcut + self.drop_path2(self.mlp(x_norm)) + return x, feat_size_new + + +class MultiScaleVitStage(nn.Module): + + def __init__( + self, + dim, + dim_out, + depth, + num_heads, + feat_size, + mlp_ratio=4.0, + qkv_bias=True, + mode="conv", + kernel_q=(1, 1), + kernel_kv=(1, 1), + stride_q=(1, 1), + stride_kv=(1, 1), + has_cls_token=True, + expand_attn=False, + pool_first=False, + rel_pos_type='spatial', + residual_pooling=True, + norm_layer=nn.LayerNorm, + drop_path=0.0, + ): + super().__init__() + self.grad_checkpointing = False + + self.blocks = nn.ModuleList() + if expand_attn: + out_dims = (dim_out,) * depth + else: + out_dims = (dim,) * (depth - 1) + (dim_out,) + + for i in range(depth): + attention_block = MultiScaleBlock( + dim=dim, + dim_out=out_dims[i], + num_heads=num_heads, + feat_size=feat_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + kernel_q=kernel_q, + kernel_kv=kernel_kv, + stride_q=stride_q if i == 0 else (1, 1), + stride_kv=stride_kv, + mode=mode, + has_cls_token=has_cls_token, + pool_first=pool_first, + rel_pos_type=rel_pos_type, + residual_pooling=residual_pooling, + expand_attn=expand_attn, + norm_layer=norm_layer, + drop_path=drop_path[i] if isinstance(drop_path, (list, tuple)) else drop_path, + ) + dim = out_dims[i] + self.blocks.append(attention_block) + if i == 0: + feat_size = tuple([size // stride for size, stride in zip(feat_size, stride_q)]) + + self.feat_size = feat_size + + def forward(self, x, feat_size: List[int]): + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x, feat_size = checkpoint.checkpoint(blk, x, feat_size) + else: + x, feat_size = blk(x, feat_size) + return x, feat_size + + +class MultiScaleVit(nn.Module): + """ + Improved Multiscale Vision Transformers for Classification and Detection + Yanghao Li*, Chao-Yuan Wu*, Haoqi Fan, Karttikeya Mangalam, Bo Xiong, Jitendra Malik, + Christoph Feichtenhofer* + https://arxiv.org/abs/2112.01526 + + Multiscale Vision Transformers + Haoqi Fan*, Bo Xiong*, Karttikeya Mangalam*, Yanghao Li*, Zhicheng Yan, Jitendra Malik, + Christoph Feichtenhofer* + https://arxiv.org/abs/2104.11227 + """ + + def __init__( + self, + cfg: MultiScaleVitCfg, + img_size: Tuple[int, int] = (224, 224), + in_chans: int = 3, + global_pool: Optional[str] = None, + num_classes: int = 1000, + drop_path_rate: float = 0., + drop_rate: float = 0., + ): + super().__init__() + img_size = to_2tuple(img_size) + norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) + self.num_classes = num_classes + self.drop_rate = drop_rate + if global_pool is None: + global_pool = 'token' if cfg.use_cls_token else 'avg' + self.global_pool = global_pool + self.depths = tuple(cfg.depths) + self.expand_attn = cfg.expand_attn + + embed_dim = cfg.embed_dim[0] + self.patch_embed = PatchEmbed( + dim_in=in_chans, + dim_out=embed_dim, + kernel=cfg.patch_kernel, + stride=cfg.patch_stride, + padding=cfg.patch_padding, + ) + patch_dims = (img_size[0] // cfg.patch_stride[0], img_size[1] // cfg.patch_stride[1]) + num_patches = prod(patch_dims) + + if cfg.use_cls_token: + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.num_prefix_tokens = 1 + pos_embed_dim = num_patches + 1 + else: + self.num_prefix_tokens = 0 + self.cls_token = None + pos_embed_dim = num_patches + + if cfg.use_abs_pos: + self.pos_embed = nn.Parameter(torch.zeros(1, pos_embed_dim, embed_dim)) + else: + self.pos_embed = None + + num_stages = len(cfg.embed_dim) + feat_size = patch_dims + 
curr_stride = max(cfg.patch_stride) + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] + self.stages = nn.ModuleList() + self.feature_info = [] + for i in range(num_stages): + if cfg.expand_attn: + dim_out = cfg.embed_dim[i] + else: + dim_out = cfg.embed_dim[min(i + 1, num_stages - 1)] + stage = MultiScaleVitStage( + dim=embed_dim, + dim_out=dim_out, + depth=cfg.depths[i], + num_heads=cfg.num_heads[i], + feat_size=feat_size, + mlp_ratio=cfg.mlp_ratio, + qkv_bias=cfg.qkv_bias, + mode=cfg.mode, + pool_first=cfg.pool_first, + expand_attn=cfg.expand_attn, + kernel_q=cfg.kernel_qkv, + kernel_kv=cfg.kernel_qkv, + stride_q=cfg.stride_q[i], + stride_kv=cfg.stride_kv[i], + has_cls_token=cfg.use_cls_token, + rel_pos_type=cfg.rel_pos_type, + residual_pooling=cfg.residual_pooling, + norm_layer=norm_layer, + drop_path=dpr[i], + ) + curr_stride *= max(cfg.stride_q[i]) + self.feature_info += [dict(module=f'block.{i}', num_chs=dim_out, reduction=curr_stride)] + embed_dim = dim_out + feat_size = stage.feat_size + self.stages.append(stage) + + self.num_features = self.head_hidden_size = embed_dim + self.norm = norm_layer(embed_dim) + self.head = nn.Sequential(OrderedDict([ + ('drop', nn.Dropout(self.drop_rate)), + ('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()) + ])) + + if self.pos_embed is not None: + trunc_normal_tf_(self.pos_embed, std=0.02) + if self.cls_token is not None: + trunc_normal_tf_(self.cls_token, std=0.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_tf_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {k for k, _ in self.named_parameters() + if any(n in k for n in ["pos_embed", "rel_pos_h", "rel_pos_w", "cls_token"])} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^patch_embed', # stem and embed + blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = nn.Sequential(OrderedDict([ + ('drop', nn.Dropout(self.drop_rate)), + ('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()) + ])) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to all intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW', 'NLC'), 'Output shape must be NCHW or NLC.' 
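+        # Usage sketch (illustrative, assuming a 224x224 input and the mvitv2_tiny entrypoint defined below):
+        #   feats = model.forward_intermediates(torch.randn(1, 3, 224, 224), indices=2, intermediates_only=True)
+        # returns the feature maps of the last two stages as NCHW tensors, with the class token (if any)
+        # dropped during the reshape below.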
+ reshape = output_fmt == 'NCHW' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.stages), indices) + + # FIXME slice block/pos_block if < max + # forward pass + x, feat_size = self.patch_embed(x) + B = x.shape[0] + if self.cls_token is not None: + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed + + for i, stage in enumerate(self.stages): + x, feat_size = stage(x, feat_size) + if i in take_indices: + if norm and i == (len(self.stages) - 1): + x_inter = self.norm(x) # applying final norm last intermediate + else: + x_inter = x + if reshape: + if self.cls_token is not None: + # possible to allow return of class tokens, TBD + x_inter = x_inter[:, 1:] + x_inter = x_inter.reshape(B, feat_size[0], feat_size[1], -1).permute(0, 3, 1, 2) + intermediates.append(x_inter) + + if intermediates_only: + return intermediates + + x = self.norm(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. + """ + take_indices, max_index = feature_take_indices(len(self.stages), indices) + # FIXME add stage pruning + # self.stages = self.stages[:max_index] # truncate blocks w/ stem as idx 0 + if prune_norm: + self.norm = nn.Identity() + if prune_head: + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x, feat_size = self.patch_embed(x) + B, N, C = x.shape + + if self.cls_token is not None: + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + if self.pos_embed is not None: + x = x + self.pos_embed + + for stage in self.stages: + x, feat_size = stage(x, feat_size) + + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + if self.global_pool == 'avg': + x = x[:, self.num_prefix_tokens:].mean(1) + else: + x = x[:, 0] + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + if 'stages.0.blocks.0.norm1.weight' in state_dict: + # native checkpoint, look for rel_pos interpolations + for k in state_dict.keys(): + if 'rel_pos' in k: + rel_pos = state_dict[k] + dest_rel_pos_shape = model.state_dict()[k].shape + if rel_pos.shape[0] != dest_rel_pos_shape[0]: + rel_pos_resized = torch.nn.functional.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=dest_rel_pos_shape[0], + mode="linear", + ) + state_dict[k] = rel_pos_resized.reshape(-1, dest_rel_pos_shape[0]).permute(1, 0) + return state_dict + + import re + if 'model_state' in state_dict: + state_dict = state_dict['model_state'] + + depths = getattr(model, 'depths', None) + expand_attn = getattr(model, 'expand_attn', True) + assert depths is not None, 'model requires depth attribute to remap checkpoints' + depth_map = {} + block_idx = 0 + for stage_idx, d in enumerate(depths): + depth_map.update({i: (stage_idx, i - block_idx) for i in range(block_idx, block_idx + d)}) + block_idx += d + + out_dict = {} + for k, v in state_dict.items(): + k = re.sub( + r'blocks\.(\d+)', + lambda x: f'stages.{depth_map[int(x.group(1))][0]}.blocks.{depth_map[int(x.group(1))][1]}', + k) + + if expand_attn: + k = re.sub(r'stages\.(\d+).blocks\.(\d+).proj', f'stages.\\1.blocks.\\2.shortcut_proj_attn', k) + else: 
+ k = re.sub(r'stages\.(\d+).blocks\.(\d+).proj', f'stages.\\1.blocks.\\2.shortcut_proj_mlp', k) + if 'head' in k: + k = k.replace('head.projection', 'head.fc') + out_dict[k] = v + + return out_dict + + +model_cfgs = dict( + mvitv2_tiny=MultiScaleVitCfg( + depths=(1, 2, 5, 2), + ), + mvitv2_small=MultiScaleVitCfg( + depths=(1, 2, 11, 2), + ), + mvitv2_base=MultiScaleVitCfg( + depths=(2, 3, 16, 3), + ), + mvitv2_large=MultiScaleVitCfg( + depths=(2, 6, 36, 4), + embed_dim=144, + num_heads=2, + expand_attn=False, + ), + + mvitv2_small_cls=MultiScaleVitCfg( + depths=(1, 2, 11, 2), + use_cls_token=True, + ), + mvitv2_base_cls=MultiScaleVitCfg( + depths=(2, 3, 16, 3), + use_cls_token=True, + ), + mvitv2_large_cls=MultiScaleVitCfg( + depths=(2, 6, 36, 4), + embed_dim=144, + num_heads=2, + use_cls_token=True, + expand_attn=True, + ), + mvitv2_huge_cls=MultiScaleVitCfg( + depths=(4, 8, 60, 8), + embed_dim=192, + num_heads=3, + use_cls_token=True, + expand_attn=True, + ), +) + + +def _create_mvitv2(variant, cfg_variant=None, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', 4) + return build_model_with_cfg( + MultiScaleVit, + variant, + pretrained, + model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', + 'fixed_input_size': True, + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'mvitv2_tiny.fb_in1k': _cfg( + url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_T_in1k.pyth', + hf_hub_id='timm/'), + 'mvitv2_small.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_S_in1k.pyth', + hf_hub_id='timm/'), + 'mvitv2_base.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in1k.pyth', + hf_hub_id='timm/'), + 'mvitv2_large.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in1k.pyth', + hf_hub_id='timm/'), + + 'mvitv2_small_cls': _cfg(url=''), + 'mvitv2_base_cls.fb_inw21k': _cfg( + url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in21k.pyth', + hf_hub_id='timm/', + num_classes=19168), + 'mvitv2_large_cls.fb_inw21k': _cfg( + url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in21k.pyth', + hf_hub_id='timm/', + num_classes=19168), + 'mvitv2_huge_cls.fb_inw21k': _cfg( + url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_H_in21k.pyth', + hf_hub_id='timm/', + num_classes=19168), +}) + + +@register_model +def mvitv2_tiny(pretrained=False, **kwargs) -> MultiScaleVit: + return _create_mvitv2('mvitv2_tiny', pretrained=pretrained, **kwargs) + + +@register_model +def mvitv2_small(pretrained=False, **kwargs) -> MultiScaleVit: + return _create_mvitv2('mvitv2_small', pretrained=pretrained, **kwargs) + + +@register_model +def mvitv2_base(pretrained=False, **kwargs) -> MultiScaleVit: + return _create_mvitv2('mvitv2_base', pretrained=pretrained, **kwargs) + + +@register_model +def mvitv2_large(pretrained=False, **kwargs) -> MultiScaleVit: + return _create_mvitv2('mvitv2_large', pretrained=pretrained, **kwargs) + + +@register_model +def mvitv2_small_cls(pretrained=False, **kwargs) -> MultiScaleVit: + return 
_create_mvitv2('mvitv2_small_cls', pretrained=pretrained, **kwargs) + + +@register_model +def mvitv2_base_cls(pretrained=False, **kwargs) -> MultiScaleVit: + return _create_mvitv2('mvitv2_base_cls', pretrained=pretrained, **kwargs) + + +@register_model +def mvitv2_large_cls(pretrained=False, **kwargs) -> MultiScaleVit: + return _create_mvitv2('mvitv2_large_cls', pretrained=pretrained, **kwargs) + + +@register_model +def mvitv2_huge_cls(pretrained=False, **kwargs) -> MultiScaleVit: + return _create_mvitv2('mvitv2_huge_cls', pretrained=pretrained, **kwargs) diff --git a/pytorch-image-models/timm/models/nasnet.py b/pytorch-image-models/timm/models/nasnet.py new file mode 100644 index 0000000000000000000000000000000000000000..0bcc0485682e329e35a4a4f56bae0614bb54a91b --- /dev/null +++ b/pytorch-image-models/timm/models/nasnet.py @@ -0,0 +1,600 @@ +""" NasNet-A (Large) + nasnetalarge implementation grabbed from Cadene's pretrained models + https://github.com/Cadene/pretrained-models.pytorch +""" +from functools import partial +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs + +__all__ = ['NASNetALarge'] + + + +class ActConvBn(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): + super(ActConvBn, self).__init__() + self.act = nn.ReLU() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) + + def forward(self, x): + x = self.act(x) + x = self.conv(x) + x = self.bn(x) + return x + + +class SeparableConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): + super(SeparableConv2d, self).__init__() + self.depthwise_conv2d = create_conv2d( + in_channels, in_channels, kernel_size=kernel_size, + stride=stride, padding=padding, groups=in_channels) + self.pointwise_conv2d = create_conv2d( + in_channels, out_channels, kernel_size=1, padding=0) + + def forward(self, x): + x = self.depthwise_conv2d(x) + x = self.pointwise_conv2d(x) + return x + + +class BranchSeparables(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad_type='', stem_cell=False): + super(BranchSeparables, self).__init__() + middle_channels = out_channels if stem_cell else in_channels + self.act_1 = nn.ReLU() + self.separable_1 = SeparableConv2d( + in_channels, middle_channels, kernel_size, stride=stride, padding=pad_type) + self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001, momentum=0.1) + self.act_2 = nn.ReLU(inplace=True) + self.separable_2 = SeparableConv2d( + middle_channels, out_channels, kernel_size, stride=1, padding=pad_type) + self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) + + def forward(self, x): + x = self.act_1(x) + x = self.separable_1(x) + x = self.bn_sep_1(x) + x = self.act_2(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class CellStem0(nn.Module): + def __init__(self, stem_size, num_channels=42, pad_type=''): + super(CellStem0, self).__init__() + self.num_channels = num_channels + self.stem_size = stem_size + self.conv_1x1 = ActConvBn(self.stem_size, self.num_channels, 1, stride=1) + + self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) 
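+        # Each comb_iter below pairs a separable-conv and/or pooling branch applied to the 1x1-projected
+        # input (x1), the raw stem input (x), or an earlier branch output; forward() sums each pair and
+        # concatenates four of the five sums along the channel dim.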
+ self.comb_iter_0_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(self.stem_size, self.num_channels, 5, 2, pad_type, stem_cell=True) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x): + x1 = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x1) + x_comb_iter_0_right = self.comb_iter_0_right(x) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x1) + x_comb_iter_1_right = self.comb_iter_1_right(x) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x1) + x_comb_iter_2_right = self.comb_iter_2_right(x) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x1) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class CellStem1(nn.Module): + + def __init__(self, stem_size, num_channels, pad_type=''): + super(CellStem1, self).__init__() + self.num_channels = num_channels + self.stem_size = stem_size + self.conv_1x1 = ActConvBn(2 * self.num_channels, self.num_channels, 1, stride=1) + + self.act = nn.ReLU() + self.path_1 = nn.Sequential() + self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_1.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) + + self.path_2 = nn.Sequential() + self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) + self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_2.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) + + self.final_path_bn = nn.BatchNorm2d(self.num_channels, eps=0.001, momentum=0.1) + + self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x_conv0, x_stem_0): + x_left = self.conv_1x1(x_stem_0) + + x_relu = self.act(x_conv0) + # 
path 1 + x_path1 = self.path_1(x_relu) + # path 2 + x_path2 = self.path_2(x_relu) + # final path + x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + + x_comb_iter_0_left = self.comb_iter_0_left(x_left) + x_comb_iter_0_right = self.comb_iter_0_right(x_right) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_right) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_left) + x_comb_iter_2_right = self.comb_iter_2_right(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_left) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class FirstCell(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(FirstCell, self).__init__() + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1) + + self.act = nn.ReLU() + self.path_1 = nn.Sequential() + self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_1.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) + + self.path_2 = nn.Sequential() + self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) + self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_2.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) + + self.final_path_bn = nn.BatchNorm2d(out_chs_left * 2, eps=0.001, momentum=0.1) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + def forward(self, x, x_prev): + x_relu = self.act(x_prev) + x_path1 = self.path_1(x_relu) + x_path2 = self.path_2(x_relu) + x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_left + + x_comb_iter_3_left = self.comb_iter_3_left(x_left) + x_comb_iter_3_right = self.comb_iter_3_right(x_left) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_right) + x_comb_iter_4 = 
x_comb_iter_4_left + x_right + + x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class NormalCell(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(NormalCell, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) + + self.comb_iter_1_left = BranchSeparables(out_chs_left, out_chs_left, 5, 1, pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_left + + x_comb_iter_3_left = self.comb_iter_3_left(x_left) + x_comb_iter_3_right = self.comb_iter_3_right(x_left) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_right + + x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class ReductionCell0(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(ReductionCell0, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = 
x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_left) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class ReductionCell1(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(ReductionCell1, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_left) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class NASNetALarge(nn.Module): + """NASNetALarge (6 @ 4032) """ + + def __init__( + self, + num_classes=1000, + in_chans=3, + stem_size=96, + channel_multiplier=2, + num_features=4032, + output_stride=32, + drop_rate=0., + global_pool='avg', + pad_type='same', + ): + super(NASNetALarge, self).__init__() + self.num_classes = num_classes + self.stem_size = stem_size + self.num_features = self.head_hidden_size = num_features + self.channel_multiplier = channel_multiplier + assert output_stride == 32 + + channels = self.num_features // 24 + # 24 is default value for the architecture + + self.conv0 = ConvNormAct( + 
in_channels=in_chans, out_channels=self.stem_size, kernel_size=3, padding=0, stride=2, + norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) + + self.cell_stem_0 = CellStem0( + self.stem_size, num_channels=channels // (channel_multiplier ** 2), pad_type=pad_type) + self.cell_stem_1 = CellStem1( + self.stem_size, num_channels=channels // channel_multiplier, pad_type=pad_type) + + self.cell_0 = FirstCell( + in_chs_left=channels, out_chs_left=channels // 2, + in_chs_right=2 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_1 = NormalCell( + in_chs_left=2 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_2 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_3 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_4 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_5 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + + self.reduction_cell_0 = ReductionCell0( + in_chs_left=6 * channels, out_chs_left=2 * channels, + in_chs_right=6 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_6 = FirstCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=8 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_7 = NormalCell( + in_chs_left=8 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_8 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_9 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_10 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_11 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + + self.reduction_cell_1 = ReductionCell1( + in_chs_left=12 * channels, out_chs_left=4 * channels, + in_chs_right=12 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_12 = FirstCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=16 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_13 = NormalCell( + in_chs_left=16 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_14 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_15 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_16 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_17 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, 
out_chs_right=4 * channels, pad_type=pad_type) + self.act = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=96, reduction=2, module='conv0'), + dict(num_chs=168, reduction=4, module='cell_stem_1.conv_1x1.act'), + dict(num_chs=1008, reduction=8, module='reduction_cell_0.conv_1x1.act'), + dict(num_chs=2016, reduction=16, module='reduction_cell_1.conv_1x1.act'), + dict(num_chs=4032, reduction=32, module='act'), + ] + + self.global_pool, self.head_drop, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^conv0|cell_stem_[01]', + blocks=[ + (r'^cell_(\d+)', None), + (r'^reduction_cell_0', (6,)), + (r'^reduction_cell_1', (12,)), + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.last_linear + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x_conv0 = self.conv0(x) + + x_stem_0 = self.cell_stem_0(x_conv0) + x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0) + + x_cell_0 = self.cell_0(x_stem_1, x_stem_0) + x_cell_1 = self.cell_1(x_cell_0, x_stem_1) + x_cell_2 = self.cell_2(x_cell_1, x_cell_0) + x_cell_3 = self.cell_3(x_cell_2, x_cell_1) + x_cell_4 = self.cell_4(x_cell_3, x_cell_2) + x_cell_5 = self.cell_5(x_cell_4, x_cell_3) + + x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4) + x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4) + x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0) + x_cell_8 = self.cell_8(x_cell_7, x_cell_6) + x_cell_9 = self.cell_9(x_cell_8, x_cell_7) + x_cell_10 = self.cell_10(x_cell_9, x_cell_8) + x_cell_11 = self.cell_11(x_cell_10, x_cell_9) + + x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10) + x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10) + x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1) + x_cell_14 = self.cell_14(x_cell_13, x_cell_12) + x_cell_15 = self.cell_15(x_cell_14, x_cell_13) + x_cell_16 = self.cell_16(x_cell_15, x_cell_14) + x_cell_17 = self.cell_17(x_cell_16, x_cell_15) + x = self.act(x_cell_17) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.head_drop(x) + return x if pre_logits else self.last_linear(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_nasnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + NASNetALarge, + variant, + pretrained, + feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model + **kwargs, + ) + + +default_cfgs = generate_default_cfgs({ + 'nasnetalarge.tf_in1k': { + 'hf_hub_id': 'timm/', + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nasnetalarge-dc4a7b8b.pth', + 'input_size': (3, 331, 331), + 'pool_size': (11, 11), + 'crop_pct': 0.911, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv0.conv', + 'classifier': 'last_linear', + }, +}) + + +@register_model +def nasnetalarge(pretrained=False, **kwargs) -> NASNetALarge: + """NASNet-A large 
model architecture. + """ + model_kwargs = dict(pad_type='same', **kwargs) + return _create_nasnet('nasnetalarge', pretrained, **model_kwargs) diff --git a/pytorch-image-models/timm/models/nest.py b/pytorch-image-models/timm/models/nest.py new file mode 100644 index 0000000000000000000000000000000000000000..1d9c75210566c1420779aceeec7c719bc8fbe2b3 --- /dev/null +++ b/pytorch-image-models/timm/models/nest.py @@ -0,0 +1,583 @@ +""" Nested Transformer (NesT) in PyTorch + +A PyTorch implement of Aggregating Nested Transformers as described in: + +'Aggregating Nested Transformers' + - https://arxiv.org/abs/2105.12723 + +The official Jax code is released and available at https://github.com/google-research/nested-transformer. The weights +have been converted with convert/convert_nest_flax.py + +Acknowledgments: +* The paper authors for sharing their research, code, and model weights +* Ross Wightman's existing code off which I based this + +Copyright 2021 Alexander Soare +""" + +import collections.abc +import logging +import math +from functools import partial + +import torch +import torch.nn.functional as F +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import PatchEmbed, Mlp, DropPath, create_classifier, trunc_normal_, _assert +from timm.layers import create_conv2d, create_pool2d, to_ntuple, use_fused_attn, LayerNorm +from ._builder import build_model_with_cfg +from ._features_fx import register_notrace_function +from ._manipulate import checkpoint_seq, named_apply +from ._registry import register_model, generate_default_cfgs, register_model_deprecations + +__all__ = ['Nest'] # model_registry will add each entrypoint fn to this + +_logger = logging.getLogger(__name__) + + +class Attention(nn.Module): + """ + This is much like `.vision_transformer.Attention` but uses *localised* self attention by accepting an input with + an extra "image block" dim + """ + fused_attn: torch.jit.Final[bool] + + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.qkv = nn.Linear(dim, 3*dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + """ + x is shape: B (batch_size), T (image blocks), N (seq length per image block), C (embed dim) + """ + B, T, N, C = x.shape + # result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head) + qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + if self.fused_attn: + x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.) 
+ else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) # (B, H, T, N, N) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + # (B, H, T, N, C'), permute -> (B, T, N, C', H) + x = x.permute(0, 2, 3, 4, 1).reshape(B, T, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x # (B, T, N, C) + + +class TransformerLayer(nn.Module): + """ + This is much like `.vision_transformer.Block` but: + - Called TransformerLayer here to allow for "block" as defined in the paper ("non-overlapping image blocks") + - Uses modified Attention layer that handles the "block" dimension + """ + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=proj_drop, + ) + + def forward(self, x): + y = self.norm1(x) + x = x + self.drop_path(self.attn(y)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ConvPool(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer, pad_type=''): + super().__init__() + self.conv = create_conv2d(in_channels, out_channels, kernel_size=3, padding=pad_type, bias=True) + self.norm = norm_layer(out_channels) + self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=pad_type) + + def forward(self, x): + """ + x is expected to have shape (B, C, H, W) + """ + _assert(x.shape[-2] % 2 == 0, 'BlockAggregation requires even input spatial dims') + _assert(x.shape[-1] % 2 == 0, 'BlockAggregation requires even input spatial dims') + x = self.conv(x) + # Layer norm done over channel dim only + x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + x = self.pool(x) + return x # (B, C, H//2, W//2) + + +def blockify(x, block_size: int): + """image to blocks + Args: + x (Tensor): with shape (B, H, W, C) + block_size (int): edge length of a single square block in units of H, W + """ + B, H, W, C = x.shape + _assert(H % block_size == 0, '`block_size` must divide input height evenly') + _assert(W % block_size == 0, '`block_size` must divide input width evenly') + grid_height = H // block_size + grid_width = W // block_size + x = x.reshape(B, grid_height, block_size, grid_width, block_size, C) + x = x.transpose(2, 3).reshape(B, grid_height * grid_width, -1, C) + return x # (B, T, N, C) + + +@register_notrace_function # reason: int receives Proxy +def deblockify(x, block_size: int): + """blocks to image + Args: + x (Tensor): with shape (B, T, N, C) where T is number of blocks and N is sequence size per block + block_size (int): edge length of a single square block in units of desired H, W + """ + B, T, _, C = x.shape + grid_size = int(math.sqrt(T)) + height = width = grid_size * block_size + x = x.reshape(B, grid_size, grid_size, block_size, block_size, C) + x = x.transpose(2, 3).reshape(B, height, width, C) + return x # (B, H, W, C) + + +class NestLevel(nn.Module): + """ Single hierarchical level of a Nested Transformer + """ + def __init__( + self, + num_blocks, + block_size, + seq_length, + num_heads, + depth, + embed_dim, + prev_embed_dim=None, + mlp_ratio=4., + qkv_bias=True, + 
proj_drop=0., + attn_drop=0., + drop_path=[], + norm_layer=None, + act_layer=None, + pad_type='', + ): + super().__init__() + self.block_size = block_size + self.grad_checkpointing = False + + self.pos_embed = nn.Parameter(torch.zeros(1, num_blocks, seq_length, embed_dim)) + + if prev_embed_dim is not None: + self.pool = ConvPool(prev_embed_dim, embed_dim, norm_layer=norm_layer, pad_type=pad_type) + else: + self.pool = nn.Identity() + + # Transformer encoder + if len(drop_path): + assert len(drop_path) == depth, 'Must provide as many drop path rates as there are transformer layers' + self.transformer_encoder = nn.Sequential(*[ + TransformerLayer( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_drop=proj_drop, + attn_drop=attn_drop, + drop_path=drop_path[i], + norm_layer=norm_layer, + act_layer=act_layer, + ) + for i in range(depth)]) + + def forward(self, x): + """ + expects x as (B, C, H, W) + """ + x = self.pool(x) + x = x.permute(0, 2, 3, 1) # (B, H', W', C), switch to channels last for transformer + x = blockify(x, self.block_size) # (B, T, N, C') + x = x + self.pos_embed + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.transformer_encoder, x) + else: + x = self.transformer_encoder(x) # (B, T, N, C') + x = deblockify(x, self.block_size) # (B, H', W', C') + # Channel-first for block aggregation, and generally to replicate convnet feature map at each stage + return x.permute(0, 3, 1, 2) # (B, C, H', W') + + +class Nest(nn.Module): + """ Nested Transformer (NesT) + + A PyTorch impl of : `Aggregating Nested Transformers` + - https://arxiv.org/abs/2105.12723 + """ + + def __init__( + self, + img_size=224, + in_chans=3, + patch_size=4, + num_levels=3, + embed_dims=(128, 256, 512), + num_heads=(4, 8, 16), + depths=(2, 2, 20), + num_classes=1000, + mlp_ratio=4., + qkv_bias=True, + drop_rate=0., + proj_drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.5, + norm_layer=None, + act_layer=None, + pad_type='', + weight_init='', + global_pool='avg', + ): + """ + Args: + img_size (int, tuple): input image size + in_chans (int): number of input channels + patch_size (int): patch size + num_levels (int): number of block hierarchies (T_d in the paper) + embed_dims (int, tuple): embedding dimensions of each level + num_heads (int, tuple): number of attention heads for each level + depths (int, tuple): number of transformer layers for each level + num_classes (int): number of classes for classification head + mlp_ratio (int): ratio of mlp hidden dim to embedding dim for MLP of transformer layers + qkv_bias (bool): enable bias for qkv if True + drop_rate (float): dropout rate for MLP of transformer layers, MSA final projection layer, and classifier + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + norm_layer: (nn.Module): normalization layer for transformer layers + act_layer: (nn.Module): activation layer in MLP of transformer layers + pad_type: str: Type of padding to use '' for PyTorch symmetric, 'same' for TF SAME + weight_init: (str): weight init scheme + global_pool: (str): type of pooling operation to apply to final feature map + + Notes: + - Default values follow NesT-B from the original Jax code. + - `embed_dims`, `num_heads`, `depths` should be ints or tuples with length `num_levels`. + - For those following the paper, Table A1 may have errors! 
+ - https://github.com/google-research/nested-transformer/issues/2 + """ + super().__init__() + + for param_name in ['embed_dims', 'num_heads', 'depths']: + param_value = locals()[param_name] + if isinstance(param_value, collections.abc.Sequence): + assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`' + + embed_dims = to_ntuple(num_levels)(embed_dims) + num_heads = to_ntuple(num_levels)(num_heads) + depths = to_ntuple(num_levels)(depths) + self.num_classes = num_classes + self.num_features = self.head_hidden_size = embed_dims[-1] + self.feature_info = [] + norm_layer = norm_layer or LayerNorm + act_layer = act_layer or nn.GELU + self.drop_rate = drop_rate + self.num_levels = num_levels + if isinstance(img_size, collections.abc.Sequence): + assert img_size[0] == img_size[1], 'Model only handles square inputs' + img_size = img_size[0] + assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly' + self.patch_size = patch_size + + # Number of blocks at each level + self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist() + assert (img_size // patch_size) % math.sqrt(self.num_blocks[0]) == 0, \ + 'First level blocks don\'t fit evenly. Check `img_size`, `patch_size`, and `num_levels`' + + # Block edge size in units of patches + # Hint: (img_size // patch_size) gives number of patches along edge of image. sqrt(self.num_blocks[0]) is the + # number of blocks along edge of image + self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0])) + + # Patch embedding + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dims[0], + flatten=False, + ) + self.num_patches = self.patch_embed.num_patches + self.seq_length = self.num_patches // self.num_blocks[0] + + # Build up each hierarchical level + levels = [] + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + prev_dim = None + curr_stride = 4 + for i in range(len(self.num_blocks)): + dim = embed_dims[i] + levels.append(NestLevel( + self.num_blocks[i], + self.block_size, + self.seq_length, + num_heads[i], + depths[i], + dim, + prev_dim, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dp_rates[i], + norm_layer=norm_layer, + act_layer=act_layer, + pad_type=pad_type, + )) + self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')] + prev_dim = dim + curr_stride *= 2 + self.levels = nn.Sequential(*levels) + + # Final normalization layer + self.norm = norm_layer(embed_dims[-1]) + + # Classifier + global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + self.global_pool = global_pool + self.head_drop = nn.Dropout(drop_rate) + self.head = head + + self.init_weights(weight_init) + + @torch.jit.ignore + def init_weights(self, mode=''): + assert mode in ('nlhb', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. 
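+        # 'nlhb' = negative-log head bias: the classifier bias is initialised to -log(num_classes)
+        # in _init_nest_weights below rather than zero.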
+ for level in self.levels: + trunc_normal_(level.pos_embed, std=.02, a=-2, b=2) + named_apply(partial(_init_nest_weights, head_bias=head_bias), self) + + @torch.jit.ignore + def no_weight_decay(self): + return {f'level.{i}.pos_embed' for i in range(len(self.levels))} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^patch_embed', # stem and embed + blocks=[ + (r'^levels\.(\d+)' if coarse else r'^levels\.(\d+)\.transformer_encoder\.(\d+)', None), + (r'^levels\.(\d+)\.(?:pool|pos_embed)', (0,)), + (r'^norm', (99999,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for l in self.levels: + l.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.head = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.patch_embed(x) + x = self.levels(x) + # Layer norm done over channel dim only (to NHWC and back) + x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _init_nest_weights(module: nn.Module, name: str = '', head_bias: float = 0.): + """ NesT weight initialization + Can replicate Jax implementation. Otherwise follows vision_transformer.py + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + trunc_normal_(module.weight, std=.02, a=-2, b=2) + nn.init.constant_(module.bias, head_bias) + else: + trunc_normal_(module.weight, std=.02, a=-2, b=2) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + trunc_normal_(module.weight, std=.02, a=-2, b=2) + if module.bias is not None: + nn.init.zeros_(module.bias) + + +def resize_pos_embed(posemb, posemb_new): + """ + Rescale the grid of position embeddings when loading from state_dict + Expected shape of position embeddings is (1, T, N, C), and considers only square images + """ + _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) + seq_length_old = posemb.shape[2] + num_blocks_new, seq_length_new = posemb_new.shape[1:3] + size_new = int(math.sqrt(num_blocks_new*seq_length_new)) + # First change to (1, C, H, W) + posemb = deblockify(posemb, int(math.sqrt(seq_length_old))).permute(0, 3, 1, 2) + posemb = F.interpolate(posemb, size=[size_new, size_new], mode='bicubic', align_corners=False) + # Now change to new (1, T, N, C) + posemb = blockify(posemb.permute(0, 2, 3, 1), int(math.sqrt(seq_length_new))) + return posemb + + +def checkpoint_filter_fn(state_dict, model): + """ resize positional embeddings of pretrained weights """ + pos_embed_keys = [k for k in state_dict.keys() if k.startswith('pos_embed_')] + for k in pos_embed_keys: + if state_dict[k].shape != getattr(model, k).shape: + state_dict[k] = resize_pos_embed(state_dict[k], getattr(model, k)) + return state_dict + + +def _create_nest(variant, pretrained=False, **kwargs): + model = build_model_with_cfg( + Nest, + variant, + pretrained, + feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True), + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs, + ) + + return model + + +def 
_cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': [14, 14], + 'crop_pct': .875, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'nest_base.untrained': _cfg(), + 'nest_small.untrained': _cfg(), + 'nest_tiny.untrained': _cfg(), + # (weights from official Google JAX impl, require 'SAME' padding) + 'nest_base_jx.goog_in1k': _cfg(hf_hub_id='timm/'), + 'nest_small_jx.goog_in1k': _cfg(hf_hub_id='timm/'), + 'nest_tiny_jx.goog_in1k': _cfg(hf_hub_id='timm/'), +}) + + +@register_model +def nest_base(pretrained=False, **kwargs) -> Nest: + """ Nest-B @ 224x224 + """ + model_kwargs = dict( + embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) + model = _create_nest('nest_base', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def nest_small(pretrained=False, **kwargs) -> Nest: + """ Nest-S @ 224x224 + """ + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) + model = _create_nest('nest_small', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def nest_tiny(pretrained=False, **kwargs) -> Nest: + """ Nest-T @ 224x224 + """ + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) + model = _create_nest('nest_tiny', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def nest_base_jx(pretrained=False, **kwargs) -> Nest: + """ Nest-B @ 224x224 + """ + kwargs.setdefault('pad_type', 'same') + model_kwargs = dict( + embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) + model = _create_nest('nest_base_jx', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def nest_small_jx(pretrained=False, **kwargs) -> Nest: + """ Nest-S @ 224x224 + """ + kwargs.setdefault('pad_type', 'same') + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) + model = _create_nest('nest_small_jx', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def nest_tiny_jx(pretrained=False, **kwargs) -> Nest: + """ Nest-T @ 224x224 + """ + kwargs.setdefault('pad_type', 'same') + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) + model = _create_nest('nest_tiny_jx', pretrained=pretrained, **model_kwargs) + return model + + +register_model_deprecations(__name__, { + 'jx_nest_base': 'nest_base_jx', + 'jx_nest_small': 'nest_small_jx', + 'jx_nest_tiny': 'nest_tiny_jx', +}) \ No newline at end of file diff --git a/pytorch-image-models/timm/models/nextvit.py b/pytorch-image-models/timm/models/nextvit.py new file mode 100644 index 0000000000000000000000000000000000000000..e05bb8b5f1cb92234da8b8f4fdba196be014e905 --- /dev/null +++ b/pytorch-image-models/timm/models/nextvit.py @@ -0,0 +1,688 @@ +""" Next-ViT + +As described in https://arxiv.org/abs/2207.05501 + +Next-ViT model defs and weights adapted from https://github.com/bytedance/Next-ViT, original copyright below +""" +# Copyright (c) ByteDance Inc. All rights reserved. 
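+# Illustrative usage (a minimal sketch; assumes a timm install where the 'nextvit_*' entrypoints
+# registered at the bottom of this file are available through the model factory):
+#
+#   import timm
+#   model = timm.create_model('nextvit_small', pretrained=False, num_classes=10)
+#   feats = timm.create_model('nextvit_small', features_only=True)  # multi-scale feature maps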
+from functools import partial +from typing import Optional + +import torch +import torch.nn.functional as F +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import DropPath, trunc_normal_, ConvMlp, get_norm_layer, get_act_layer, use_fused_attn +from timm.layers import ClassifierHead +from ._builder import build_model_with_cfg +from ._features_fx import register_notrace_function +from ._manipulate import checkpoint_seq +from ._registry import generate_default_cfgs, register_model + +__all__ = ['NextViT'] + + +def merge_pre_bn(module, pre_bn_1, pre_bn_2=None): + """ Merge pre BN to reduce inference runtime. + """ + weight = module.weight.data + if module.bias is None: + zeros = torch.zeros(module.out_chs, device=weight.device).type(weight.type()) + module.bias = nn.Parameter(zeros) + bias = module.bias.data + if pre_bn_2 is None: + assert pre_bn_1.track_running_stats is True, "Unsupported bn_module.track_running_stats is False" + assert pre_bn_1.affine is True, "Unsupported bn_module.affine is False" + + scale_invstd = pre_bn_1.running_var.add(pre_bn_1.eps).pow(-0.5) + extra_weight = scale_invstd * pre_bn_1.weight + extra_bias = pre_bn_1.bias - pre_bn_1.weight * pre_bn_1.running_mean * scale_invstd + else: + assert pre_bn_1.track_running_stats is True, "Unsupported bn_module.track_running_stats is False" + assert pre_bn_1.affine is True, "Unsupported bn_module.affine is False" + + assert pre_bn_2.track_running_stats is True, "Unsupported bn_module.track_running_stats is False" + assert pre_bn_2.affine is True, "Unsupported bn_module.affine is False" + + scale_invstd_1 = pre_bn_1.running_var.add(pre_bn_1.eps).pow(-0.5) + scale_invstd_2 = pre_bn_2.running_var.add(pre_bn_2.eps).pow(-0.5) + + extra_weight = scale_invstd_1 * pre_bn_1.weight * scale_invstd_2 * pre_bn_2.weight + extra_bias = ( + scale_invstd_2 * pre_bn_2.weight + * (pre_bn_1.bias - pre_bn_1.weight * pre_bn_1.running_mean * scale_invstd_1 - pre_bn_2.running_mean) + + pre_bn_2.bias + ) + + if isinstance(module, nn.Linear): + extra_bias = weight @ extra_bias + weight.mul_(extra_weight.view(1, weight.size(1)).expand_as(weight)) + elif isinstance(module, nn.Conv2d): + assert weight.shape[2] == 1 and weight.shape[3] == 1 + weight = weight.reshape(weight.shape[0], weight.shape[1]) + extra_bias = weight @ extra_bias + weight.mul_(extra_weight.view(1, weight.size(1)).expand_as(weight)) + weight = weight.reshape(weight.shape[0], weight.shape[1], 1, 1) + bias.add_(extra_bias) + + module.weight.data = weight + module.bias.data = bias + + +class ConvNormAct(nn.Module): + def __init__( + self, + in_chs, + out_chs, + kernel_size=3, + stride=1, + groups=1, + norm_layer=nn.BatchNorm2d, + act_layer=nn.ReLU, + ): + super(ConvNormAct, self).__init__() + self.conv = nn.Conv2d( + in_chs, out_chs, kernel_size=kernel_size, stride=stride, + padding=1, groups=groups, bias=False) + self.norm = norm_layer(out_chs) + self.act = act_layer() + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + x = self.act(x) + return x + + +def _make_divisible(v, divisor, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. 
+ if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class PatchEmbed(nn.Module): + def __init__(self, + in_chs, + out_chs, + stride=1, + norm_layer = nn.BatchNorm2d, + ): + super(PatchEmbed, self).__init__() + + if stride == 2: + self.pool = nn.AvgPool2d((2, 2), stride=2, ceil_mode=True, count_include_pad=False) + self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=1, stride=1, bias=False) + self.norm = norm_layer(out_chs) + elif in_chs != out_chs: + self.pool = nn.Identity() + self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=1, stride=1, bias=False) + self.norm = norm_layer(out_chs) + else: + self.pool = nn.Identity() + self.conv = nn.Identity() + self.norm = nn.Identity() + + def forward(self, x): + return self.norm(self.conv(self.pool(x))) + + +class ConvAttention(nn.Module): + """ + Multi-Head Convolutional Attention + """ + + def __init__(self, out_chs, head_dim, norm_layer = nn.BatchNorm2d, act_layer = nn.ReLU): + super(ConvAttention, self).__init__() + self.group_conv3x3 = nn.Conv2d( + out_chs, out_chs, + kernel_size=3, stride=1, padding=1, groups=out_chs // head_dim, bias=False + ) + self.norm = norm_layer(out_chs) + self.act = act_layer() + self.projection = nn.Conv2d(out_chs, out_chs, kernel_size=1, bias=False) + + def forward(self, x): + out = self.group_conv3x3(x) + out = self.norm(out) + out = self.act(out) + out = self.projection(out) + return out + +class NextConvBlock(nn.Module): + """ + Next Convolution Block + """ + + def __init__( + self, + in_chs, + out_chs, + stride=1, + drop_path=0., + drop=0., + head_dim=32, + mlp_ratio=3., + norm_layer=nn.BatchNorm2d, + act_layer=nn.ReLU + ): + super(NextConvBlock, self).__init__() + self.in_chs = in_chs + self.out_chs = out_chs + assert out_chs % head_dim == 0 + + self.patch_embed = PatchEmbed(in_chs, out_chs, stride, norm_layer=norm_layer) + self.mhca = ConvAttention( + out_chs, + head_dim, + norm_layer=norm_layer, + act_layer=act_layer, + ) + self.attn_drop_path = DropPath(drop_path) + + self.norm = norm_layer(out_chs) + self.mlp = ConvMlp( + out_chs, + hidden_features=int(out_chs * mlp_ratio), + drop=drop, + bias=True, + act_layer=act_layer, + ) + self.mlp_drop_path = DropPath(drop_path) + self.is_fused = False + + @torch.no_grad() + def reparameterize(self): + if not self.is_fused: + merge_pre_bn(self.mlp.fc1, self.norm) + self.norm = nn.Identity() + self.is_fused = True + + def forward(self, x): + x = self.patch_embed(x) + x = x + self.attn_drop_path(self.mhca(x)) + + out = self.norm(x) + x = x + self.mlp_drop_path(self.mlp(out)) + return x + + +class EfficientAttention(nn.Module): + """ + Efficient Multi-Head Self Attention + """ + fused_attn: torch.jit.Final[bool] + + def __init__( + self, + dim, + out_dim=None, + head_dim=32, + qkv_bias=True, + attn_drop=0., + proj_drop=0., + sr_ratio=1, + norm_layer=nn.BatchNorm1d, + ): + super().__init__() + self.dim = dim + self.out_dim = out_dim if out_dim is not None else dim + self.num_heads = self.dim // head_dim + self.head_dim = head_dim + self.scale = head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.q = nn.Linear(dim, self.dim, bias=qkv_bias) + self.k = nn.Linear(dim, self.dim, bias=qkv_bias) + self.v = nn.Linear(dim, self.dim, bias=qkv_bias) + self.proj = nn.Linear(self.dim, self.out_dim) + self.attn_drop = nn.Dropout(attn_drop) + self.proj_drop = nn.Dropout(proj_drop) + + self.sr_ratio = sr_ratio + self.N_ratio = sr_ratio ** 2 + if sr_ratio > 1: + self.sr = nn.AvgPool1d(kernel_size=self.N_ratio, stride=self.N_ratio) + self.norm = norm_layer(dim) + else: + 
self.sr = None + self.norm = None + + def forward(self, x): + B, N, C = x.shape + q = self.q(x).reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3) + + if self.sr is not None: + x = self.sr(x.transpose(1, 2)) + x = self.norm(x).transpose(1, 2) + + k = self.k(x).reshape(B, -1, self.num_heads, self.head_dim).transpose(1, 2) + v = self.v(x).reshape(B, -1, self.num_heads, self.head_dim).transpose(1, 2) + + if self.fused_attn: + x = F.scaled_dot_product_attention( + q, k, v, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + q = q * self.scale + attn = q @ k.transpose(-1, -2) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class NextTransformerBlock(nn.Module): + """ + Next Transformer Block + """ + + def __init__( + self, + in_chs, + out_chs, + drop_path, + stride=1, + sr_ratio=1, + mlp_ratio=2, + head_dim=32, + mix_block_ratio=0.75, + attn_drop=0., + drop=0., + norm_layer=nn.BatchNorm2d, + act_layer=nn.ReLU, + ): + super(NextTransformerBlock, self).__init__() + self.in_chs = in_chs + self.out_chs = out_chs + self.mix_block_ratio = mix_block_ratio + + self.mhsa_out_chs = _make_divisible(int(out_chs * mix_block_ratio), 32) + self.mhca_out_chs = out_chs - self.mhsa_out_chs + + self.patch_embed = PatchEmbed(in_chs, self.mhsa_out_chs, stride) + self.norm1 = norm_layer(self.mhsa_out_chs) + self.e_mhsa = EfficientAttention( + self.mhsa_out_chs, + head_dim=head_dim, + sr_ratio=sr_ratio, + attn_drop=attn_drop, + proj_drop=drop, + ) + self.mhsa_drop_path = DropPath(drop_path * mix_block_ratio) + + self.projection = PatchEmbed(self.mhsa_out_chs, self.mhca_out_chs, stride=1, norm_layer=norm_layer) + self.mhca = ConvAttention( + self.mhca_out_chs, + head_dim=head_dim, + norm_layer=norm_layer, + act_layer=act_layer, + ) + self.mhca_drop_path = DropPath(drop_path * (1 - mix_block_ratio)) + + self.norm2 = norm_layer(out_chs) + self.mlp = ConvMlp( + out_chs, + hidden_features=int(out_chs * mlp_ratio), + act_layer=act_layer, + drop=drop, + ) + self.mlp_drop_path = DropPath(drop_path) + self.is_fused = False + + @torch.no_grad() + def reparameterize(self): + if not self.is_fused: + merge_pre_bn(self.e_mhsa.q, self.norm1) + if self.e_mhsa.norm is not None: + merge_pre_bn(self.e_mhsa.k, self.norm1, self.e_mhsa.norm) + merge_pre_bn(self.e_mhsa.v, self.norm1, self.e_mhsa.norm) + self.e_mhsa.norm = nn.Identity() + else: + merge_pre_bn(self.e_mhsa.k, self.norm1) + merge_pre_bn(self.e_mhsa.v, self.norm1) + self.norm1 = nn.Identity() + + merge_pre_bn(self.mlp.fc1, self.norm2) + self.norm2 = nn.Identity() + self.is_fused = True + + def forward(self, x): + x = self.patch_embed(x) + B, C, H, W = x.shape + + out = self.norm1(x) + out = out.reshape(B, C, -1).transpose(-1, -2) + out = self.mhsa_drop_path(self.e_mhsa(out)) + x = x + out.transpose(-1, -2).reshape(B, C, H, W) + + out = self.projection(x) + out = out + self.mhca_drop_path(self.mhca(out)) + x = torch.cat([x, out], dim=1) + + out = self.norm2(x) + x = x + self.mlp_drop_path(self.mlp(out)) + return x + + +class NextStage(nn.Module): + + def __init__( + self, + in_chs, + block_chs, + block_types, + stride=2, + sr_ratio=1, + mix_block_ratio=1.0, + drop=0., + attn_drop=0., + drop_path=0., + head_dim=32, + norm_layer=nn.BatchNorm2d, + act_layer=nn.ReLU, + ): + super().__init__() + self.grad_checkpointing = False + + blocks = [] + for block_idx, block_type in enumerate(block_types): + stride = stride if block_idx 
== 0 else 1 + out_chs = block_chs[block_idx] + block_type = block_types[block_idx] + dpr = drop_path[block_idx] if isinstance(drop_path, (list, tuple)) else drop_path + if block_type is NextConvBlock: + layer = NextConvBlock( + in_chs, + out_chs, + stride=stride, + drop_path=dpr, + drop=drop, + head_dim=head_dim, + norm_layer=norm_layer, + act_layer=act_layer, + ) + blocks.append(layer) + elif block_type is NextTransformerBlock: + layer = NextTransformerBlock( + in_chs, + out_chs, + drop_path=dpr, + stride=stride, + sr_ratio=sr_ratio, + head_dim=head_dim, + mix_block_ratio=mix_block_ratio, + attn_drop=attn_drop, + drop=drop, + norm_layer=norm_layer, + act_layer=act_layer, + ) + blocks.append(layer) + in_chs = out_chs + + self.blocks = nn.Sequential(*blocks) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + def forward(self, x): + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class NextViT(nn.Module): + def __init__( + self, + in_chans, + num_classes=1000, + global_pool='avg', + stem_chs=(64, 32, 64), + depths=(3, 4, 10, 3), + strides=(1, 2, 2, 2), + sr_ratios=(8, 4, 2, 1), + drop_path_rate=0.1, + attn_drop_rate=0., + drop_rate=0., + head_dim=32, + mix_block_ratio=0.75, + norm_layer=nn.BatchNorm2d, + act_layer=None, + ): + super(NextViT, self).__init__() + self.grad_checkpointing = False + self.num_classes = num_classes + norm_layer = get_norm_layer(norm_layer) + if act_layer is None: + act_layer = partial(nn.ReLU, inplace=True) + else: + act_layer = get_act_layer(act_layer) + + self.stage_out_chs = [ + [96] * (depths[0]), + [192] * (depths[1] - 1) + [256], + [384, 384, 384, 384, 512] * (depths[2] // 5), + [768] * (depths[3] - 1) + [1024] + ] + self.feature_info = [dict( + num_chs=sc[-1], + reduction=2**(i + 2), + module=f'stages.{i}' + ) for i, sc in enumerate(self.stage_out_chs)] + + # Next Hybrid Strategy + self.stage_block_types = [ + [NextConvBlock] * depths[0], + [NextConvBlock] * (depths[1] - 1) + [NextTransformerBlock], + [NextConvBlock, NextConvBlock, NextConvBlock, NextConvBlock, NextTransformerBlock] * (depths[2] // 5), + [NextConvBlock] * (depths[3] - 1) + [NextTransformerBlock]] + + self.stem = nn.Sequential( + ConvNormAct(in_chans, stem_chs[0], kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer), + ConvNormAct(stem_chs[0], stem_chs[1], kernel_size=3, stride=1, norm_layer=norm_layer, act_layer=act_layer), + ConvNormAct(stem_chs[1], stem_chs[2], kernel_size=3, stride=1, norm_layer=norm_layer, act_layer=act_layer), + ConvNormAct(stem_chs[2], stem_chs[2], kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer), + ) + in_chs = out_chs = stem_chs[-1] + stages = [] + idx = 0 + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + for stage_idx in range(len(depths)): + stage = NextStage( + in_chs=in_chs, + block_chs=self.stage_out_chs[stage_idx], + block_types=self.stage_block_types[stage_idx], + stride=strides[stage_idx], + sr_ratio=sr_ratios[stage_idx], + mix_block_ratio=mix_block_ratio, + head_dim=head_dim, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[stage_idx], + norm_layer=norm_layer, + act_layer=act_layer, + ) + in_chs = out_chs = self.stage_out_chs[stage_idx][-1] + stages += [stage] + idx += depths[stage_idx] + self.num_features = self.head_hidden_size = out_chs + self.stages = nn.Sequential(*stages) + self.norm = norm_layer(out_chs) + 
self.head = ClassifierHead(pool_type=global_pool, in_features=out_chs, num_classes=num_classes) + + self.stage_out_idx = [sum(depths[:idx + 1]) - 1 for idx in range(len(depths))] + self._initialize_weights() + + def _initialize_weights(self): + for n, m in self.named_modules(): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if hasattr(m, 'bias') and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Conv2d): + trunc_normal_(m.weight, std=.02) + if hasattr(m, 'bias') and m.bias is not None: + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', # stem and embed + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+)\.blocks\.(\d+)', None), + (r'^norm', (99999,)), + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + for stage in self.stages: + stage.set_grad_checkpointing(enable=enable) + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.head.reset(num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x) + else: + x = self.stages(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + """ Remap original checkpoints -> timm """ + if 'head.fc.weight' in state_dict: + return state_dict # non-original + + D = model.state_dict() + out_dict = {} + # remap originals based on order + for ka, kb, va, vb in zip(D.keys(), state_dict.keys(), D.values(), state_dict.values()): + out_dict[ka] = vb + + return out_dict + + +def _create_nextvit(variant, pretrained=False, **kwargs): + default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1)))) + out_indices = kwargs.pop('out_indices', default_out_indices) + + model = build_model_with_cfg( + NextViT, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs) + + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.95, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'nextvit_small.bd_in1k': _cfg( + hf_hub_id='timm/', + ), + 'nextvit_base.bd_in1k': _cfg( + hf_hub_id='timm/', + ), + 'nextvit_large.bd_in1k': _cfg( + hf_hub_id='timm/', + ), + 'nextvit_small.bd_in1k_384': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, + ), + 'nextvit_base.bd_in1k_384': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, + ), + 'nextvit_large.bd_in1k_384': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, + ), + + 'nextvit_small.bd_ssld_6m_in1k': _cfg( + hf_hub_id='timm/', + ), + 'nextvit_base.bd_ssld_6m_in1k': _cfg( + hf_hub_id='timm/', + ), + 'nextvit_large.bd_ssld_6m_in1k': _cfg( + 
hf_hub_id='timm/', + ), + 'nextvit_small.bd_ssld_6m_in1k_384': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, + ), + 'nextvit_base.bd_ssld_6m_in1k_384': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, + ), + 'nextvit_large.bd_ssld_6m_in1k_384': _cfg( + hf_hub_id='timm/', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, + ), +}) + + +@register_model +def nextvit_small(pretrained=False, **kwargs): + model_args = dict(depths=(3, 4, 10, 3), drop_path_rate=0.1) + model = _create_nextvit( + 'nextvit_small', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def nextvit_base(pretrained=False, **kwargs): + model_args = dict(depths=(3, 4, 20, 3), drop_path_rate=0.2) + model = _create_nextvit( + 'nextvit_base', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def nextvit_large(pretrained=False, **kwargs): + model_args = dict(depths=(3, 4, 30, 3), drop_path_rate=0.2) + model = _create_nextvit( + 'nextvit_large', pretrained=pretrained, **dict(model_args, **kwargs)) + return model diff --git a/pytorch-image-models/timm/models/nfnet.py b/pytorch-image-models/timm/models/nfnet.py new file mode 100644 index 0000000000000000000000000000000000000000..2aa275645e7a9d8b3b923a3be25ffb7df7037bc6 --- /dev/null +++ b/pytorch-image-models/timm/models/nfnet.py @@ -0,0 +1,1045 @@ +""" Normalization Free Nets. NFNet, NF-RegNet, NF-ResNet (pre-activation) Models + +Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + +Paper: `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + +Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets + +Status: +* These models are a work in progress, experiments ongoing. +* Pretrained weights for two models so far, more to come. +* Model details updated to closer match official JAX code now that it's released +* NF-ResNet, NF-RegNet-B, and NFNet-F models supported + +Hacked together by / copyright Ross Wightman, 2021. 
+""" +from collections import OrderedDict +from dataclasses import dataclass, replace +from functools import partial +from typing import Callable, Tuple, Optional + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import ClassifierHead, DropPath, AvgPool2dSame, ScaledStdConv2d, ScaledStdConv2dSame, \ + get_act_layer, get_act_fn, get_attn, make_divisible +from ._builder import build_model_with_cfg +from ._features_fx import register_notrace_module +from ._manipulate import checkpoint_seq +from ._registry import generate_default_cfgs, register_model + +__all__ = ['NormFreeNet', 'NfCfg'] # model_registry will add each entrypoint fn to this + + +@dataclass +class NfCfg: + depths: Tuple[int, int, int, int] + channels: Tuple[int, int, int, int] + alpha: float = 0.2 + stem_type: str = '3x3' + stem_chs: Optional[int] = None + group_size: Optional[int] = None + attn_layer: Optional[str] = None + attn_kwargs: dict = None + attn_gain: float = 2.0 # NF correction gain to apply if attn layer is used + width_factor: float = 1.0 + bottle_ratio: float = 0.5 + num_features: int = 0 # num out_channels for final conv, no final_conv if 0 + ch_div: int = 8 # round channels % 8 == 0 to keep tensor-core use optimal + reg: bool = False # enables EfficientNet-like options used in RegNet variants, expand from in_chs, se in middle + extra_conv: bool = False # extra 3x3 bottleneck convolution for NFNet models + gamma_in_act: bool = False + same_padding: bool = False + std_conv_eps: float = 1e-5 + skipinit: bool = False # disabled by default, non-trivial performance impact + zero_init_fc: bool = False + act_layer: str = 'silu' + + +class GammaAct(nn.Module): + def __init__(self, act_type='relu', gamma: float = 1.0, inplace=False): + super().__init__() + self.act_fn = get_act_fn(act_type) + self.gamma = gamma + self.inplace = inplace + + def forward(self, x): + return self.act_fn(x, inplace=self.inplace).mul_(self.gamma) + + +def act_with_gamma(act_type, gamma: float = 1.): + def _create(inplace=False): + return GammaAct(act_type, gamma=gamma, inplace=inplace) + return _create + + +class DownsampleAvg(nn.Module): + def __init__( + self, + in_chs: int, + out_chs: int, + stride: int = 1, + dilation: int = 1, + first_dilation: Optional[int] = None, + conv_layer: Callable = ScaledStdConv2d, + ): + """ AvgPool Downsampling as in 'D' ResNet variants. Support for dilation.""" + super(DownsampleAvg, self).__init__() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = conv_layer(in_chs, out_chs, 1, stride=1) + + def forward(self, x): + return self.conv(self.pool(x)) + + +@register_notrace_module # reason: mul_ causes FX to drop a relevant node. https://github.com/pytorch/pytorch/issues/68301 +class NormFreeBlock(nn.Module): + """Normalization-Free pre-activation block. 
+ """ + + def __init__( + self, + in_chs: int, + out_chs: Optional[int] = None, + stride: int = 1, + dilation: int = 1, + first_dilation: Optional[int] = None, + alpha: float = 1.0, + beta: float = 1.0, + bottle_ratio: float = 0.25, + group_size: Optional[int] = None, + ch_div: int = 1, + reg: bool = True, + extra_conv: bool = False, + skipinit: bool = False, + attn_layer: Optional[Callable] = None, + attn_gain: bool = 2.0, + act_layer: Optional[Callable] = None, + conv_layer: Callable = ScaledStdConv2d, + drop_path_rate: float = 0., + ): + super().__init__() + first_dilation = first_dilation or dilation + out_chs = out_chs or in_chs + # RegNet variants scale bottleneck from in_chs, otherwise scale from out_chs like ResNet + mid_chs = make_divisible(in_chs * bottle_ratio if reg else out_chs * bottle_ratio, ch_div) + groups = 1 if not group_size else mid_chs // group_size + if group_size and group_size % ch_div == 0: + mid_chs = group_size * groups # correct mid_chs if group_size divisible by ch_div, otherwise error + self.alpha = alpha + self.beta = beta + self.attn_gain = attn_gain + + if in_chs != out_chs or stride != 1 or dilation != first_dilation: + self.downsample = DownsampleAvg( + in_chs, + out_chs, + stride=stride, + dilation=dilation, + first_dilation=first_dilation, + conv_layer=conv_layer, + ) + else: + self.downsample = None + + self.act1 = act_layer() + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.act2 = act_layer(inplace=True) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + if extra_conv: + self.act2b = act_layer(inplace=True) + self.conv2b = conv_layer(mid_chs, mid_chs, 3, stride=1, dilation=dilation, groups=groups) + else: + self.act2b = None + self.conv2b = None + if reg and attn_layer is not None: + self.attn = attn_layer(mid_chs) # RegNet blocks apply attn btw conv2 & 3 + else: + self.attn = None + self.act3 = act_layer() + self.conv3 = conv_layer(mid_chs, out_chs, 1, gain_init=1. if skipinit else 0.) 
+ if not reg and attn_layer is not None: + self.attn_last = attn_layer(out_chs) # ResNet blocks apply attn after conv3 + else: + self.attn_last = None + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.skipinit_gain = nn.Parameter(torch.tensor(0.)) if skipinit else None + + def forward(self, x): + out = self.act1(x) * self.beta + + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(out) + + # residual branch + out = self.conv1(out) + out = self.conv2(self.act2(out)) + if self.conv2b is not None: + out = self.conv2b(self.act2b(out)) + if self.attn is not None: + out = self.attn_gain * self.attn(out) + out = self.conv3(self.act3(out)) + if self.attn_last is not None: + out = self.attn_gain * self.attn_last(out) + out = self.drop_path(out) + + if self.skipinit_gain is not None: + out.mul_(self.skipinit_gain) + out = out * self.alpha + shortcut + return out + + +def create_stem( + in_chs: int, + out_chs: int, + stem_type: str = '', + conv_layer: Optional[Callable] = None, + act_layer: Optional[Callable] = None, + preact_feature: bool = True, +): + stem_stride = 2 + stem_feature = dict(num_chs=out_chs, reduction=2, module='stem.conv') + stem = OrderedDict() + assert stem_type in ('', 'deep', 'deep_tiered', 'deep_quad', '3x3', '7x7', 'deep_pool', '3x3_pool', '7x7_pool') + if 'deep' in stem_type: + if 'quad' in stem_type: + # 4 deep conv stack as in NFNet-F models + assert not 'pool' in stem_type + stem_chs = (out_chs // 8, out_chs // 4, out_chs // 2, out_chs) + strides = (2, 1, 1, 2) + stem_stride = 4 + stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv3') + else: + if 'tiered' in stem_type: + stem_chs = (3 * out_chs // 8, out_chs // 2, out_chs) # 'T' resnets in resnet.py + else: + stem_chs = (out_chs // 2, out_chs // 2, out_chs) # 'D' ResNets + strides = (2, 1, 1) + stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv2') + last_idx = len(stem_chs) - 1 + for i, (c, s) in enumerate(zip(stem_chs, strides)): + stem[f'conv{i + 1}'] = conv_layer(in_chs, c, kernel_size=3, stride=s) + if i != last_idx: + stem[f'act{i + 2}'] = act_layer(inplace=True) + in_chs = c + elif '3x3' in stem_type: + # 3x3 stem conv as in RegNet + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=3, stride=2) + else: + # 7x7 stem conv as in ResNet + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) + + if 'pool' in stem_type: + stem['pool'] = nn.MaxPool2d(3, stride=2, padding=1) + stem_stride = 4 + + return nn.Sequential(stem), stem_stride, stem_feature + + +# from https://github.com/deepmind/deepmind-research/tree/master/nfnets +_nonlin_gamma = dict( + identity=1.0, + celu=1.270926833152771, + elu=1.2716004848480225, + gelu=1.7015043497085571, + leaky_relu=1.70590341091156, + log_sigmoid=1.9193484783172607, + log_softmax=1.0002083778381348, + relu=1.7139588594436646, + relu6=1.7131484746932983, + selu=1.0008515119552612, + sigmoid=4.803835391998291, + silu=1.7881293296813965, + softsign=2.338853120803833, + softplus=1.9203323125839233, + tanh=1.5939117670059204, +) + + +class NormFreeNet(nn.Module): + """ Normalization-Free Network + + As described in : + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + and + `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 + + This model aims to cover both the NFRegNet-Bx models as detailed in the paper's code 
snippets and + the (preact) ResNet models described earlier in the paper. + + There are a few differences: + * channels are rounded to be divisible by 8 by default (keep tensor core kernels happy), + this changes channel dim and param counts slightly from the paper models + * activation correcting gamma constants are moved into the ScaledStdConv as it has less performance + impact in PyTorch when done with the weight scaling there. This likely wasn't a concern in the JAX impl. + * a config option `gamma_in_act` can be enabled to not apply gamma in StdConv as described above, but + apply it in each activation. This is slightly slower, numerically different, but matches official impl. + * skipinit is disabled by default, it seems to have a rather drastic impact on GPU memory use and throughput + for what it is/does. Approx 8-10% throughput loss. + """ + def __init__( + self, + cfg: NfCfg, + num_classes: int = 1000, + in_chans: int = 3, + global_pool: str = 'avg', + output_stride: int = 32, + drop_rate: float = 0., + drop_path_rate: float = 0., + **kwargs, + ): + """ + Args: + cfg: Model architecture configuration. + num_classes: Number of classifier classes. + in_chans: Number of input channels. + global_pool: Global pooling type. + output_stride: Output stride of network, one of (8, 16, 32). + drop_rate: Dropout rate. + drop_path_rate: Stochastic depth drop-path rate. + **kwargs: Extra kwargs overlayed onto cfg. + """ + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + + cfg = replace(cfg, **kwargs) + assert cfg.act_layer in _nonlin_gamma, f"Please add non-linearity constants for activation ({cfg.act_layer})." + conv_layer = ScaledStdConv2dSame if cfg.same_padding else ScaledStdConv2d + if cfg.gamma_in_act: + act_layer = act_with_gamma(cfg.act_layer, gamma=_nonlin_gamma[cfg.act_layer]) + conv_layer = partial(conv_layer, eps=cfg.std_conv_eps) + else: + act_layer = get_act_layer(cfg.act_layer) + conv_layer = partial(conv_layer, gamma=_nonlin_gamma[cfg.act_layer], eps=cfg.std_conv_eps) + attn_layer = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None + + stem_chs = make_divisible((cfg.stem_chs or cfg.channels[0]) * cfg.width_factor, cfg.ch_div) + self.stem, stem_stride, stem_feat = create_stem( + in_chans, + stem_chs, + cfg.stem_type, + conv_layer=conv_layer, + act_layer=act_layer, + ) + + self.feature_info = [stem_feat] + drop_path_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] + prev_chs = stem_chs + net_stride = stem_stride + dilation = 1 + expected_var = 1.0 + stages = [] + for stage_idx, stage_depth in enumerate(cfg.depths): + stride = 1 if stage_idx == 0 and stem_stride > 2 else 2 + if net_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + net_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + + blocks = [] + for block_idx in range(cfg.depths[stage_idx]): + first_block = block_idx == 0 and stage_idx == 0 + out_chs = make_divisible(cfg.channels[stage_idx] * cfg.width_factor, cfg.ch_div) + blocks += [NormFreeBlock( + in_chs=prev_chs, out_chs=out_chs, + alpha=cfg.alpha, + beta=1. / expected_var ** 0.5, + stride=stride if block_idx == 0 else 1, + dilation=dilation, + first_dilation=first_dilation, + group_size=cfg.group_size, + bottle_ratio=1. 
if cfg.reg and first_block else cfg.bottle_ratio, + ch_div=cfg.ch_div, + reg=cfg.reg, + extra_conv=cfg.extra_conv, + skipinit=cfg.skipinit, + attn_layer=attn_layer, + attn_gain=cfg.attn_gain, + act_layer=act_layer, + conv_layer=conv_layer, + drop_path_rate=drop_path_rates[stage_idx][block_idx], + )] + if block_idx == 0: + expected_var = 1. # expected var is reset after first block of each stage + expected_var += cfg.alpha ** 2 # Even if reset occurs, increment expected variance + first_dilation = dilation + prev_chs = out_chs + self.feature_info += [dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')] + stages += [nn.Sequential(*blocks)] + self.stages = nn.Sequential(*stages) + + if cfg.num_features: + # The paper NFRegNet models have an EfficientNet-like final head convolution. + self.num_features = make_divisible(cfg.width_factor * cfg.num_features, cfg.ch_div) + self.final_conv = conv_layer(prev_chs, self.num_features, 1) + self.feature_info[-1] = dict(num_chs=self.num_features, reduction=net_stride, module=f'final_conv') + else: + self.num_features = prev_chs + self.final_conv = nn.Identity() + self.final_act = act_layer(inplace=cfg.num_features > 0) + + self.head_hidden_size = self.num_features + self.head = ClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=self.drop_rate, + ) + + for n, m in self.named_modules(): + if 'fc' in n and isinstance(m, nn.Linear): + if cfg.zero_init_fc: + nn.init.zeros_(m.weight) + else: + nn.init.normal_(m.weight, 0., .01) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='linear') + if m.bias is not None: + nn.init.zeros_(m.bias) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', + blocks=[ + (r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.(\d+)', None), + (r'^final_conv', (99999,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.head.reset(num_classes, global_pool) + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x) + else: + x = self.stages(x) + x = self.final_conv(x) + x = self.final_act(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _nfres_cfg( + depths, + channels=(256, 512, 1024, 2048), + group_size=None, + act_layer='relu', + attn_layer=None, + attn_kwargs=None, +): + attn_kwargs = attn_kwargs or {} + cfg = NfCfg( + depths=depths, + channels=channels, + stem_type='7x7_pool', + stem_chs=64, + bottle_ratio=0.25, + group_size=group_size, + act_layer=act_layer, + attn_layer=attn_layer, + attn_kwargs=attn_kwargs, + ) + return cfg + + +def _nfreg_cfg(depths, channels=(48, 104, 208, 440)): + num_features = 1280 * channels[-1] // 440 + attn_kwargs = dict(rd_ratio=0.5) + cfg = NfCfg( + depths=depths, + channels=channels, + stem_type='3x3', + group_size=8, + width_factor=0.75, + bottle_ratio=2.25, + num_features=num_features, + reg=True, + attn_layer='se', + attn_kwargs=attn_kwargs, + ) + return 
cfg + + +def _nfnet_cfg( + depths, + channels=(256, 512, 1536, 1536), + group_size=128, + bottle_ratio=0.5, + feat_mult=2., + act_layer='gelu', + attn_layer='se', + attn_kwargs=None, +): + num_features = int(channels[-1] * feat_mult) + attn_kwargs = attn_kwargs if attn_kwargs is not None else dict(rd_ratio=0.5) + cfg = NfCfg( + depths=depths, + channels=channels, + stem_type='deep_quad', + stem_chs=128, + group_size=group_size, + bottle_ratio=bottle_ratio, + extra_conv=True, + num_features=num_features, + act_layer=act_layer, + attn_layer=attn_layer, + attn_kwargs=attn_kwargs, + ) + return cfg + + +def _dm_nfnet_cfg( + depths, + channels=(256, 512, 1536, 1536), + act_layer='gelu', + skipinit=True, +): + cfg = NfCfg( + depths=depths, + channels=channels, + stem_type='deep_quad', + stem_chs=128, + group_size=128, + bottle_ratio=0.5, + extra_conv=True, + gamma_in_act=True, + same_padding=True, + skipinit=skipinit, + num_features=int(channels[-1] * 2.0), + act_layer=act_layer, + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.5), + ) + return cfg + + +model_cfgs = dict( + # NFNet-F models w/ GELU compatible with DeepMind weights + dm_nfnet_f0=_dm_nfnet_cfg(depths=(1, 2, 6, 3)), + dm_nfnet_f1=_dm_nfnet_cfg(depths=(2, 4, 12, 6)), + dm_nfnet_f2=_dm_nfnet_cfg(depths=(3, 6, 18, 9)), + dm_nfnet_f3=_dm_nfnet_cfg(depths=(4, 8, 24, 12)), + dm_nfnet_f4=_dm_nfnet_cfg(depths=(5, 10, 30, 15)), + dm_nfnet_f5=_dm_nfnet_cfg(depths=(6, 12, 36, 18)), + dm_nfnet_f6=_dm_nfnet_cfg(depths=(7, 14, 42, 21)), + + # NFNet-F models w/ GELU + nfnet_f0=_nfnet_cfg(depths=(1, 2, 6, 3)), + nfnet_f1=_nfnet_cfg(depths=(2, 4, 12, 6)), + nfnet_f2=_nfnet_cfg(depths=(3, 6, 18, 9)), + nfnet_f3=_nfnet_cfg(depths=(4, 8, 24, 12)), + nfnet_f4=_nfnet_cfg(depths=(5, 10, 30, 15)), + nfnet_f5=_nfnet_cfg(depths=(6, 12, 36, 18)), + nfnet_f6=_nfnet_cfg(depths=(7, 14, 42, 21)), + nfnet_f7=_nfnet_cfg(depths=(8, 16, 48, 24)), + + # Experimental 'light' versions of NFNet-F that are little leaner, w/ SiLU act + nfnet_l0=_nfnet_cfg( + depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, + attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'), + eca_nfnet_l0=_nfnet_cfg( + depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l1=_nfnet_cfg( + depths=(2, 4, 12, 6), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l2=_nfnet_cfg( + depths=(3, 6, 18, 9), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l3=_nfnet_cfg( + depths=(4, 8, 24, 12), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + + # EffNet influenced RegNet defs. + # NOTE: These aren't quite the official ver, ch_div=1 must be set for exact ch counts. I round to ch_div=8. 
+ nf_regnet_b0=_nfreg_cfg(depths=(1, 3, 6, 6)), + nf_regnet_b1=_nfreg_cfg(depths=(2, 4, 7, 7)), + nf_regnet_b2=_nfreg_cfg(depths=(2, 4, 8, 8), channels=(56, 112, 232, 488)), + nf_regnet_b3=_nfreg_cfg(depths=(2, 5, 9, 9), channels=(56, 128, 248, 528)), + nf_regnet_b4=_nfreg_cfg(depths=(2, 6, 11, 11), channels=(64, 144, 288, 616)), + nf_regnet_b5=_nfreg_cfg(depths=(3, 7, 14, 14), channels=(80, 168, 336, 704)), + + # ResNet (preact, D style deep stem/avg down) defs + nf_resnet26=_nfres_cfg(depths=(2, 2, 2, 2)), + nf_resnet50=_nfres_cfg(depths=(3, 4, 6, 3)), + nf_resnet101=_nfres_cfg(depths=(3, 4, 23, 3)), + + nf_seresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + nf_seresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + nf_seresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + + nf_ecaresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='eca', attn_kwargs=dict()), + nf_ecaresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='eca', attn_kwargs=dict()), + nf_ecaresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='eca', attn_kwargs=dict()), + + test_nfnet=_nfnet_cfg( + depths=(1, 1, 1, 1), channels=(32, 64, 96, 128), feat_mult=1.5, group_size=8, bottle_ratio=0.25, + attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'), +) + + +def _create_normfreenet(variant, pretrained=False, **kwargs): + model_cfg = model_cfgs[variant] + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + NormFreeNet, + variant, + pretrained, + model_cfg=model_cfg, + feature_cfg=feature_cfg, + **kwargs, + ) + + +def _dcfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'dm_nfnet_f0.dm_in1k': _dcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f0-604f9c3a.pth', + pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), crop_pct=.9, crop_mode='squash'), + 'dm_nfnet_f1.dm_in1k': _dcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f1-fc540f82.pth', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320), crop_pct=0.91, crop_mode='squash'), + 'dm_nfnet_f2.dm_in1k': _dcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f2-89875923.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352), crop_pct=0.92, crop_mode='squash'), + 'dm_nfnet_f3.dm_in1k': _dcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f3-d74ab3aa.pth', + pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416), crop_pct=0.94, crop_mode='squash'), + 'dm_nfnet_f4.dm_in1k': _dcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f4-0ac5b10b.pth', + pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512), crop_pct=0.951, crop_mode='squash'), + 'dm_nfnet_f5.dm_in1k': _dcfg( + hf_hub_id='timm/', + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f5-ecb20ab1.pth', + pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544), crop_pct=0.954, crop_mode='squash'), + 'dm_nfnet_f6.dm_in1k': _dcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f6-e0f12116.pth', + pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576), crop_pct=0.956, crop_mode='squash'), + + 'nfnet_f0': _dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)), + 'nfnet_f1': _dcfg( + url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)), + 'nfnet_f2': _dcfg( + url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)), + 'nfnet_f3': _dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)), + 'nfnet_f4': _dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)), + 'nfnet_f5': _dcfg( + url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)), + 'nfnet_f6': _dcfg( + url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)), + 'nfnet_f7': _dcfg( + url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)), + + 'nfnet_l0.ra2_in1k': _dcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nfnet_l0_ra2-45c6688d.pth', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'eca_nfnet_l0.ra2_in1k': _dcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l0_ra2-e3e9ac50.pth', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'eca_nfnet_l1.ra2_in1k': _dcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l1_ra2-7dce93cd.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 320, 320), test_crop_pct=1.0), + 'eca_nfnet_l2.ra3_in1k': _dcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l2_ra3-da781a61.pth', + pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), test_crop_pct=1.0), + 'eca_nfnet_l3': _dcfg( + url='', + pool_size=(11, 11), input_size=(3, 352, 352), test_input_size=(3, 448, 448), test_crop_pct=1.0), + + 'nf_regnet_b0': _dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), first_conv='stem.conv'), + 'nf_regnet_b1.ra2_in1k': _dcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_regnet_b1_256_ra2-ad85cfef.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), first_conv='stem.conv'), # NOT to paper spec + 'nf_regnet_b2': _dcfg( + url='', pool_size=(8, 8), input_size=(3, 240, 240), test_input_size=(3, 272, 272), first_conv='stem.conv'), + 'nf_regnet_b3': _dcfg( + url='', pool_size=(9, 9), input_size=(3, 288, 288), test_input_size=(3, 320, 320), first_conv='stem.conv'), + 'nf_regnet_b4': _dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), first_conv='stem.conv'), + 'nf_regnet_b5': _dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 456, 
456), first_conv='stem.conv'), + + 'nf_resnet26': _dcfg(url='', first_conv='stem.conv'), + 'nf_resnet50.ra2_in1k': _dcfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_resnet50_ra2-9f236009.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), crop_pct=0.94, first_conv='stem.conv'), + 'nf_resnet101': _dcfg(url='', first_conv='stem.conv'), + + 'nf_seresnet26': _dcfg(url='', first_conv='stem.conv'), + 'nf_seresnet50': _dcfg(url='', first_conv='stem.conv'), + 'nf_seresnet101': _dcfg(url='', first_conv='stem.conv'), + + 'nf_ecaresnet26': _dcfg(url='', first_conv='stem.conv'), + 'nf_ecaresnet50': _dcfg(url='', first_conv='stem.conv'), + 'nf_ecaresnet101': _dcfg(url='', first_conv='stem.conv'), + + 'test_nfnet.r160_in1k': _dcfg( + hf_hub_id='timm/', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + crop_pct=0.95, input_size=(3, 160, 160), pool_size=(5, 5)), +}) + + +@register_model +def dm_nfnet_f0(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-F0 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f0', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f1(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-F1 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f1', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f2(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-F2 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f2', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f3(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-F3 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f3', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f4(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-F4 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f4', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f5(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-F5 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f5', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f6(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-F6 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f6', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f0(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-F0 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f0', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f1(pretrained=False, **kwargs) -> 
NormFreeNet: + """ NFNet-F1 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f1', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f2(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-F2 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f2', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f3(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-F3 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f3', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f4(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-F4 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f4', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f5(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-F5 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f5', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f6(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-F6 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f6', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f7(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-F7 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f7', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_l0(pretrained=False, **kwargs) -> NormFreeNet: + """ NFNet-L0b w/ SiLU + My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & SE ratio + """ + return _create_normfreenet('nfnet_l0', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l0(pretrained=False, **kwargs) -> NormFreeNet: + """ ECA-NFNet-L0 w/ SiLU + My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l0', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l1(pretrained=False, **kwargs) -> NormFreeNet: + """ ECA-NFNet-L1 w/ SiLU + My experimental 'light' model w/ F1 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l1', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l2(pretrained=False, **kwargs) -> NormFreeNet: + """ ECA-NFNet-L2 w/ SiLU + My experimental 'light' model w/ F2 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l2', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l3(pretrained=False, **kwargs) -> NormFreeNet: + """ ECA-NFNet-L3 w/ SiLU + My experimental 'light' model w/ F3 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l3', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b0(pretrained=False, **kwargs) -> NormFreeNet: + """ Normalization-Free RegNet-B0 + 
`Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b0', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b1(pretrained=False, **kwargs) -> NormFreeNet: + """ Normalization-Free RegNet-B1 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b1', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b2(pretrained=False, **kwargs) -> NormFreeNet: + """ Normalization-Free RegNet-B2 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b2', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b3(pretrained=False, **kwargs) -> NormFreeNet: + """ Normalization-Free RegNet-B3 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b3', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b4(pretrained=False, **kwargs) -> NormFreeNet: + """ Normalization-Free RegNet-B4 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b5(pretrained=False, **kwargs) -> NormFreeNet: + """ Normalization-Free RegNet-B5 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b5', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet26(pretrained=False, **kwargs) -> NormFreeNet: + """ Normalization-Free ResNet-26 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_resnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet50(pretrained=False, **kwargs) -> NormFreeNet: + """ Normalization-Free ResNet-50 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_resnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet101(pretrained=False, **kwargs) -> NormFreeNet: + """ Normalization-Free ResNet-101 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_resnet101', pretrained=pretrained, **kwargs) + + +@register_model +def nf_seresnet26(pretrained=False, **kwargs) -> NormFreeNet: + """ Normalization-Free SE-ResNet26 + """ + return _create_normfreenet('nf_seresnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_seresnet50(pretrained=False, **kwargs) -> NormFreeNet: + """ Normalization-Free SE-ResNet50 + """ + return _create_normfreenet('nf_seresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_seresnet101(pretrained=False, **kwargs) -> NormFreeNet: + """ Normalization-Free SE-ResNet101 + """ + return _create_normfreenet('nf_seresnet101', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet26(pretrained=False, **kwargs) -> NormFreeNet: + 
""" Normalization-Free ECA-ResNet26 + """ + return _create_normfreenet('nf_ecaresnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet50(pretrained=False, **kwargs) -> NormFreeNet: + """ Normalization-Free ECA-ResNet50 + """ + return _create_normfreenet('nf_ecaresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet101(pretrained=False, **kwargs) -> NormFreeNet: + """ Normalization-Free ECA-ResNet101 + """ + return _create_normfreenet('nf_ecaresnet101', pretrained=pretrained, **kwargs) + + +@register_model +def test_nfnet(pretrained=False, **kwargs) -> NormFreeNet: + return _create_normfreenet('test_nfnet', pretrained=pretrained, **kwargs) \ No newline at end of file diff --git a/pytorch-image-models/timm/models/pit.py b/pytorch-image-models/timm/models/pit.py new file mode 100644 index 0000000000000000000000000000000000000000..3a1090b89f3febad91648e4009e3a0a11363baad --- /dev/null +++ b/pytorch-image-models/timm/models/pit.py @@ -0,0 +1,460 @@ +""" Pooling-based Vision Transformer (PiT) in PyTorch + +A PyTorch implement of Pooling-based Vision Transformers as described in +'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302 + +This code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below. + +Modifications for timm by / Copyright 2020 Ross Wightman +""" +# PiT +# Copyright 2021-present NAVER Corp. +# Apache License v2.0 + +import math +import re +from functools import partial +from typing import Optional, Sequence, Tuple + +import torch +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import trunc_normal_, to_2tuple +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs +from .vision_transformer import Block + + +__all__ = ['PoolingVisionTransformer'] # model_registry will add each entrypoint fn to this + + +class SequentialTuple(nn.Sequential): + """ This module exists to work around torchscript typing issues list -> list""" + def __init__(self, *args): + super(SequentialTuple, self).__init__(*args) + + def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: + for module in self: + x = module(x) + return x + + +class Transformer(nn.Module): + def __init__( + self, + base_dim, + depth, + heads, + mlp_ratio, + pool=None, + proj_drop=.0, + attn_drop=.0, + drop_path_prob=None, + norm_layer=None, + ): + super(Transformer, self).__init__() + embed_dim = base_dim * heads + + self.pool = pool + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + self.blocks = nn.Sequential(*[ + Block( + dim=embed_dim, + num_heads=heads, + mlp_ratio=mlp_ratio, + qkv_bias=True, + proj_drop=proj_drop, + attn_drop=attn_drop, + drop_path=drop_path_prob[i], + norm_layer=partial(nn.LayerNorm, eps=1e-6) + ) + for i in range(depth)]) + + def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: + x, cls_tokens = x + token_length = cls_tokens.shape[1] + if self.pool is not None: + x, cls_tokens = self.pool(x, cls_tokens) + + B, C, H, W = x.shape + x = x.flatten(2).transpose(1, 2) + x = torch.cat((cls_tokens, x), dim=1) + + x = self.norm(x) + x = self.blocks(x) + + cls_tokens = x[:, :token_length] + x = x[:, token_length:] + x = x.transpose(1, 2).reshape(B, C, H, W) + + return x, cls_tokens + + +class Pooling(nn.Module): + def __init__(self, in_feature, out_feature, stride, 
padding_mode='zeros'): + super(Pooling, self).__init__() + + self.conv = nn.Conv2d( + in_feature, + out_feature, + kernel_size=stride + 1, + padding=stride // 2, + stride=stride, + padding_mode=padding_mode, + groups=in_feature, + ) + self.fc = nn.Linear(in_feature, out_feature) + + def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]: + x = self.conv(x) + cls_token = self.fc(cls_token) + return x, cls_token + + +class ConvEmbedding(nn.Module): + def __init__( + self, + in_channels, + out_channels, + img_size: int = 224, + patch_size: int = 16, + stride: int = 8, + padding: int = 0, + ): + super(ConvEmbedding, self).__init__() + padding = padding + self.img_size = to_2tuple(img_size) + self.patch_size = to_2tuple(patch_size) + self.height = math.floor((self.img_size[0] + 2 * padding - self.patch_size[0]) / stride + 1) + self.width = math.floor((self.img_size[1] + 2 * padding - self.patch_size[1]) / stride + 1) + self.grid_size = (self.height, self.width) + + self.conv = nn.Conv2d( + in_channels, out_channels, kernel_size=patch_size, + stride=stride, padding=padding, bias=True) + + def forward(self, x): + x = self.conv(x) + return x + + +class PoolingVisionTransformer(nn.Module): + """ Pooling-based Vision Transformer + + A PyTorch implement of 'Rethinking Spatial Dimensions of Vision Transformers' + - https://arxiv.org/abs/2103.16302 + """ + def __init__( + self, + img_size: int = 224, + patch_size: int = 16, + stride: int = 8, + stem_type: str = 'overlap', + base_dims: Sequence[int] = (48, 48, 48), + depth: Sequence[int] = (2, 6, 4), + heads: Sequence[int] = (2, 4, 8), + mlp_ratio: float = 4, + num_classes=1000, + in_chans=3, + global_pool='token', + distilled=False, + drop_rate=0., + pos_drop_drate=0., + proj_drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + ): + super(PoolingVisionTransformer, self).__init__() + assert global_pool in ('token',) + + self.base_dims = base_dims + self.heads = heads + embed_dim = base_dims[0] * heads[0] + self.num_classes = num_classes + self.global_pool = global_pool + self.num_tokens = 2 if distilled else 1 + self.feature_info = [] + + self.patch_embed = ConvEmbedding(in_chans, embed_dim, img_size, patch_size, stride) + self.pos_embed = nn.Parameter(torch.randn(1, embed_dim, self.patch_embed.height, self.patch_embed.width)) + self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, embed_dim)) + self.pos_drop = nn.Dropout(p=pos_drop_drate) + + transformers = [] + # stochastic depth decay rule + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)] + prev_dim = embed_dim + for i in range(len(depth)): + pool = None + embed_dim = base_dims[i] * heads[i] + if i > 0: + pool = Pooling( + prev_dim, + embed_dim, + stride=2, + ) + transformers += [Transformer( + base_dims[i], + depth[i], + heads[i], + mlp_ratio, + pool=pool, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path_prob=dpr[i], + )] + prev_dim = embed_dim + self.feature_info += [dict(num_chs=prev_dim, reduction=(stride - 1) * 2**i, module=f'transformers.{i}')] + + self.transformers = SequentialTuple(*transformers) + self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6) + self.num_features = self.head_hidden_size = self.embed_dim = embed_dim + + # Classifier head + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = None + if distilled: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else 
nn.Identity() + self.distilled_training = False # must set this True to train w/ distillation token + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + @torch.jit.ignore + def set_distilled_training(self, enable=True): + self.distilled_training = enable + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + def get_classifier(self) -> nn.Module: + if self.head_dist is not None: + return self.head, self.head_dist + else: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + if self.head_dist is not None: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + x = self.pos_drop(x + self.pos_embed) + cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) + x, cls_tokens = self.transformers((x, cls_tokens)) + cls_tokens = self.norm(cls_tokens) + return cls_tokens + + def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor: + if self.head_dist is not None: + assert self.global_pool == 'token' + x, x_dist = x[:, 0], x[:, 1] + x = self.head_drop(x) + x_dist = self.head_drop(x) + if not pre_logits: + x = self.head(x) + x_dist = self.head_dist(x_dist) + if self.distilled_training and self.training and not torch.jit.is_scripting(): + # only return separate classification predictions when training in distilled mode + return x, x_dist + else: + # during standard train / finetune, inference average the classifier predictions + return (x + x_dist) / 2 + else: + if self.global_pool == 'token': + x = x[:, 0] + x = self.head_drop(x) + if not pre_logits: + x = self.head(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + """ preprocess checkpoints """ + out_dict = {} + p_blocks = re.compile(r'pools\.(\d)\.') + for k, v in state_dict.items(): + # FIXME need to update resize for PiT impl + # if k == 'pos_embed' and v.shape != model.pos_embed.shape: + # # To resize pos embedding when using model at different size from pretrained weights + # v = resize_pos_embed(v, model.pos_embed) + k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1)) + 1}.pool.', k) + out_dict[k] = v + return out_dict + + +def _create_pit(variant, pretrained=False, **kwargs): + default_out_indices = tuple(range(3)) + out_indices = kwargs.pop('out_indices', default_out_indices) + + model = build_model_with_cfg( + PoolingVisionTransformer, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(feature_cls='hook', no_rewrite=True, out_indices=out_indices), + **kwargs, + ) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.conv', 
'classifier': 'head', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + # deit models (FB weights) + 'pit_ti_224.in1k': _cfg(hf_hub_id='timm/'), + 'pit_xs_224.in1k': _cfg(hf_hub_id='timm/'), + 'pit_s_224.in1k': _cfg(hf_hub_id='timm/'), + 'pit_b_224.in1k': _cfg(hf_hub_id='timm/'), + 'pit_ti_distilled_224.in1k': _cfg( + hf_hub_id='timm/', + classifier=('head', 'head_dist')), + 'pit_xs_distilled_224.in1k': _cfg( + hf_hub_id='timm/', + classifier=('head', 'head_dist')), + 'pit_s_distilled_224.in1k': _cfg( + hf_hub_id='timm/', + classifier=('head', 'head_dist')), + 'pit_b_distilled_224.in1k': _cfg( + hf_hub_id='timm/', + classifier=('head', 'head_dist')), +}) + + +@register_model +def pit_b_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: + model_args = dict( + patch_size=14, + stride=7, + base_dims=[64, 64, 64], + depth=[3, 6, 4], + heads=[4, 8, 16], + mlp_ratio=4, + ) + return _create_pit('pit_b_224', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def pit_s_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: + model_args = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[3, 6, 12], + mlp_ratio=4, + ) + return _create_pit('pit_s_224', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def pit_xs_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: + model_args = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + ) + return _create_pit('pit_xs_224', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def pit_ti_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: + model_args = dict( + patch_size=16, + stride=8, + base_dims=[32, 32, 32], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + ) + return _create_pit('pit_ti_224', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def pit_b_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: + model_args = dict( + patch_size=14, + stride=7, + base_dims=[64, 64, 64], + depth=[3, 6, 4], + heads=[4, 8, 16], + mlp_ratio=4, + distilled=True, + ) + return _create_pit('pit_b_distilled_224', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def pit_s_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: + model_args = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[3, 6, 12], + mlp_ratio=4, + distilled=True, + ) + return _create_pit('pit_s_distilled_224', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def pit_xs_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: + model_args = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + distilled=True, + ) + return _create_pit('pit_xs_distilled_224', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def pit_ti_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: + model_args = dict( + patch_size=16, + stride=8, + base_dims=[32, 32, 32], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + distilled=True, + ) + return _create_pit('pit_ti_distilled_224', pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/pnasnet.py b/pytorch-image-models/timm/models/pnasnet.py new file mode 100644 index 0000000000000000000000000000000000000000..20d17945b5a34eb0d748992106cfdf6a510a0a91 --- /dev/null +++ b/pytorch-image-models/timm/models/pnasnet.py @@ -0,0 +1,378 @@ +""" 
+ pnasnet5large implementation grabbed from Cadene's pretrained models + Additional credit to https://github.com/creafz + + https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/pnasnet.py + +""" +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs + +__all__ = ['PNASNet5Large'] + + +class SeparableConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): + super(SeparableConv2d, self).__init__() + self.depthwise_conv2d = create_conv2d( + in_channels, in_channels, kernel_size=kernel_size, + stride=stride, padding=padding, groups=in_channels) + self.pointwise_conv2d = create_conv2d( + in_channels, out_channels, kernel_size=1, padding=padding) + + def forward(self, x): + x = self.depthwise_conv2d(x) + x = self.pointwise_conv2d(x) + return x + + +class BranchSeparables(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, stem_cell=False, padding=''): + super(BranchSeparables, self).__init__() + middle_channels = out_channels if stem_cell else in_channels + self.act_1 = nn.ReLU() + self.separable_1 = SeparableConv2d( + in_channels, middle_channels, kernel_size, stride=stride, padding=padding) + self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001) + self.act_2 = nn.ReLU() + self.separable_2 = SeparableConv2d( + middle_channels, out_channels, kernel_size, stride=1, padding=padding) + self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act_1(x) + x = self.separable_1(x) + x = self.bn_sep_1(x) + x = self.act_2(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class ActConvBn(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): + super(ActConvBn, self).__init__() + self.act = nn.ReLU() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act(x) + x = self.conv(x) + x = self.bn(x) + return x + + +class FactorizedReduction(nn.Module): + + def __init__(self, in_channels, out_channels, padding=''): + super(FactorizedReduction, self).__init__() + self.act = nn.ReLU() + self.path_1 = nn.Sequential(OrderedDict([ + ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), + ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)), + ])) + self.path_2 = nn.Sequential(OrderedDict([ + ('pad', nn.ZeroPad2d((-1, 1, -1, 1))), # shift + ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), + ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)), + ])) + self.final_path_bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act(x) + x_path1 = self.path_1(x) + x_path2 = self.path_2(x) + out = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + return out + + +class CellBase(nn.Module): + + def cell_forward(self, x_left, x_right): + x_comb_iter_0_left = self.comb_iter_0_left(x_left) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + 
x_comb_iter_1_right = self.comb_iter_1_right(x_right) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_left = self.comb_iter_3_left(x_comb_iter_2) + x_comb_iter_3_right = self.comb_iter_3_right(x_right) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_left) + if self.comb_iter_4_right is not None: + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + else: + x_comb_iter_4_right = x_right + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class CellStem0(CellBase): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(CellStem0, self).__init__() + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables( + in_chs_left, out_chs_left, kernel_size=5, stride=2, stem_cell=True, padding=pad_type) + self.comb_iter_0_right = nn.Sequential(OrderedDict([ + ('max_pool', create_pool2d('max', 3, stride=2, padding=pad_type)), + ('conv', create_conv2d(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)), + ('bn', nn.BatchNorm2d(out_chs_left, eps=0.001)), + ])) + + self.comb_iter_1_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=7, stride=2, padding=pad_type) + self.comb_iter_1_right = create_pool2d('max', 3, stride=2, padding=pad_type) + + self.comb_iter_2_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=5, stride=2, padding=pad_type) + self.comb_iter_2_right = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, stride=2, padding=pad_type) + + self.comb_iter_3_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, padding=pad_type) + self.comb_iter_3_right = create_pool2d('max', 3, stride=2, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables( + in_chs_right, out_chs_right, kernel_size=3, stride=2, stem_cell=True, padding=pad_type) + self.comb_iter_4_right = ActConvBn( + out_chs_right, out_chs_right, kernel_size=1, stride=2, padding=pad_type) + + def forward(self, x_left): + x_right = self.conv_1x1(x_left) + x_out = self.cell_forward(x_left, x_right) + return x_out + + +class Cell(CellBase): + + def __init__( + self, + in_chs_left, + out_chs_left, + in_chs_right, + out_chs_right, + pad_type='', + is_reduction=False, + match_prev_layer_dims=False, + ): + super(Cell, self).__init__() + + # If `is_reduction` is set to `True` stride 2 is used for + # convolution and pooling layers to reduce the spatial size of + # the output of a cell approximately by a factor of 2. + stride = 2 if is_reduction else 1 + + # If `match_prev_layer_dimensions` is set to `True` + # `FactorizedReduction` is used to reduce the spatial size + # of the left input of a cell approximately by a factor of 2. 
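+ # This applies to cells that directly follow a reduction cell: their left input
+ # comes from two cells back and is at twice the spatial resolution of the right
+ # input, so both paths must match before the five comb_iter outputs are
+ # concatenated in cell_forward().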
+ self.match_prev_layer_dimensions = match_prev_layer_dims + if match_prev_layer_dims: + self.conv_prev_1x1 = FactorizedReduction(in_chs_left, out_chs_left, padding=pad_type) + else: + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables( + out_chs_left, out_chs_left, kernel_size=5, stride=stride, padding=pad_type) + self.comb_iter_0_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_1_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=7, stride=stride, padding=pad_type) + self.comb_iter_1_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_2_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=5, stride=stride, padding=pad_type) + self.comb_iter_2_right = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, stride=stride, padding=pad_type) + + self.comb_iter_3_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3) + self.comb_iter_3_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables( + out_chs_left, out_chs_left, kernel_size=3, stride=stride, padding=pad_type) + if is_reduction: + self.comb_iter_4_right = ActConvBn( + out_chs_right, out_chs_right, kernel_size=1, stride=stride, padding=pad_type) + else: + self.comb_iter_4_right = None + + def forward(self, x_left, x_right): + x_left = self.conv_prev_1x1(x_left) + x_right = self.conv_1x1(x_right) + x_out = self.cell_forward(x_left, x_right) + return x_out + + +class PNASNet5Large(nn.Module): + def __init__( + self, + num_classes=1000, + in_chans=3, + output_stride=32, + drop_rate=0., + global_pool='avg', + pad_type='', + ): + super(PNASNet5Large, self).__init__() + self.num_classes = num_classes + self.num_features = self.head_hidden_size = 4320 + assert output_stride == 32 + + self.conv_0 = ConvNormAct( + in_chans, 96, kernel_size=3, stride=2, padding=0, + norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) + + self.cell_stem_0 = CellStem0( + in_chs_left=96, out_chs_left=54, in_chs_right=96, out_chs_right=54, pad_type=pad_type) + + self.cell_stem_1 = Cell( + in_chs_left=96, out_chs_left=108, in_chs_right=270, out_chs_right=108, pad_type=pad_type, + match_prev_layer_dims=True, is_reduction=True) + self.cell_0 = Cell( + in_chs_left=270, out_chs_left=216, in_chs_right=540, out_chs_right=216, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_1 = Cell( + in_chs_left=540, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + self.cell_2 = Cell( + in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + self.cell_3 = Cell( + in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + + self.cell_4 = Cell( + in_chs_left=1080, out_chs_left=432, in_chs_right=1080, out_chs_right=432, pad_type=pad_type, + is_reduction=True) + self.cell_5 = Cell( + in_chs_left=1080, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_6 = Cell( + in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) + self.cell_7 = Cell( + in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) + + self.cell_8 = Cell( + in_chs_left=2160, out_chs_left=864, 
in_chs_right=2160, out_chs_right=864, pad_type=pad_type, + is_reduction=True) + self.cell_9 = Cell( + in_chs_left=2160, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_10 = Cell( + in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) + self.cell_11 = Cell( + in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) + self.act = nn.ReLU() + self.feature_info = [ + dict(num_chs=96, reduction=2, module='conv_0'), + dict(num_chs=270, reduction=4, module='cell_stem_1.conv_1x1.act'), + dict(num_chs=1080, reduction=8, module='cell_4.conv_1x1.act'), + dict(num_chs=2160, reduction=16, module='cell_8.conv_1x1.act'), + dict(num_chs=4320, reduction=32, module='act'), + ] + + self.global_pool, self.head_drop, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict(stem=r'^conv_0|cell_stem_[01]', blocks=r'^cell_(\d+)') + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.last_linear + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x_conv_0 = self.conv_0(x) + x_stem_0 = self.cell_stem_0(x_conv_0) + x_stem_1 = self.cell_stem_1(x_conv_0, x_stem_0) + x_cell_0 = self.cell_0(x_stem_0, x_stem_1) + x_cell_1 = self.cell_1(x_stem_1, x_cell_0) + x_cell_2 = self.cell_2(x_cell_0, x_cell_1) + x_cell_3 = self.cell_3(x_cell_1, x_cell_2) + x_cell_4 = self.cell_4(x_cell_2, x_cell_3) + x_cell_5 = self.cell_5(x_cell_3, x_cell_4) + x_cell_6 = self.cell_6(x_cell_4, x_cell_5) + x_cell_7 = self.cell_7(x_cell_5, x_cell_6) + x_cell_8 = self.cell_8(x_cell_6, x_cell_7) + x_cell_9 = self.cell_9(x_cell_7, x_cell_8) + x_cell_10 = self.cell_10(x_cell_8, x_cell_9) + x_cell_11 = self.cell_11(x_cell_9, x_cell_10) + x = self.act(x_cell_11) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.head_drop(x) + return x if pre_logits else self.last_linear(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_pnasnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + PNASNet5Large, + variant, + pretrained, + feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model + **kwargs, + ) + + +default_cfgs = generate_default_cfgs({ + 'pnasnet5large.tf_in1k': { + 'hf_hub_id': 'timm/', + 'input_size': (3, 331, 331), + 'pool_size': (11, 11), + 'crop_pct': 0.911, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv_0.conv', + 'classifier': 'last_linear', + }, +}) + + +@register_model +def pnasnet5large(pretrained=False, **kwargs) -> PNASNet5Large: + r"""PNASNet-5 model architecture from the + `"Progressive Neural Architecture Search" + `_ paper. 
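+
+ Note: this entrypoint builds the model with 'same' padding (pad_type='same'); the
+ pretrained 'tf_in1k' weights expect 331x331 inputs by default (see the
+ 'pnasnet5large.tf_in1k' entry in default_cfgs above).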
+ """ + model_kwargs = dict(pad_type='same', **kwargs) + return _create_pnasnet('pnasnet5large', pretrained, **model_kwargs) diff --git a/pytorch-image-models/timm/models/pvt_v2.py b/pytorch-image-models/timm/models/pvt_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..9200bbd45151e8ca6cf7a6ac15c0ef2ea891f9ab --- /dev/null +++ b/pytorch-image-models/timm/models/pvt_v2.py @@ -0,0 +1,503 @@ +""" Pyramid Vision Transformer v2 + +@misc{wang2021pvtv2, + title={PVTv2: Improved Baselines with Pyramid Vision Transformer}, + author={Wenhai Wang and Enze Xie and Xiang Li and Deng-Ping Fan and Kaitao Song and Ding Liang and + Tong Lu and Ping Luo and Ling Shao}, + year={2021}, + eprint={2106.13797}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} + +Based on Apache 2.0 licensed code at https://github.com/whai362/PVT + +Modifications and timm support by / Copyright 2022, Ross Wightman +""" + +import math +from typing import Callable, List, Optional, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import DropPath, to_2tuple, to_ntuple, trunc_normal_, LayerNorm, use_fused_attn +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs + +__all__ = ['PyramidVisionTransformerV2'] + + +class MlpWithDepthwiseConv(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0., + extra_relu=False, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.relu = nn.ReLU() if extra_relu else nn.Identity() + self.dwconv = nn.Conv2d(hidden_features, hidden_features, 3, 1, 1, bias=True, groups=hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x, feat_size: List[int]): + x = self.fc1(x) + B, N, C = x.shape + x = x.transpose(1, 2).view(B, C, feat_size[0], feat_size[1]) + x = self.relu(x) + x = self.dwconv(x) + x = x.flatten(2).transpose(1, 2) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + fused_attn: torch.jit.Final[bool] + + def __init__( + self, + dim, + num_heads=8, + sr_ratio=1, + linear_attn=False, + qkv_bias=True, + attn_drop=0., + proj_drop=0. + ): + super().__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
+ + self.dim = dim + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.scale = self.head_dim ** -0.5 + self.fused_attn = use_fused_attn() + + self.q = nn.Linear(dim, dim, bias=qkv_bias) + self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + if not linear_attn: + self.pool = None + if sr_ratio > 1: + self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + self.norm = nn.LayerNorm(dim) + else: + self.sr = None + self.norm = None + self.act = None + else: + self.pool = nn.AdaptiveAvgPool2d(7) + self.sr = nn.Conv2d(dim, dim, kernel_size=1, stride=1) + self.norm = nn.LayerNorm(dim) + self.act = nn.GELU() + + def forward(self, x, feat_size: List[int]): + B, N, C = x.shape + H, W = feat_size + q = self.q(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) + + if self.pool is not None: + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.sr(self.pool(x)).reshape(B, C, -1).permute(0, 2, 1) + x = self.norm(x) + x = self.act(x) + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + else: + if self.sr is not None: + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1) + x = self.norm(x) + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + else: + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + k, v = kv.unbind(0) + + if self.fused_attn: + x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + sr_ratio=1, + linear_attn=False, + qkv_bias=False, + proj_drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=LayerNorm, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + sr_ratio=sr_ratio, + linear_attn=linear_attn, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + ) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = MlpWithDepthwiseConv( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + extra_relu=linear_attn, + ) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, x, feat_size: List[int]): + x = x + self.drop_path1(self.attn(self.norm1(x), feat_size)) + x = x + self.drop_path2(self.mlp(self.norm2(x), feat_size)) + + return x + + +class OverlapPatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + def __init__(self, patch_size=7, stride=4, in_chans=3, embed_dim=768): + super().__init__() + patch_size = to_2tuple(patch_size) + assert max(patch_size) > stride, "Set larger patch_size than stride" + self.patch_size = patch_size + self.proj = nn.Conv2d( + in_chans, embed_dim, patch_size, + stride=stride, padding=(patch_size[0] // 2, patch_size[1] // 2)) + self.norm = nn.LayerNorm(embed_dim) + + def forward(self, x): + x = self.proj(x) + x = x.permute(0, 2, 3, 1) + x = self.norm(x) + return x + + +class PyramidVisionTransformerStage(nn.Module): + def __init__( + self, + dim: int, + dim_out: int, + depth: int, + downsample: bool = True, + num_heads: int = 8, + sr_ratio: int = 1, + linear_attn: bool = False, + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + proj_drop: float = 0., + attn_drop: float = 0., + drop_path: Union[List[float], float] = 0.0, + norm_layer: Callable = LayerNorm, + ): + super().__init__() + self.grad_checkpointing = False + + if downsample: + self.downsample = OverlapPatchEmbed( + patch_size=3, + stride=2, + in_chans=dim, + embed_dim=dim_out, + ) + else: + assert dim == dim_out + self.downsample = None + + self.blocks = nn.ModuleList([Block( + dim=dim_out, + num_heads=num_heads, + sr_ratio=sr_ratio, + linear_attn=linear_attn, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_drop=proj_drop, + attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + ) for i in range(depth)]) + + self.norm = norm_layer(dim_out) + + def forward(self, x): + # x is either B, C, H, W (if downsample) or B, H, W, C if not + if self.downsample is not None: + # input to downsample is B, C, H, W + x = self.downsample(x) # output B, H, W, C + B, H, W, C = x.shape + feat_size = (H, W) + x = x.reshape(B, -1, C) + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint.checkpoint(blk, x, feat_size) + else: + x = blk(x, feat_size) + x = self.norm(x) + x = x.reshape(B, feat_size[0], feat_size[1], -1).permute(0, 3, 1, 2).contiguous() + return x + + +class PyramidVisionTransformerV2(nn.Module): + def __init__( + self, + in_chans=3, + num_classes=1000, + global_pool='avg', + depths=(3, 4, 6, 3), + embed_dims=(64, 128, 256, 512), + num_heads=(1, 2, 4, 8), + sr_ratios=(8, 4, 2, 1), + mlp_ratios=(8., 8., 4., 4.), + qkv_bias=True, + linear=False, + drop_rate=0., + proj_drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=LayerNorm, + ): + super().__init__() + self.num_classes = num_classes + assert global_pool in ('avg', '') + self.global_pool = global_pool + self.depths = depths + num_stages = len(depths) + mlp_ratios = to_ntuple(num_stages)(mlp_ratios) + num_heads = to_ntuple(num_stages)(num_heads) + sr_ratios = to_ntuple(num_stages)(sr_ratios) + assert(len(embed_dims)) == num_stages + self.feature_info = [] + + self.patch_embed = OverlapPatchEmbed( + patch_size=7, + stride=4, + in_chans=in_chans, + embed_dim=embed_dims[0], + ) + + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + cur = 0 + prev_dim = embed_dims[0] + stages = [] + for i in range(num_stages): + stages += [PyramidVisionTransformerStage( + dim=prev_dim, + dim_out=embed_dims[i], + 
depth=depths[i], + downsample=i > 0, + num_heads=num_heads[i], + sr_ratio=sr_ratios[i], + mlp_ratio=mlp_ratios[i], + linear_attn=linear, + qkv_bias=qkv_bias, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + )] + prev_dim = embed_dims[i] + cur += depths[i] + self.feature_info += [dict(num_chs=prev_dim, reduction=4 * 2**i, module=f'stages.{i}')] + self.stages = nn.Sequential(*stages) + + # classification head + self.num_features = self.head_hidden_size = embed_dims[-1] + self.head_drop = nn.Dropout(drop_rate) + self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def freeze_patch_emb(self): + self.patch_embed.requires_grad = False + + @torch.jit.ignore + def no_weight_decay(self): + return {} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^patch_embed', # stem and embed + blocks=r'^stages\.(\d+)' + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('avg', '') + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + x = self.stages(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x.mean(dim=(-1, -2)) + x = self.head_drop(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + """ Remap original checkpoints -> timm """ + if 'patch_embed.proj.weight' in state_dict: + return state_dict # non-original checkpoint, no remapping needed + + out_dict = {} + import re + for k, v in state_dict.items(): + if k.startswith('patch_embed'): + k = k.replace('patch_embed1', 'patch_embed') + k = k.replace('patch_embed2', 'stages.1.downsample') + k = k.replace('patch_embed3', 'stages.2.downsample') + k = k.replace('patch_embed4', 'stages.3.downsample') + k = k.replace('dwconv.dwconv', 'dwconv') + k = re.sub(r'block(\d+).(\d+)', lambda x: f'stages.{int(x.group(1)) - 1}.blocks.{x.group(2)}', k) + k = re.sub(r'^norm(\d+)', lambda x: f'stages.{int(x.group(1)) - 1}.norm', k) + out_dict[k] = v + return out_dict + + +def _create_pvt2(variant, pretrained=False, **kwargs): + default_out_indices = tuple(range(4)) + out_indices = kwargs.pop('out_indices', default_out_indices) + model = build_model_with_cfg( + PyramidVisionTransformerV2, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs, + ) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': 
(7, 7), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', 'fixed_input_size': False, + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'pvt_v2_b0.in1k': _cfg(hf_hub_id='timm/'), + 'pvt_v2_b1.in1k': _cfg(hf_hub_id='timm/'), + 'pvt_v2_b2.in1k': _cfg(hf_hub_id='timm/'), + 'pvt_v2_b3.in1k': _cfg(hf_hub_id='timm/'), + 'pvt_v2_b4.in1k': _cfg(hf_hub_id='timm/'), + 'pvt_v2_b5.in1k': _cfg(hf_hub_id='timm/'), + 'pvt_v2_b2_li.in1k': _cfg(hf_hub_id='timm/'), +}) + + +@register_model +def pvt_v2_b0(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: + model_args = dict(depths=(2, 2, 2, 2), embed_dims=(32, 64, 160, 256), num_heads=(1, 2, 5, 8)) + return _create_pvt2('pvt_v2_b0', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def pvt_v2_b1(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: + model_args = dict(depths=(2, 2, 2, 2), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) + return _create_pvt2('pvt_v2_b1', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def pvt_v2_b2(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: + model_args = dict(depths=(3, 4, 6, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) + return _create_pvt2('pvt_v2_b2', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def pvt_v2_b3(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: + model_args = dict(depths=(3, 4, 18, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) + return _create_pvt2('pvt_v2_b3', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def pvt_v2_b4(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: + model_args = dict(depths=(3, 8, 27, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) + return _create_pvt2('pvt_v2_b4', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def pvt_v2_b5(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: + model_args = dict( + depths=(3, 6, 40, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), mlp_ratios=(4, 4, 4, 4)) + return _create_pvt2('pvt_v2_b5', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def pvt_v2_b2_li(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: + model_args = dict( + depths=(3, 4, 6, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), linear=True) + return _create_pvt2('pvt_v2_b2_li', pretrained=pretrained, **dict(model_args, **kwargs)) + diff --git a/pytorch-image-models/timm/models/rdnet.py b/pytorch-image-models/timm/models/rdnet.py new file mode 100644 index 0000000000000000000000000000000000000000..af00548c6dc7d493d8f57799a209b9a86dd309c2 --- /dev/null +++ b/pytorch-image-models/timm/models/rdnet.py @@ -0,0 +1,505 @@ +""" +RDNet +Copyright (c) 2024-present NAVER Cloud Corp. 
+Apache-2.0 +""" + +from functools import partial +from typing import List, Optional, Tuple, Union, Callable + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import DropPath, NormMlpClassifierHead, ClassifierHead, EffectiveSEModule, \ + make_divisible, get_act_layer, get_norm_layer +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._manipulate import named_apply +from ._registry import register_model, generate_default_cfgs + +__all__ = ["RDNet"] + + +class Block(nn.Module): + def __init__(self, in_chs, inter_chs, out_chs, norm_layer, act_layer): + super().__init__() + self.layers = nn.Sequential( + nn.Conv2d(in_chs, in_chs, groups=in_chs, kernel_size=7, stride=1, padding=3), + norm_layer(in_chs), + nn.Conv2d(in_chs, inter_chs, kernel_size=1, stride=1, padding=0), + act_layer(), + nn.Conv2d(inter_chs, out_chs, kernel_size=1, stride=1, padding=0), + ) + + def forward(self, x): + return self.layers(x) + + +class BlockESE(nn.Module): + def __init__(self, in_chs, inter_chs, out_chs, norm_layer, act_layer): + super().__init__() + self.layers = nn.Sequential( + nn.Conv2d(in_chs, in_chs, groups=in_chs, kernel_size=7, stride=1, padding=3), + norm_layer(in_chs), + nn.Conv2d(in_chs, inter_chs, kernel_size=1, stride=1, padding=0), + act_layer(), + nn.Conv2d(inter_chs, out_chs, kernel_size=1, stride=1, padding=0), + EffectiveSEModule(out_chs), + ) + + def forward(self, x): + return self.layers(x) + + +def _get_block_type(block: str): + block = block.lower().strip() + if block == "block": + return Block + elif block == "blockese": + return BlockESE + else: + assert False, f"Unknown block type ({block})." + + +class DenseBlock(nn.Module): + def __init__( + self, + num_input_features: int = 64, + growth_rate: int = 64, + bottleneck_width_ratio: float = 4.0, + drop_path_rate: float = 0.0, + drop_rate: float = 0.0, + rand_gather_step_prob: float = 0.0, + block_idx: int = 0, + block_type: str = "Block", + ls_init_value: float = 1e-6, + norm_layer: str = "layernorm2d", + act_layer: str = "gelu", + ): + super().__init__() + self.drop_rate = drop_rate + self.drop_path_rate = drop_path_rate + self.rand_gather_step_prob = rand_gather_step_prob + self.block_idx = block_idx + self.growth_rate = growth_rate + + self.gamma = nn.Parameter(ls_init_value * torch.ones(growth_rate)) if ls_init_value > 0 else None + growth_rate = int(growth_rate) + inter_chs = int(num_input_features * bottleneck_width_ratio / 8) * 8 + + self.drop_path = DropPath(drop_path_rate) + + self.layers = _get_block_type(block_type)( + in_chs=num_input_features, + inter_chs=inter_chs, + out_chs=growth_rate, + norm_layer=norm_layer, + act_layer=act_layer, + ) + + def forward(self, x: List[torch.Tensor]) -> torch.Tensor: + x = torch.cat(x, 1) + x = self.layers(x) + + if self.gamma is not None: + x = x.mul(self.gamma.reshape(1, -1, 1, 1)) + + x = self.drop_path(x) + return x + + +class DenseStage(nn.Sequential): + def __init__(self, num_block, num_input_features, drop_path_rates, growth_rate, **kwargs): + super().__init__() + for i in range(num_block): + layer = DenseBlock( + num_input_features=num_input_features, + growth_rate=growth_rate, + drop_path_rate=drop_path_rates[i], + block_idx=i, + **kwargs, + ) + num_input_features += growth_rate + self.add_module(f"dense_block{i}", layer) + self.num_out_features = num_input_features + + def forward(self, init_feature: torch.Tensor) -> torch.Tensor: + features = [init_feature] + for 
module in self: + new_feature = module(features) + features.append(new_feature) + return torch.cat(features, 1) + + +class RDNet(nn.Module): + def __init__( + self, + in_chans: int = 3, # timm option [--in-chans] + num_classes: int = 1000, # timm option [--num-classes] + global_pool: str = 'avg', # timm option [--gp] + growth_rates: Union[List[int], Tuple[int]] = (64, 104, 128, 128, 128, 128, 224), + num_blocks_list: Union[List[int], Tuple[int]] = (3, 3, 3, 3, 3, 3, 3), + block_type: Union[List[int], Tuple[int]] = ("Block",) * 2 + ("BlockESE",) * 5, + is_downsample_block: Union[List[bool], Tuple[bool]] = (None, True, True, False, False, False, True), + bottleneck_width_ratio: float = 4.0, + transition_compression_ratio: float = 0.5, + ls_init_value: float = 1e-6, + stem_type: str = 'patch', + patch_size: int = 4, + num_init_features: int = 64, + head_init_scale: float = 1., + head_norm_first: bool = False, + conv_bias: bool = True, + act_layer: Union[str, Callable] = 'gelu', + norm_layer: str = "layernorm2d", + norm_eps: Optional[float] = None, + drop_rate: float = 0.0, # timm option [--drop: dropout ratio] + drop_path_rate: float = 0.0, # timm option [--drop-path: drop-path ratio] + ): + """ + Args: + in_chans: Number of input image channels. + num_classes: Number of classes for classification head. + global_pool: Global pooling type. + growth_rates: Growth rate at each stage. + num_blocks_list: Number of blocks at each stage. + is_downsample_block: Whether to downsample at each stage. + bottleneck_width_ratio: Bottleneck width ratio (similar to mlp expansion ratio). + transition_compression_ratio: Channel compression ratio of transition layers. + ls_init_value: Init value for Layer Scale, disabled if None. + stem_type: Type of stem. + patch_size: Stem patch size for patch stem. + num_init_features: Number of features of stem. + head_init_scale: Init scaling value for classifier weights and biases. + head_norm_first: Apply normalization before global pool + head. + conv_bias: Use bias layers w/ all convolutions. + act_layer: Activation layer type. + norm_layer: Normalization layer type. + norm_eps: Small value to avoid division by zero in normalization. + drop_rate: Head pre-classifier dropout rate. + drop_path_rate: Stochastic depth drop rate. 
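+
+ Note:
+ A transition block (norm + compression conv) precedes every stage after the
+ first; the conv is 1x1 normally, or 2x2 with stride 2 when is_downsample_block[i]
+ is True, halving the spatial resolution before that stage's dense blocks.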
+ """ + super().__init__() + assert len(growth_rates) == len(num_blocks_list) == len(is_downsample_block) + act_layer = get_act_layer(act_layer) + norm_layer = get_norm_layer(norm_layer) + if norm_eps is not None: + norm_layer = partial(norm_layer, eps=norm_eps) + + self.num_classes = num_classes + self.drop_rate = drop_rate + + # stem + assert stem_type in ('patch', 'overlap', 'overlap_tiered') + if stem_type == 'patch': + # NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4 + self.stem = nn.Sequential( + nn.Conv2d(in_chans, num_init_features, kernel_size=patch_size, stride=patch_size, bias=conv_bias), + norm_layer(num_init_features), + ) + stem_stride = patch_size + else: + mid_chs = make_divisible(num_init_features // 2) if 'tiered' in stem_type else num_init_features + self.stem = nn.Sequential( + nn.Conv2d(in_chans, mid_chs, kernel_size=3, stride=2, padding=1, bias=conv_bias), + nn.Conv2d(mid_chs, num_init_features, kernel_size=3, stride=2, padding=1, bias=conv_bias), + norm_layer(num_init_features), + ) + stem_stride = 4 + + # features + self.feature_info = [] + self.num_stages = len(growth_rates) + curr_stride = stem_stride + num_features = num_init_features + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(num_blocks_list)).split(num_blocks_list)] + + dense_stages = [] + for i in range(self.num_stages): + dense_stage_layers = [] + if i != 0: + compressed_num_features = int(num_features * transition_compression_ratio / 8) * 8 + k_size = stride = 1 + if is_downsample_block[i]: + curr_stride *= 2 + k_size = stride = 2 + + dense_stage_layers.append(norm_layer(num_features)) + dense_stage_layers.append( + nn.Conv2d(num_features, compressed_num_features, kernel_size=k_size, stride=stride, padding=0) + ) + num_features = compressed_num_features + + stage = DenseStage( + num_block=num_blocks_list[i], + num_input_features=num_features, + growth_rate=growth_rates[i], + bottleneck_width_ratio=bottleneck_width_ratio, + drop_rate=drop_rate, + drop_path_rates=dp_rates[i], + ls_init_value=ls_init_value, + block_type=block_type[i], + norm_layer=norm_layer, + act_layer=act_layer, + ) + dense_stage_layers.append(stage) + num_features += num_blocks_list[i] * growth_rates[i] + + if i + 1 == self.num_stages or (i + 1 != self.num_stages and is_downsample_block[i + 1]): + self.feature_info += [ + dict( + num_chs=num_features, + reduction=curr_stride, + module=f'dense_stages.{i}', + growth_rate=growth_rates[i], + ) + ] + dense_stages.append(nn.Sequential(*dense_stage_layers)) + self.dense_stages = nn.Sequential(*dense_stages) + self.num_features = self.head_hidden_size = num_features + + # if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets + # otherwise pool -> norm -> fc, the default RDNet ordering (pretrained NV weights) + if head_norm_first: + self.norm_pre = norm_layer(self.num_features) + self.head = ClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=self.drop_rate, + ) + else: + self.norm_pre = nn.Identity() + self.head = NormMlpClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=self.drop_rate, + norm_layer=norm_layer, + ) + + named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) 
-> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to compatible intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + """ + assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.dense_stages) + 1, indices) + + # forward pass + feat_idx = 0 # stem is index 0 + x = self.stem(x) + if feat_idx in take_indices: + intermediates.append(x) + + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + dense_stages = self.dense_stages + else: + dense_stages = self.dense_stages[:max_index] + for stage in dense_stages: + feat_idx += 1 + x = stage(x) + if feat_idx in take_indices: + # NOTE not bothering to apply norm_pre when norm=True as almost no models have it enabled + intermediates.append(x) + + if intermediates_only: + return intermediates + + x = self.norm_pre(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. + """ + take_indices, max_index = feature_take_indices(len(self.dense_stages) + 1, indices) + self.dense_stages = self.dense_stages[:max_index] # truncate blocks w/ stem as idx 0 + if prune_norm: + self.norm_pre = nn.Identity() + if prune_head: + self.reset_classifier(0, '') + return take_indices + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.head.reset(num_classes, global_pool) + + def forward_features(self, x): + x = self.stem(x) + x = self.dense_stages(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=True) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + @torch.jit.ignore + def group_matcher(self, coarse=False): + assert not coarse, "coarse grouping is not implemented for RDNet" + return dict( + stem=r'^stem', + blocks=r'^dense_stages\.(\d+)', + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.dense_stages: + s.grad_checkpointing = enable + + +def _init_weights(module, name=None, head_init_scale=1.0): + if isinstance(module, nn.Conv2d): + nn.init.kaiming_normal_(module.weight) + elif isinstance(module, nn.BatchNorm2d): + nn.init.constant_(module.weight, 1) + nn.init.constant_(module.bias, 0) + elif isinstance(module, nn.Linear): + nn.init.constant_(module.bias, 0) + if name and 'head.' 
in name: + module.weight.data.mul_(head_init_scale) + module.bias.data.mul_(head_init_scale) + + +def checkpoint_filter_fn(state_dict, model): + """ Remap NV checkpoints -> timm """ + if 'stem.0.weight' in state_dict: + return state_dict # non-NV checkpoint + if 'model' in state_dict: + state_dict = state_dict['model'] + + out_dict = {} + + for k, v in state_dict.items(): + k = k.replace('stem.stem.', 'stem.') + out_dict[k] = v + + return out_dict + + +def _create_rdnet(variant, pretrained=False, **kwargs): + model = build_model_with_cfg( + RDNet, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), + **kwargs) + return model + + +def _cfg(url='', **kwargs): + return { + "url": url, + "num_classes": 1000, "input_size": (3, 224, 224), "pool_size": (7, 7), + "crop_pct": 0.9, "interpolation": "bicubic", + "mean": IMAGENET_DEFAULT_MEAN, "std": IMAGENET_DEFAULT_STD, + "first_conv": "stem.0", "classifier": "head.fc", + "paper_ids": "arXiv:2403.19588", + "paper_name": "DenseNets Reloaded: Paradigm Shift Beyond ResNets and ViTs", + "origin_url": "https://github.com/naver-ai/rdnet", + **kwargs, + } + + +default_cfgs = generate_default_cfgs({ + 'rdnet_tiny.nv_in1k': _cfg( + hf_hub_id='naver-ai/rdnet_tiny.nv_in1k'), + 'rdnet_small.nv_in1k': _cfg( + hf_hub_id='naver-ai/rdnet_small.nv_in1k'), + 'rdnet_base.nv_in1k': _cfg( + hf_hub_id='naver-ai/rdnet_base.nv_in1k'), + 'rdnet_large.nv_in1k': _cfg( + hf_hub_id='naver-ai/rdnet_large.nv_in1k'), + 'rdnet_large.nv_in1k_ft_in1k_384': _cfg( + hf_hub_id='naver-ai/rdnet_large.nv_in1k_ft_in1k_384', + input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), +}) + + +@register_model +def rdnet_tiny(pretrained=False, **kwargs): + n_layer = 7 + model_args = { + "num_init_features": 64, + "growth_rates": [64] + [104] + [128] * 4 + [224], + "num_blocks_list": [3] * n_layer, + "is_downsample_block": (None, True, True, False, False, False, True), + "transition_compression_ratio": 0.5, + "block_type": ["Block"] + ["Block"] + ["BlockESE"] * 4 + ["BlockESE"], + } + model = _create_rdnet("rdnet_tiny", pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def rdnet_small(pretrained=False, **kwargs): + n_layer = 11 + model_args = { + "num_init_features": 72, + "growth_rates": [64] + [128] + [128] * (n_layer - 4) + [240] * 2, + "num_blocks_list": [3] * n_layer, + "is_downsample_block": (None, True, True, False, False, False, False, False, False, True, False), + "transition_compression_ratio": 0.5, + "block_type": ["Block"] + ["Block"] + ["BlockESE"] * (n_layer - 4) + ["BlockESE"] * 2, + } + model = _create_rdnet("rdnet_small", pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def rdnet_base(pretrained=False, **kwargs): + n_layer = 11 + model_args = { + "num_init_features": 120, + "growth_rates": [96] + [128] + [168] * (n_layer - 4) + [336] * 2, + "num_blocks_list": [3] * n_layer, + "is_downsample_block": (None, True, True, False, False, False, False, False, False, True, False), + "transition_compression_ratio": 0.5, + "block_type": ["Block"] + ["Block"] + ["BlockESE"] * (n_layer - 4) + ["BlockESE"] * 2, + } + model = _create_rdnet("rdnet_base", pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def rdnet_large(pretrained=False, **kwargs): + n_layer = 12 + model_args = { + "num_init_features": 144, + "growth_rates": [128] + [192] + [256] * (n_layer - 4) + [360] * 2, + 
"num_blocks_list": [3] * n_layer, + "is_downsample_block": (None, True, True, False, False, False, False, False, False, False, True, False), + "transition_compression_ratio": 0.5, + "block_type": ["Block"] + ["Block"] + ["BlockESE"] * (n_layer - 4) + ["BlockESE"] * 2, + } + model = _create_rdnet("rdnet_large", pretrained=pretrained, **dict(model_args, **kwargs)) + return model diff --git a/pytorch-image-models/timm/models/registry.py b/pytorch-image-models/timm/models/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..5b68a91e0c471f62af0b46c3bc98e7922b3dff66 --- /dev/null +++ b/pytorch-image-models/timm/models/registry.py @@ -0,0 +1,4 @@ +from ._registry import * + +import warnings +warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", FutureWarning) diff --git a/pytorch-image-models/timm/models/regnet.py b/pytorch-image-models/timm/models/regnet.py new file mode 100644 index 0000000000000000000000000000000000000000..1e741bec7b360a38d753f0d32e5dec066a734562 --- /dev/null +++ b/pytorch-image-models/timm/models/regnet.py @@ -0,0 +1,1191 @@ +"""RegNet X, Y, Z, and more + +Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678 +Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py + +Paper: `Fast and Accurate Model Scaling` - https://arxiv.org/abs/2103.06877 +Original Impl: None + +Based on original PyTorch impl linked above, but re-wrote to use my own blocks (adapted from ResNet here) +and cleaned up with more descriptive variable names. + +Weights from original pycls impl have been modified: +* first layer from BGR -> RGB as most PyTorch models are +* removed training specific dict entries from checkpoints and keep model state_dict only +* remap names to match the ones here + +Supports weight loading from torchvision and classy-vision (incl VISSL SEER) + +A number of custom timm model definitions additions including: +* stochastic depth, gradient checkpointing, layer-decay, configurable dilation +* a pre-activation 'V' variant +* only known RegNet-Z model definitions with pretrained weights + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +from dataclasses import dataclass, replace +from functools import partial +from typing import Callable, List, Optional, Union, Tuple + +import numpy as np +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import ClassifierHead, AvgPool2dSame, ConvNormAct, SEModule, DropPath, GroupNormAct +from timm.layers import get_act_layer, get_norm_act_layer, create_conv2d, make_divisible +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._manipulate import checkpoint_seq, named_apply +from ._registry import generate_default_cfgs, register_model, register_model_deprecations + +__all__ = ['RegNet', 'RegNetCfg'] # model_registry will add each entrypoint fn to this + + +@dataclass +class RegNetCfg: + depth: int = 21 + w0: int = 80 + wa: float = 42.63 + wm: float = 2.66 + group_size: int = 24 + bottle_ratio: float = 1. + se_ratio: float = 0. + group_min_ratio: float = 0. 
+ stem_width: int = 32 + downsample: Optional[str] = 'conv1x1' + linear_out: bool = False + preact: bool = False + num_features: int = 0 + act_layer: Union[str, Callable] = 'relu' + norm_layer: Union[str, Callable] = 'batchnorm' + + +def quantize_float(f, q): + """Converts a float to the closest non-zero int divisible by q.""" + return int(round(f / q) * q) + + +def adjust_widths_groups_comp(widths, bottle_ratios, groups, min_ratio=0.): + """Adjusts the compatibility of widths and groups.""" + bottleneck_widths = [int(w * b) for w, b in zip(widths, bottle_ratios)] + groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_widths)] + if min_ratio: + # torchvision uses a different rounding scheme for ensuring bottleneck widths divisible by group widths + bottleneck_widths = [make_divisible(w_bot, g, min_ratio) for w_bot, g in zip(bottleneck_widths, groups)] + else: + bottleneck_widths = [quantize_float(w_bot, g) for w_bot, g in zip(bottleneck_widths, groups)] + widths = [int(w_bot / b) for w_bot, b in zip(bottleneck_widths, bottle_ratios)] + return widths, groups + + +def generate_regnet(width_slope, width_initial, width_mult, depth, group_size, quant=8): + """Generates per block widths from RegNet parameters.""" + assert width_slope >= 0 and width_initial > 0 and width_mult > 1 and width_initial % quant == 0 + # TODO dWr scaling? + # depth = int(depth * (scale ** 0.1)) + # width_scale = scale ** 0.4 # dWr scale, exp 0.8 / 2, applied to both group and layer widths + widths_cont = np.arange(depth) * width_slope + width_initial + width_exps = np.round(np.log(widths_cont / width_initial) / np.log(width_mult)) + widths = np.round(np.divide(width_initial * np.power(width_mult, width_exps), quant)) * quant + num_stages, max_stage = len(np.unique(widths)), width_exps.max() + 1 + groups = np.array([group_size for _ in range(num_stages)]) + return widths.astype(int).tolist(), num_stages, groups.astype(int).tolist() + + +def downsample_conv( + in_chs, + out_chs, + kernel_size=1, + stride=1, + dilation=1, + norm_layer=None, + preact=False, +): + norm_layer = norm_layer or nn.BatchNorm2d + kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size + dilation = dilation if kernel_size > 1 else 1 + if preact: + return create_conv2d( + in_chs, + out_chs, + kernel_size, + stride=stride, + dilation=dilation, + ) + else: + return ConvNormAct( + in_chs, + out_chs, + kernel_size, + stride=stride, + dilation=dilation, + norm_layer=norm_layer, + apply_act=False, + ) + + +def downsample_avg( + in_chs, + out_chs, + kernel_size=1, + stride=1, + dilation=1, + norm_layer=None, + preact=False, +): + """ AvgPool Downsampling as in 'D' ResNet variants. 
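# --- Worked example (not part of the patch): the width schedule computed by
# --- generate_regnet() above, re-run standalone for the regnety_032 parameters used
# --- further below (w0=80, wa=42.63, wm=2.66, depth=21). The RegNet class below asserts
# --- that this yields exactly four stages.
import numpy as np

w0, wa, wm, depth, quant = 80, 42.63, 2.66, 21, 8
widths_cont = np.arange(depth) * wa + w0                          # linear ramp u_j = w0 + wa * j
width_exps = np.round(np.log(widths_cont / w0) / np.log(wm))      # snap each width to a power of wm
widths = np.round(w0 * np.power(wm, width_exps) / quant) * quant  # then quantize to a multiple of 8
per_stage_w, per_stage_d = np.unique(widths.astype(int), return_counts=True)
print(per_stage_w, per_stage_d)  # per-stage widths and per-stage block counts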
This is not in RegNet space but I might experiment.""" + norm_layer = norm_layer or nn.BatchNorm2d + avg_stride = stride if dilation == 1 else 1 + pool = nn.Identity() + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + if preact: + conv = create_conv2d(in_chs, out_chs, 1, stride=1) + else: + conv = ConvNormAct(in_chs, out_chs, 1, stride=1, norm_layer=norm_layer, apply_act=False) + return nn.Sequential(*[pool, conv]) + + +def create_shortcut( + downsample_type, + in_chs, + out_chs, + kernel_size, + stride, + dilation=(1, 1), + norm_layer=None, + preact=False, +): + assert downsample_type in ('avg', 'conv1x1', '', None) + if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: + dargs = dict(stride=stride, dilation=dilation[0], norm_layer=norm_layer, preact=preact) + if not downsample_type: + return None # no shortcut, no downsample + elif downsample_type == 'avg': + return downsample_avg(in_chs, out_chs, **dargs) + else: + return downsample_conv(in_chs, out_chs, kernel_size=kernel_size, **dargs) + else: + return nn.Identity() # identity shortcut (no downsample) + + +class Bottleneck(nn.Module): + """ RegNet Bottleneck + + This is almost exactly the same as a ResNet Bottlneck. The main difference is the SE block is moved from + after conv3 to after conv2. Otherwise, it's just redefining the arguments for groups/bottleneck channels. + """ + + def __init__( + self, + in_chs, + out_chs, + stride=1, + dilation=(1, 1), + bottle_ratio=1, + group_size=1, + se_ratio=0.25, + downsample='conv1x1', + linear_out=False, + act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, + drop_block=None, + drop_path_rate=0., + ): + super(Bottleneck, self).__init__() + act_layer = get_act_layer(act_layer) + bottleneck_chs = int(round(out_chs * bottle_ratio)) + groups = bottleneck_chs // group_size + + cargs = dict(act_layer=act_layer, norm_layer=norm_layer) + self.conv1 = ConvNormAct(in_chs, bottleneck_chs, kernel_size=1, **cargs) + self.conv2 = ConvNormAct( + bottleneck_chs, + bottleneck_chs, + kernel_size=3, + stride=stride, + dilation=dilation[0], + groups=groups, + drop_layer=drop_block, + **cargs, + ) + if se_ratio: + se_channels = int(round(in_chs * se_ratio)) + self.se = SEModule(bottleneck_chs, rd_channels=se_channels, act_layer=act_layer) + else: + self.se = nn.Identity() + self.conv3 = ConvNormAct(bottleneck_chs, out_chs, kernel_size=1, apply_act=False, **cargs) + self.act3 = nn.Identity() if linear_out else act_layer() + self.downsample = create_shortcut( + downsample, + in_chs, + out_chs, + kernel_size=1, + stride=stride, + dilation=dilation, + norm_layer=norm_layer, + ) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + + def zero_init_last(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + x = self.se(x) + x = self.conv3(x) + if self.downsample is not None: + # NOTE stuck with downsample as the attr name due to weight compatibility + # now represents the shortcut, no shortcut if None, and non-downsample shortcut == nn.Identity() + x = self.drop_path(x) + self.downsample(shortcut) + x = self.act3(x) + return x + + +class PreBottleneck(nn.Module): + """ RegNet Bottleneck + + This is almost exactly the same as a ResNet Bottlneck. The main difference is the SE block is moved from + after conv3 to after conv2. 
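# --- Arithmetic sketch (not part of the patch): how the Bottleneck above sizes its
# --- grouped 3x3 conv and SE block. The numbers are plausible stage values chosen for
# --- illustration only; with bottle_ratio=1.0 the "bottleneck" width equals out_chs,
# --- and group_size fixes the per-group channel count rather than the group count.
out_chs, bottle_ratio, group_size, in_chs, se_ratio = 216, 1.0, 24, 72, 0.25

bottleneck_chs = int(round(out_chs * bottle_ratio))  # 216
groups = bottleneck_chs // group_size                # 9 groups of 24 channels in conv2
se_channels = int(round(in_chs * se_ratio))          # 18; note SE reduction uses in_chs, not bottleneck_chs
print(bottleneck_chs, groups, se_channels)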
Otherwise, it's just redefining the arguments for groups/bottleneck channels. + """ + + def __init__( + self, + in_chs, + out_chs, + stride=1, + dilation=(1, 1), + bottle_ratio=1, + group_size=1, + se_ratio=0.25, + downsample='conv1x1', + linear_out=False, + act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, + drop_block=None, + drop_path_rate=0., + ): + super(PreBottleneck, self).__init__() + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + bottleneck_chs = int(round(out_chs * bottle_ratio)) + groups = bottleneck_chs // group_size + + self.norm1 = norm_act_layer(in_chs) + self.conv1 = create_conv2d(in_chs, bottleneck_chs, kernel_size=1) + self.norm2 = norm_act_layer(bottleneck_chs) + self.conv2 = create_conv2d( + bottleneck_chs, + bottleneck_chs, + kernel_size=3, + stride=stride, + dilation=dilation[0], + groups=groups, + ) + if se_ratio: + se_channels = int(round(in_chs * se_ratio)) + self.se = SEModule(bottleneck_chs, rd_channels=se_channels, act_layer=act_layer) + else: + self.se = nn.Identity() + self.norm3 = norm_act_layer(bottleneck_chs) + self.conv3 = create_conv2d(bottleneck_chs, out_chs, kernel_size=1) + self.downsample = create_shortcut( + downsample, + in_chs, + out_chs, + kernel_size=1, + stride=stride, + dilation=dilation, + preact=True, + ) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + + def zero_init_last(self): + pass + + def forward(self, x): + x = self.norm1(x) + shortcut = x + x = self.conv1(x) + x = self.norm2(x) + x = self.conv2(x) + x = self.se(x) + x = self.norm3(x) + x = self.conv3(x) + if self.downsample is not None: + # NOTE stuck with downsample as the attr name due to weight compatibility + # now represents the shortcut, no shortcut if None, and non-downsample shortcut == nn.Identity() + x = self.drop_path(x) + self.downsample(shortcut) + return x + + +class RegStage(nn.Module): + """Stage (sequence of blocks w/ the same output shape).""" + + def __init__( + self, + depth, + in_chs, + out_chs, + stride, + dilation, + drop_path_rates=None, + block_fn=Bottleneck, + **block_kwargs, + ): + super(RegStage, self).__init__() + self.grad_checkpointing = False + + first_dilation = 1 if dilation in (1, 2) else 2 + for i in range(depth): + block_stride = stride if i == 0 else 1 + block_in_chs = in_chs if i == 0 else out_chs + block_dilation = (first_dilation, dilation) + dpr = drop_path_rates[i] if drop_path_rates is not None else 0. 
+ name = "b{}".format(i + 1) + self.add_module( + name, + block_fn( + block_in_chs, + out_chs, + stride=block_stride, + dilation=block_dilation, + drop_path_rate=dpr, + **block_kwargs, + ) + ) + first_dilation = dilation + + def forward(self, x): + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.children(), x) + else: + for block in self.children(): + x = block(x) + return x + + +class RegNet(nn.Module): + """RegNet-X, Y, and Z Models + + Paper: https://arxiv.org/abs/2003.13678 + Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py + """ + + def __init__( + self, + cfg: RegNetCfg, + in_chans=3, + num_classes=1000, + output_stride=32, + global_pool='avg', + drop_rate=0., + drop_path_rate=0., + zero_init_last=True, + **kwargs, + ): + """ + + Args: + cfg (RegNetCfg): Model architecture configuration + in_chans (int): Number of input channels (default: 3) + num_classes (int): Number of classifier classes (default: 1000) + output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32) + global_pool (str): Global pooling type (default: 'avg') + drop_rate (float): Dropout rate (default: 0.) + drop_path_rate (float): Stochastic depth drop-path rate (default: 0.) + zero_init_last (bool): Zero-init last weight of residual path + kwargs (dict): Extra kwargs overlayed onto cfg + """ + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + cfg = replace(cfg, **kwargs) # update cfg with extra passed kwargs + + # Construct the stem + stem_width = cfg.stem_width + na_args = dict(act_layer=cfg.act_layer, norm_layer=cfg.norm_layer) + if cfg.preact: + self.stem = create_conv2d(in_chans, stem_width, 3, stride=2) + else: + self.stem = ConvNormAct(in_chans, stem_width, 3, stride=2, **na_args) + self.feature_info = [dict(num_chs=stem_width, reduction=2, module='stem')] + + # Construct the stages + prev_width = stem_width + curr_stride = 2 + per_stage_args, common_args = self._get_stage_args( + cfg, + output_stride=output_stride, + drop_path_rate=drop_path_rate, + ) + assert len(per_stage_args) == 4 + block_fn = PreBottleneck if cfg.preact else Bottleneck + for i, stage_args in enumerate(per_stage_args): + stage_name = "s{}".format(i + 1) + self.add_module( + stage_name, + RegStage( + in_chs=prev_width, + block_fn=block_fn, + **stage_args, + **common_args, + ) + ) + prev_width = stage_args['out_chs'] + curr_stride *= stage_args['stride'] + self.feature_info += [dict(num_chs=prev_width, reduction=curr_stride, module=stage_name)] + + # Construct the head + if cfg.num_features: + self.final_conv = ConvNormAct(prev_width, cfg.num_features, kernel_size=1, **na_args) + self.num_features = cfg.num_features + else: + final_act = cfg.linear_out or cfg.preact + self.final_conv = get_act_layer(cfg.act_layer)() if final_act else nn.Identity() + self.num_features = prev_width + self.head_hidden_size = self.num_features + self.head = ClassifierHead( + in_features=self.num_features, + num_classes=num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + ) + + named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) + + def _get_stage_args(self, cfg: RegNetCfg, default_stride=2, output_stride=32, drop_path_rate=0.): + # Generate RegNet ws per block + widths, num_stages, stage_gs = generate_regnet(cfg.wa, cfg.w0, cfg.wm, cfg.depth, cfg.group_size) + + # Convert to per stage format + stage_widths, stage_depths = np.unique(widths, return_counts=True) + 
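# --- Usage sketch (not part of the patch): the feature_info records built in
# --- RegNet.__init__ above are what timm's features_only wrapper consumes. Assumes
# --- 'regnety_032' resolves via timm.create_model; shown for illustration only.
import torch
import timm

backbone = timm.create_model(
    'regnety_032', pretrained=False, features_only=True, out_indices=(1, 2, 3, 4))
print(backbone.feature_info.channels())   # per-stage channel counts
print(backbone.feature_info.reduction())  # strides relative to the input, e.g. [4, 8, 16, 32]
feats = backbone(torch.randn(1, 3, 224, 224))
print([f.shape for f in feats])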
stage_br = [cfg.bottle_ratio for _ in range(num_stages)] + stage_strides = [] + stage_dilations = [] + net_stride = 2 + dilation = 1 + for _ in range(num_stages): + if net_stride >= output_stride: + dilation *= default_stride + stride = 1 + else: + stride = default_stride + net_stride *= stride + stage_strides.append(stride) + stage_dilations.append(dilation) + stage_dpr = np.split(np.linspace(0, drop_path_rate, sum(stage_depths)), np.cumsum(stage_depths[:-1])) + + # Adjust the compatibility of ws and gws + stage_widths, stage_gs = adjust_widths_groups_comp( + stage_widths, stage_br, stage_gs, min_ratio=cfg.group_min_ratio) + arg_names = ['out_chs', 'stride', 'dilation', 'depth', 'bottle_ratio', 'group_size', 'drop_path_rates'] + per_stage_args = [ + dict(zip(arg_names, params)) for params in + zip(stage_widths, stage_strides, stage_dilations, stage_depths, stage_br, stage_gs, stage_dpr) + ] + common_args = dict( + downsample=cfg.downsample, + se_ratio=cfg.se_ratio, + linear_out=cfg.linear_out, + act_layer=cfg.act_layer, + norm_layer=cfg.norm_layer, + ) + return per_stage_args, common_args + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^s(\d+)' if coarse else r'^s(\d+)\.b(\d+)', + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in list(self.children())[1:-1]: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.head.reset(num_classes, pool_type=global_pool) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to compatible intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' + intermediates = [] + take_indices, max_index = feature_take_indices(5, indices) + + # forward pass + feat_idx = 0 + x = self.stem(x) + if feat_idx in take_indices: + intermediates.append(x) + + layer_names = ('s1', 's2', 's3', 's4') + if stop_early: + layer_names = layer_names[:max_index] + for n in layer_names: + feat_idx += 1 + x = getattr(self, n)(x) # won't work with torchscript, but keeps code reasonable, FML + if feat_idx in take_indices: + intermediates.append(x) + + if intermediates_only: + return intermediates + + if feat_idx == 4: + x = self.final_conv(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
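# --- Illustration (not part of the patch): the stride -> dilation conversion performed in
# --- _get_stage_args() above when a reduced output_stride is requested; the loop below is a
# --- standalone re-run of that logic for output_stride=16 and four stages.
output_stride, default_stride = 16, 2
net_stride, dilation = 2, 1            # the stem has already reduced the input by 2
strides, dilations = [], []
for _ in range(4):
    if net_stride >= output_stride:
        dilation *= default_stride     # keep resolution, grow the receptive field instead
        stride = 1
    else:
        stride = default_stride
    net_stride *= stride
    strides.append(stride)
    dilations.append(dilation)
print(strides, dilations)              # [2, 2, 2, 1] and [1, 1, 1, 2]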
+ """ + take_indices, max_index = feature_take_indices(5, indices) + layer_names = ('s1', 's2', 's3', 's4') + layer_names = layer_names[max_index:] + for n in layer_names: + setattr(self, n, nn.Identity()) + if max_index < 4: + self.final_conv = nn.Identity() + if prune_head: + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.stem(x) + x = self.s1(x) + x = self.s2(x) + x = self.s3(x) + x = self.s4(x) + x = self.final_conv(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _init_weights(module, name='', zero_init_last=False): + if isinstance(module, nn.Conv2d): + fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels + fan_out //= module.groups + module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Linear): + nn.init.normal_(module.weight, mean=0.0, std=0.01) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif zero_init_last and hasattr(module, 'zero_init_last'): + module.zero_init_last() + + +def _filter_fn(state_dict): + state_dict = state_dict.get('model', state_dict) + replaces = [ + ('f.a.0', 'conv1.conv'), + ('f.a.1', 'conv1.bn'), + ('f.b.0', 'conv2.conv'), + ('f.b.1', 'conv2.bn'), + ('f.final_bn', 'conv3.bn'), + ('f.se.excitation.0', 'se.fc1'), + ('f.se.excitation.2', 'se.fc2'), + ('f.se', 'se'), + ('f.c.0', 'conv3.conv'), + ('f.c.1', 'conv3.bn'), + ('f.c', 'conv3.conv'), + ('proj.0', 'downsample.conv'), + ('proj.1', 'downsample.bn'), + ('proj', 'downsample.conv'), + ] + if 'classy_state_dict' in state_dict: + # classy-vision & vissl (SEER) weights + import re + state_dict = state_dict['classy_state_dict']['base_model']['model'] + out = {} + for k, v in state_dict['trunk'].items(): + k = k.replace('_feature_blocks.conv1.stem.0', 'stem.conv') + k = k.replace('_feature_blocks.conv1.stem.1', 'stem.bn') + k = re.sub( + r'^_feature_blocks.res\d.block(\d)-(\d+)', + lambda x: f's{int(x.group(1))}.b{int(x.group(2)) + 1}', k) + k = re.sub(r's(\d)\.b(\d+)\.bn', r's\1.b\2.downsample.bn', k) + for s, r in replaces: + k = k.replace(s, r) + out[k] = v + for k, v in state_dict['heads'].items(): + if 'projection_head' in k or 'prototypes' in k: + continue + k = k.replace('0.clf.0', 'head.fc') + out[k] = v + return out + if 'stem.0.weight' in state_dict: + # torchvision weights + import re + out = {} + for k, v in state_dict.items(): + k = k.replace('stem.0', 'stem.conv') + k = k.replace('stem.1', 'stem.bn') + k = re.sub( + r'trunk_output.block(\d)\.block(\d+)\-(\d+)', + lambda x: f's{int(x.group(1))}.b{int(x.group(3)) + 1}', k) + for s, r in replaces: + k = k.replace(s, r) + k = k.replace('fc.', 'head.fc.') + out[k] = v + return out + return state_dict + + +# Model FLOPS = three trailing digits * 10^8 +model_cfgs = dict( + # RegNet-X + regnetx_002=RegNetCfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13), + regnetx_004=RegNetCfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22), + regnetx_004_tv=RegNetCfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22, group_min_ratio=0.9), + regnetx_006=RegNetCfg(w0=48, wa=36.97, wm=2.24, group_size=24, depth=16), + regnetx_008=RegNetCfg(w0=56, wa=35.73, wm=2.28, group_size=16, depth=16), + regnetx_016=RegNetCfg(w0=80, wa=34.01, wm=2.25, group_size=24, depth=18), + 
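# --- Illustration (not part of the patch): one step of the torchvision -> timm key
# --- remapping applied by _filter_fn() above. The example key is shaped like a
# --- torchvision RegNet state_dict entry; treat the exact format as an assumption.
import re

k = 'trunk_output.block2.block2-0.f.b.0.weight'
k = re.sub(
    r'trunk_output.block(\d)\.block(\d+)\-(\d+)',
    lambda x: f's{int(x.group(1))}.b{int(x.group(3)) + 1}', k)   # -> 's2.b1.f.b.0.weight'
k = k.replace('f.b.0', 'conv2.conv')                              # one entry of the `replaces` table above
print(k)                                                          # 's2.b1.conv2.conv.weight'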
regnetx_032=RegNetCfg(w0=88, wa=26.31, wm=2.25, group_size=48, depth=25), + regnetx_040=RegNetCfg(w0=96, wa=38.65, wm=2.43, group_size=40, depth=23), + regnetx_064=RegNetCfg(w0=184, wa=60.83, wm=2.07, group_size=56, depth=17), + regnetx_080=RegNetCfg(w0=80, wa=49.56, wm=2.88, group_size=120, depth=23), + regnetx_120=RegNetCfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19), + regnetx_160=RegNetCfg(w0=216, wa=55.59, wm=2.1, group_size=128, depth=22), + regnetx_320=RegNetCfg(w0=320, wa=69.86, wm=2.0, group_size=168, depth=23), + + # RegNet-Y + regnety_002=RegNetCfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13, se_ratio=0.25), + regnety_004=RegNetCfg(w0=48, wa=27.89, wm=2.09, group_size=8, depth=16, se_ratio=0.25), + regnety_006=RegNetCfg(w0=48, wa=32.54, wm=2.32, group_size=16, depth=15, se_ratio=0.25), + regnety_008=RegNetCfg(w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25), + regnety_008_tv=RegNetCfg(w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25, group_min_ratio=0.9), + regnety_016=RegNetCfg(w0=48, wa=20.71, wm=2.65, group_size=24, depth=27, se_ratio=0.25), + regnety_032=RegNetCfg(w0=80, wa=42.63, wm=2.66, group_size=24, depth=21, se_ratio=0.25), + regnety_040=RegNetCfg(w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25), + regnety_064=RegNetCfg(w0=112, wa=33.22, wm=2.27, group_size=72, depth=25, se_ratio=0.25), + regnety_080=RegNetCfg(w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25), + regnety_080_tv=RegNetCfg(w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25, group_min_ratio=0.9), + regnety_120=RegNetCfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19, se_ratio=0.25), + regnety_160=RegNetCfg(w0=200, wa=106.23, wm=2.48, group_size=112, depth=18, se_ratio=0.25), + regnety_320=RegNetCfg(w0=232, wa=115.89, wm=2.53, group_size=232, depth=20, se_ratio=0.25), + regnety_640=RegNetCfg(w0=352, wa=147.48, wm=2.4, group_size=328, depth=20, se_ratio=0.25), + regnety_1280=RegNetCfg(w0=456, wa=160.83, wm=2.52, group_size=264, depth=27, se_ratio=0.25), + regnety_2560=RegNetCfg(w0=640, wa=230.83, wm=2.53, group_size=373, depth=27, se_ratio=0.25), + #regnety_2560=RegNetCfg(w0=640, wa=124.47, wm=2.04, group_size=848, depth=27, se_ratio=0.25), + + # Experimental + regnety_040_sgn=RegNetCfg( + w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25, + act_layer='silu', norm_layer=partial(GroupNormAct, group_size=16)), + + # regnetv = 'preact regnet y' + regnetv_040=RegNetCfg( + depth=22, w0=96, wa=31.41, wm=2.24, group_size=64, se_ratio=0.25, preact=True, act_layer='silu'), + regnetv_064=RegNetCfg( + depth=25, w0=112, wa=33.22, wm=2.27, group_size=72, se_ratio=0.25, preact=True, act_layer='silu', + downsample='avg'), + + # RegNet-Z (unverified) + regnetz_005=RegNetCfg( + depth=21, w0=16, wa=10.7, wm=2.51, group_size=4, bottle_ratio=4.0, se_ratio=0.25, + downsample=None, linear_out=True, num_features=1024, act_layer='silu', + ), + regnetz_040=RegNetCfg( + depth=28, w0=48, wa=14.5, wm=2.226, group_size=8, bottle_ratio=4.0, se_ratio=0.25, + downsample=None, linear_out=True, num_features=0, act_layer='silu', + ), + regnetz_040_h=RegNetCfg( + depth=28, w0=48, wa=14.5, wm=2.226, group_size=8, bottle_ratio=4.0, se_ratio=0.25, + downsample=None, linear_out=True, num_features=1536, act_layer='silu', + ), +) + + +def _create_regnet(variant, pretrained, **kwargs): + return build_model_with_cfg( + RegNet, variant, pretrained, + model_cfg=model_cfgs[variant], + pretrained_filter_fn=_filter_fn, + **kwargs) + + +def 
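# --- Usage sketch (not part of the patch): because RegNet.__init__ overlays extra kwargs onto
# --- RegNetCfg via dataclasses.replace(), per-call overrides reach both the module arguments
# --- and cfg fields (e.g. se_ratio). Assumes the entrypoints registered further below resolve
# --- via timm.create_model; illustrative only.
import timm

model = timm.create_model(
    'regnety_032', pretrained=False,
    num_classes=10,        # explicit __init__ argument
    drop_path_rate=0.1,    # stochastic depth, also an explicit argument
)
print(model.head.fc)       # Linear(..., out_features=10)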
_cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'test_input_size': (3, 288, 288), 'crop_pct': 0.95, 'test_crop_pct': 1.0, + 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +def _cfgpyc(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + 'license': 'mit', 'origin_url': 'https://github.com/facebookresearch/pycls', **kwargs + } + + +def _cfgtv2(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.965, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + 'license': 'bsd-3-clause', 'origin_url': 'https://github.com/pytorch/vision', **kwargs + } + + +default_cfgs = generate_default_cfgs({ + # timm trained models + 'regnety_032.ra_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth'), + 'regnety_040.ra3_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_040_ra3-670e1166.pth'), + 'regnety_064.ra3_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_064_ra3-aa26dc7d.pth'), + 'regnety_080.ra3_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_080_ra3-1fdc4344.pth'), + 'regnety_120.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'), + 'regnety_160.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'), + 'regnety_160.lion_in12k_ft_in1k': _cfg(hf_hub_id='timm/'), + + # timm in12k pretrain + 'regnety_120.sw_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821), + 'regnety_160.sw_in12k': _cfg( + hf_hub_id='timm/', + num_classes=11821), + + # timm custom arch (v and z guess) + trained models + 'regnety_040_sgn.untrained': _cfg(url=''), + 'regnetv_040.ra3_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetv_040_ra3-c248f51f.pth', + first_conv='stem'), + 'regnetv_064.ra3_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetv_064_ra3-530616c2.pth', + first_conv='stem'), + + 'regnetz_005.untrained': _cfg(url=''), + 'regnetz_040.ra3_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_040_ra3-9007edf5.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320)), + 'regnetz_040_h.ra3_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_040h_ra3-f594343b.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320)), + + # used in DeiT for distillation (from Facebook DeiT GitHub repository) + 'regnety_160.deit_in1k': _cfg( + hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/regnety_160-a5fe301d.pth'), + 
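# --- Usage sketch (not part of the patch): the input_size / crop_pct / mean / std fields set
# --- by the _cfg helpers above feed timm's preprocessing helpers. Assumes a timm version with
# --- resolve_data_config / create_transform; illustrative only.
import timm
from timm.data import resolve_data_config, create_transform

model = timm.create_model('regnety_032', pretrained=False)
data_cfg = resolve_data_config({}, model=model)
print(data_cfg['input_size'], data_cfg['crop_pct'], data_cfg['mean'], data_cfg['std'])
transform = create_transform(**data_cfg, is_training=False)
print(transform)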
+ 'regnetx_004_tv.tv2_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_x_400mf-62229a5f.pth'), + 'regnetx_008.tv2_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_x_800mf-94a99ebd.pth'), + 'regnetx_016.tv2_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_x_1_6gf-a12f2b72.pth'), + 'regnetx_032.tv2_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_x_3_2gf-7071aa85.pth'), + 'regnetx_080.tv2_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_x_8gf-2b70d774.pth'), + 'regnetx_160.tv2_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_x_16gf-ba3796d7.pth'), + 'regnetx_320.tv2_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_x_32gf-6eb8fdc6.pth'), + + 'regnety_004.tv2_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_y_400mf-e6988f5f.pth'), + 'regnety_008_tv.tv2_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_y_800mf-58fc7688.pth'), + 'regnety_016.tv2_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_y_1_6gf-0d7bc02a.pth'), + 'regnety_032.tv2_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_y_3_2gf-9180c971.pth'), + 'regnety_080_tv.tv2_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_y_8gf-dc2b1b54.pth'), + 'regnety_160.tv2_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_y_16gf-3e4a00f9.pth'), + 'regnety_320.tv2_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_y_32gf-8db6d4b5.pth'), + + 'regnety_160.swag_ft_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_y_16gf_swag-43afe44d.pth', license='cc-by-nc-4.0', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + 'regnety_320.swag_ft_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_y_32gf_swag-04fdfa75.pth', license='cc-by-nc-4.0', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + 'regnety_1280.swag_ft_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_y_128gf_swag-c8ce3e52.pth', license='cc-by-nc-4.0', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + + 'regnety_160.swag_lc_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_y_16gf_lc_swag-f3ec0043.pth', license='cc-by-nc-4.0'), + 'regnety_320.swag_lc_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_y_32gf_lc_swag-e1583746.pth', license='cc-by-nc-4.0'), + 'regnety_1280.swag_lc_in1k': _cfgtv2( + hf_hub_id='timm/', + url='https://download.pytorch.org/models/regnet_y_128gf_lc_swag-cbe8ce12.pth', license='cc-by-nc-4.0'), + + 'regnety_320.seer_ft_in1k': _cfgtv2( + hf_hub_id='timm/', + license='other', origin_url='https://github.com/facebookresearch/vissl', + url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + 'regnety_640.seer_ft_in1k': _cfgtv2( + hf_hub_id='timm/', + license='other', origin_url='https://github.com/facebookresearch/vissl', + 
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + 'regnety_1280.seer_ft_in1k': _cfgtv2( + hf_hub_id='timm/', + license='other', origin_url='https://github.com/facebookresearch/vissl', + url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + 'regnety_2560.seer_ft_in1k': _cfgtv2( + hf_hub_id='timm/', + license='other', origin_url='https://github.com/facebookresearch/vissl', + url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet256_finetuned_in1k_model_final_checkpoint_phase38.torch', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + + 'regnety_320.seer': _cfgtv2( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', + num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'), + 'regnety_640.seer': _cfgtv2( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', + num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'), + 'regnety_1280.seer': _cfgtv2( + hf_hub_id='timm/', + url='https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', + num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'), + # FIXME invalid weight <-> model match, mistake on their end + #'regnety_2560.seer': _cfgtv2( + # url='https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_cosine_rg256gf_noBNhead_wd1e5_fairstore_bs16_node64_sinkhorn10_proto16k_apex_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', + # num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'), + + 'regnetx_002.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnetx_004.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnetx_006.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnetx_008.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnetx_016.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnetx_032.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnetx_040.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnetx_064.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnetx_080.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnetx_120.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnetx_160.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnetx_320.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + + 'regnety_002.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnety_004.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnety_006.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnety_008.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnety_016.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnety_032.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnety_040.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnety_064.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnety_080.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnety_120.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnety_160.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), + 'regnety_320.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), +}) + + +@register_model +def regnetx_002(pretrained=False, **kwargs) -> RegNet: + """RegNetX-200MF""" + 
return _create_regnet('regnetx_002', pretrained, **kwargs) + + +@register_model +def regnetx_004(pretrained=False, **kwargs) -> RegNet: + """RegNetX-400MF""" + return _create_regnet('regnetx_004', pretrained, **kwargs) + + +@register_model +def regnetx_004_tv(pretrained=False, **kwargs) -> RegNet: + """RegNetX-400MF w/ torchvision group rounding""" + return _create_regnet('regnetx_004_tv', pretrained, **kwargs) + + +@register_model +def regnetx_006(pretrained=False, **kwargs) -> RegNet: + """RegNetX-600MF""" + return _create_regnet('regnetx_006', pretrained, **kwargs) + + +@register_model +def regnetx_008(pretrained=False, **kwargs) -> RegNet: + """RegNetX-800MF""" + return _create_regnet('regnetx_008', pretrained, **kwargs) + + +@register_model +def regnetx_016(pretrained=False, **kwargs) -> RegNet: + """RegNetX-1.6GF""" + return _create_regnet('regnetx_016', pretrained, **kwargs) + + +@register_model +def regnetx_032(pretrained=False, **kwargs) -> RegNet: + """RegNetX-3.2GF""" + return _create_regnet('regnetx_032', pretrained, **kwargs) + + +@register_model +def regnetx_040(pretrained=False, **kwargs) -> RegNet: + """RegNetX-4.0GF""" + return _create_regnet('regnetx_040', pretrained, **kwargs) + + +@register_model +def regnetx_064(pretrained=False, **kwargs) -> RegNet: + """RegNetX-6.4GF""" + return _create_regnet('regnetx_064', pretrained, **kwargs) + + +@register_model +def regnetx_080(pretrained=False, **kwargs) -> RegNet: + """RegNetX-8.0GF""" + return _create_regnet('regnetx_080', pretrained, **kwargs) + + +@register_model +def regnetx_120(pretrained=False, **kwargs) -> RegNet: + """RegNetX-12GF""" + return _create_regnet('regnetx_120', pretrained, **kwargs) + + +@register_model +def regnetx_160(pretrained=False, **kwargs) -> RegNet: + """RegNetX-16GF""" + return _create_regnet('regnetx_160', pretrained, **kwargs) + + +@register_model +def regnetx_320(pretrained=False, **kwargs) -> RegNet: + """RegNetX-32GF""" + return _create_regnet('regnetx_320', pretrained, **kwargs) + + +@register_model +def regnety_002(pretrained=False, **kwargs) -> RegNet: + """RegNetY-200MF""" + return _create_regnet('regnety_002', pretrained, **kwargs) + + +@register_model +def regnety_004(pretrained=False, **kwargs) -> RegNet: + """RegNetY-400MF""" + return _create_regnet('regnety_004', pretrained, **kwargs) + + +@register_model +def regnety_006(pretrained=False, **kwargs) -> RegNet: + """RegNetY-600MF""" + return _create_regnet('regnety_006', pretrained, **kwargs) + + +@register_model +def regnety_008(pretrained=False, **kwargs) -> RegNet: + """RegNetY-800MF""" + return _create_regnet('regnety_008', pretrained, **kwargs) + + +@register_model +def regnety_008_tv(pretrained=False, **kwargs) -> RegNet: + """RegNetY-800MF w/ torchvision group rounding""" + return _create_regnet('regnety_008_tv', pretrained, **kwargs) + + +@register_model +def regnety_016(pretrained=False, **kwargs) -> RegNet: + """RegNetY-1.6GF""" + return _create_regnet('regnety_016', pretrained, **kwargs) + + +@register_model +def regnety_032(pretrained=False, **kwargs) -> RegNet: + """RegNetY-3.2GF""" + return _create_regnet('regnety_032', pretrained, **kwargs) + + +@register_model +def regnety_040(pretrained=False, **kwargs) -> RegNet: + """RegNetY-4.0GF""" + return _create_regnet('regnety_040', pretrained, **kwargs) + + +@register_model +def regnety_064(pretrained=False, **kwargs) -> RegNet: + """RegNetY-6.4GF""" + return _create_regnet('regnety_064', pretrained, **kwargs) + + +@register_model +def regnety_080(pretrained=False, **kwargs) 
-> RegNet: + """RegNetY-8.0GF""" + return _create_regnet('regnety_080', pretrained, **kwargs) + + +@register_model +def regnety_080_tv(pretrained=False, **kwargs) -> RegNet: + """RegNetY-8.0GF w/ torchvision group rounding""" + return _create_regnet('regnety_080_tv', pretrained, **kwargs) + + +@register_model +def regnety_120(pretrained=False, **kwargs) -> RegNet: + """RegNetY-12GF""" + return _create_regnet('regnety_120', pretrained, **kwargs) + + +@register_model +def regnety_160(pretrained=False, **kwargs) -> RegNet: + """RegNetY-16GF""" + return _create_regnet('regnety_160', pretrained, **kwargs) + + +@register_model +def regnety_320(pretrained=False, **kwargs) -> RegNet: + """RegNetY-32GF""" + return _create_regnet('regnety_320', pretrained, **kwargs) + + +@register_model +def regnety_640(pretrained=False, **kwargs) -> RegNet: + """RegNetY-64GF""" + return _create_regnet('regnety_640', pretrained, **kwargs) + + +@register_model +def regnety_1280(pretrained=False, **kwargs) -> RegNet: + """RegNetY-128GF""" + return _create_regnet('regnety_1280', pretrained, **kwargs) + + +@register_model +def regnety_2560(pretrained=False, **kwargs) -> RegNet: + """RegNetY-256GF""" + return _create_regnet('regnety_2560', pretrained, **kwargs) + + +@register_model +def regnety_040_sgn(pretrained=False, **kwargs) -> RegNet: + """RegNetY-4.0GF w/ GroupNorm """ + return _create_regnet('regnety_040_sgn', pretrained, **kwargs) + + +@register_model +def regnetv_040(pretrained=False, **kwargs) -> RegNet: + """RegNetV-4.0GF (pre-activation)""" + return _create_regnet('regnetv_040', pretrained, **kwargs) + + +@register_model +def regnetv_064(pretrained=False, **kwargs) -> RegNet: + """RegNetV-6.4GF (pre-activation)""" + return _create_regnet('regnetv_064', pretrained, **kwargs) + + +@register_model +def regnetz_005(pretrained=False, **kwargs) -> RegNet: + """RegNetZ-500MF + NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py + but it's not clear it is equivalent to paper model as not detailed in the paper. + """ + return _create_regnet('regnetz_005', pretrained, zero_init_last=False, **kwargs) + + +@register_model +def regnetz_040(pretrained=False, **kwargs) -> RegNet: + """RegNetZ-4.0GF + NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py + but it's not clear it is equivalent to paper model as not detailed in the paper. + """ + return _create_regnet('regnetz_040', pretrained, zero_init_last=False, **kwargs) + + +@register_model +def regnetz_040_h(pretrained=False, **kwargs) -> RegNet: + """RegNetZ-4.0GF + NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py + but it's not clear it is equivalent to paper model as not detailed in the paper. + """ + return _create_regnet('regnetz_040_h', pretrained, zero_init_last=False, **kwargs) + + +register_model_deprecations(__name__, { + 'regnetz_040h': 'regnetz_040_h', +}) \ No newline at end of file diff --git a/pytorch-image-models/timm/models/repghost.py b/pytorch-image-models/timm/models/repghost.py new file mode 100644 index 0000000000000000000000000000000000000000..4b802d79b6aad8ae40555698e2abba4ecc43ce36 --- /dev/null +++ b/pytorch-image-models/timm/models/repghost.py @@ -0,0 +1,481 @@ +""" +An implementation of RepGhostNet Model as defined in: +RepGhost: A Hardware-Efficient Ghost Module via Re-parameterization. 
https://arxiv.org/abs/2211.06088 + +Original implementation: https://github.com/ChengpengChen/RepGhost +""" +import copy +from functools import partial +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import SelectAdaptivePool2d, Linear, make_divisible +from ._builder import build_model_with_cfg +from ._efficientnet_blocks import SqueezeExcite, ConvBnAct +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs + +__all__ = ['RepGhostNet'] + + +_SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4)) + + +class RepGhostModule(nn.Module): + def __init__( + self, + in_chs, + out_chs, + kernel_size=1, + dw_size=3, + stride=1, + relu=True, + reparam=True, + ): + super(RepGhostModule, self).__init__() + self.out_chs = out_chs + init_chs = out_chs + new_chs = out_chs + + self.primary_conv = nn.Sequential( + nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False), + nn.BatchNorm2d(init_chs), + nn.ReLU(inplace=True) if relu else nn.Identity(), + ) + + fusion_conv = [] + fusion_bn = [] + if reparam: + fusion_conv.append(nn.Identity()) + fusion_bn.append(nn.BatchNorm2d(init_chs)) + + self.fusion_conv = nn.Sequential(*fusion_conv) + self.fusion_bn = nn.Sequential(*fusion_bn) + + self.cheap_operation = nn.Sequential( + nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size//2, groups=init_chs, bias=False), + nn.BatchNorm2d(new_chs), + # nn.ReLU(inplace=True) if relu else nn.Identity(), + ) + self.relu = nn.ReLU(inplace=False) if relu else nn.Identity() + + def forward(self, x): + x1 = self.primary_conv(x) + x2 = self.cheap_operation(x1) + for conv, bn in zip(self.fusion_conv, self.fusion_bn): + x2 = x2 + bn(conv(x1)) + return self.relu(x2) + + def get_equivalent_kernel_bias(self): + kernel3x3, bias3x3 = self._fuse_bn_tensor(self.cheap_operation[0], self.cheap_operation[1]) + for conv, bn in zip(self.fusion_conv, self.fusion_bn): + kernel, bias = self._fuse_bn_tensor(conv, bn, kernel3x3.shape[0], kernel3x3.device) + kernel3x3 += self._pad_1x1_to_3x3_tensor(kernel) + bias3x3 += bias + return kernel3x3, bias3x3 + + @staticmethod + def _pad_1x1_to_3x3_tensor(kernel1x1): + if kernel1x1 is None: + return 0 + else: + return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1]) + + @staticmethod + def _fuse_bn_tensor(conv, bn, in_channels=None, device=None): + in_channels = in_channels if in_channels else bn.running_mean.shape[0] + device = device if device else bn.weight.device + if isinstance(conv, nn.Conv2d): + kernel = conv.weight + assert conv.bias is None + else: + assert isinstance(conv, nn.Identity) + kernel = torch.ones(in_channels, 1, 1, 1, device=device) + + if isinstance(bn, nn.BatchNorm2d): + running_mean = bn.running_mean + running_var = bn.running_var + gamma = bn.weight + beta = bn.bias + eps = bn.eps + std = (running_var + eps).sqrt() + t = (gamma / std).reshape(-1, 1, 1, 1) + return kernel * t, beta - running_mean * gamma / std + assert isinstance(bn, nn.Identity) + return kernel, torch.zeros(in_channels).to(kernel.device) + + def switch_to_deploy(self): + if len(self.fusion_conv) == 0 and len(self.fusion_bn) == 0: + return + kernel, bias = self.get_equivalent_kernel_bias() + self.cheap_operation = nn.Conv2d( + in_channels=self.cheap_operation[0].in_channels, + out_channels=self.cheap_operation[0].out_channels, + 
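# --- Numerical sketch (not part of the patch): the Conv+BN folding performed by
# --- _fuse_bn_tensor() above, checked on a throwaway depthwise conv in eval mode.
import torch
import torch.nn as nn

torch.manual_seed(0)
conv = nn.Conv2d(8, 8, 3, padding=1, groups=8, bias=False)
bn = nn.BatchNorm2d(8)
bn.weight.data.uniform_(0.5, 1.5)
bn.bias.data.uniform_(-0.1, 0.1)
bn.running_mean.uniform_(-0.5, 0.5)
bn.running_var.uniform_(0.5, 1.5)
conv.eval(); bn.eval()

std = (bn.running_var + bn.eps).sqrt()
w_fused = conv.weight * (bn.weight / std).reshape(-1, 1, 1, 1)   # scale each output channel by gamma/std
b_fused = bn.bias - bn.running_mean * bn.weight / std            # fold the BN shift into a conv bias

fused = nn.Conv2d(8, 8, 3, padding=1, groups=8, bias=True)
fused.weight.data.copy_(w_fused)
fused.bias.data.copy_(b_fused)

x = torch.randn(2, 8, 16, 16)
with torch.no_grad():
    print(torch.allclose(bn(conv(x)), fused(x), atol=1e-5))  # True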
kernel_size=self.cheap_operation[0].kernel_size, + padding=self.cheap_operation[0].padding, + dilation=self.cheap_operation[0].dilation, + groups=self.cheap_operation[0].groups, + bias=True) + self.cheap_operation.weight.data = kernel + self.cheap_operation.bias.data = bias + self.__delattr__('fusion_conv') + self.__delattr__('fusion_bn') + self.fusion_conv = [] + self.fusion_bn = [] + + def reparameterize(self): + self.switch_to_deploy() + + +class RepGhostBottleneck(nn.Module): + """ RepGhost bottleneck w/ optional SE""" + + def __init__( + self, + in_chs, + mid_chs, + out_chs, + dw_kernel_size=3, + stride=1, + act_layer=nn.ReLU, + se_ratio=0., + reparam=True, + ): + super(RepGhostBottleneck, self).__init__() + has_se = se_ratio is not None and se_ratio > 0. + self.stride = stride + + # Point-wise expansion + self.ghost1 = RepGhostModule(in_chs, mid_chs, relu=True, reparam=reparam) + + # Depth-wise convolution + if self.stride > 1: + self.conv_dw = nn.Conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, + padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False) + self.bn_dw = nn.BatchNorm2d(mid_chs) + else: + self.conv_dw = None + self.bn_dw = None + + # Squeeze-and-excitation + self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None + + # Point-wise linear projection + self.ghost2 = RepGhostModule(mid_chs, out_chs, relu=False, reparam=reparam) + + # shortcut + if in_chs == out_chs and self.stride == 1: + self.shortcut = nn.Sequential() + else: + self.shortcut = nn.Sequential( + nn.Conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, + padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False), + nn.BatchNorm2d(in_chs), + nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(out_chs), + ) + + def forward(self, x): + shortcut = x + + # 1st ghost bottleneck + x = self.ghost1(x) + + # Depth-wise convolution + if self.conv_dw is not None: + x = self.conv_dw(x) + x = self.bn_dw(x) + + # Squeeze-and-excitation + if self.se is not None: + x = self.se(x) + + # 2nd ghost bottleneck + x = self.ghost2(x) + + x += self.shortcut(shortcut) + return x + + +class RepGhostNet(nn.Module): + def __init__( + self, + cfgs, + num_classes=1000, + width=1.0, + in_chans=3, + output_stride=32, + global_pool='avg', + drop_rate=0.2, + reparam=True, + ): + super(RepGhostNet, self).__init__() + # setting of inverted residual blocks + assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported' + self.cfgs = cfgs + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + self.feature_info = [] + + # building first layer + stem_chs = make_divisible(16 * width, 4) + self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False) + self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=f'conv_stem')) + self.bn1 = nn.BatchNorm2d(stem_chs) + self.act1 = nn.ReLU(inplace=True) + prev_chs = stem_chs + + # building inverted residual blocks + stages = nn.ModuleList([]) + block = RepGhostBottleneck + stage_idx = 0 + net_stride = 2 + for cfg in self.cfgs: + layers = [] + s = 1 + for k, exp_size, c, se_ratio, s in cfg: + out_chs = make_divisible(c * width, 4) + mid_chs = make_divisible(exp_size * width, 4) + layers.append(block(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio, reparam=reparam)) + prev_chs = out_chs + if s > 1: + net_stride *= 2 + self.feature_info.append(dict( + num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}')) + stages.append(nn.Sequential(*layers)) + stage_idx 
+= 1 + + out_chs = make_divisible(exp_size * width * 2, 4) + stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1))) + self.pool_dim = prev_chs = out_chs + + self.blocks = nn.Sequential(*stages) + + # building last several layers + self.num_features = prev_chs + self.head_hidden_size = out_chs = 1280 + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True) + self.act2 = nn.ReLU(inplace=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^conv_stem|bn1', + blocks=[ + (r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None), + (r'conv_head', (99999,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.classifier + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + if global_pool is not None: + # NOTE: cannot meaningfully change pooling of efficient head after creation + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x, flatten=True) + else: + x = self.blocks(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + x = self.flatten(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return x if pre_logits else self.classifier(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + def convert_to_deploy(self): + repghost_model_convert(self, do_copy=False) + + +def repghost_model_convert(model: torch.nn.Module, save_path=None, do_copy=True): + """ + taken from from https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py + """ + if do_copy: + model = copy.deepcopy(model) + for module in model.modules(): + if hasattr(module, 'switch_to_deploy'): + module.switch_to_deploy() + if save_path is not None: + torch.save(model.state_dict(), save_path) + return model + + +def _create_repghostnet(variant, width=1.0, pretrained=False, **kwargs): + """ + Constructs a RepGhostNet model + """ + cfgs = [ + # k, t, c, SE, s + # stage1 + [[3, 8, 16, 0, 1]], + # stage2 + [[3, 24, 24, 0, 2]], + [[3, 36, 24, 0, 1]], + # stage3 + [[5, 36, 40, 0.25, 2]], + [[5, 60, 40, 0.25, 1]], + # stage4 + [[3, 120, 80, 0, 2]], + [[3, 100, 80, 0, 1], + [3, 120, 80, 0, 1], + [3, 120, 80, 0, 1], + [3, 240, 112, 0.25, 1], + [3, 336, 112, 0.25, 1] + ], + # stage5 + [[5, 336, 160, 0.25, 2]], + [[5, 480, 160, 0, 1], + [5, 480, 160, 0.25, 1], + [5, 480, 160, 0, 1], + [5, 480, 160, 0.25, 1] + ] + ] + model_kwargs = dict( + cfgs=cfgs, + width=width, + **kwargs, + ) + return build_model_with_cfg( + RepGhostNet, + variant, + pretrained, + feature_cfg=dict(flatten_sequential=True), + **model_kwargs, + ) + + +def _cfg(url='', **kwargs): 
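# --- Usage sketch (not part of the patch): re-parameterizing a RepGhostNet for deployment with
# --- repghost_model_convert() defined above and checking that outputs still match in eval mode.
# --- Assumes the 'repghostnet_100' entrypoint below resolves via timm; illustrative only.
import torch
import timm
from timm.models.repghost import repghost_model_convert

model = timm.create_model('repghostnet_100', pretrained=False)
model.eval()
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    y_train_arch = model(x)
    deployed = repghost_model_convert(model)   # deep-copies, then fuses the fusion conv/BN branches
    y_deployed = deployed(x)
print(torch.allclose(y_train_arch, y_deployed, atol=1e-4))  # expected: True after fusion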
+ return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'repghostnet_050.in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_5x_43M_66.95.pth.tar' + ), + 'repghostnet_058.in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_58x_60M_68.94.pth.tar' + ), + 'repghostnet_080.in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_8x_96M_72.24.pth.tar' + ), + 'repghostnet_100.in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_0x_142M_74.22.pth.tar' + ), + 'repghostnet_111.in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_11x_170M_75.07.pth.tar' + ), + 'repghostnet_130.in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_3x_231M_76.37.pth.tar' + ), + 'repghostnet_150.in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_5x_301M_77.45.pth.tar' + ), + 'repghostnet_200.in1k': _cfg( + hf_hub_id='timm/', + # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_2_0x_516M_78.81.pth.tar' + ), +}) + + +@register_model +def repghostnet_050(pretrained=False, **kwargs) -> RepGhostNet: + """ RepGhostNet-0.5x """ + model = _create_repghostnet('repghostnet_050', width=0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def repghostnet_058(pretrained=False, **kwargs) -> RepGhostNet: + """ RepGhostNet-0.58x """ + model = _create_repghostnet('repghostnet_058', width=0.58, pretrained=pretrained, **kwargs) + return model + + +@register_model +def repghostnet_080(pretrained=False, **kwargs) -> RepGhostNet: + """ RepGhostNet-0.8x """ + model = _create_repghostnet('repghostnet_080', width=0.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def repghostnet_100(pretrained=False, **kwargs) -> RepGhostNet: + """ RepGhostNet-1.0x """ + model = _create_repghostnet('repghostnet_100', width=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def repghostnet_111(pretrained=False, **kwargs) -> RepGhostNet: + """ RepGhostNet-1.11x """ + model = _create_repghostnet('repghostnet_111', width=1.11, pretrained=pretrained, **kwargs) + return model + +@register_model +def repghostnet_130(pretrained=False, **kwargs) -> RepGhostNet: + """ RepGhostNet-1.3x """ + model = _create_repghostnet('repghostnet_130', width=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def repghostnet_150(pretrained=False, **kwargs) -> RepGhostNet: + """ RepGhostNet-1.5x """ + model = _create_repghostnet('repghostnet_150', width=1.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def repghostnet_200(pretrained=False, **kwargs) -> RepGhostNet: + """ RepGhostNet-2.0x """ + model = _create_repghostnet('repghostnet_200', width=2.0, pretrained=pretrained, **kwargs) + return model diff --git a/pytorch-image-models/timm/models/repvit.py 
b/pytorch-image-models/timm/models/repvit.py new file mode 100644 index 0000000000000000000000000000000000000000..7dcb2cd939f47e7f3ae3553844f5ff5f95a147e4 --- /dev/null +++ b/pytorch-image-models/timm/models/repvit.py @@ -0,0 +1,513 @@ +""" RepViT + +Paper: `RepViT: Revisiting Mobile CNN From ViT Perspective` + - https://arxiv.org/abs/2307.09283 + +@misc{wang2023repvit, + title={RepViT: Revisiting Mobile CNN From ViT Perspective}, + author={Ao Wang and Hui Chen and Zijia Lin and Hengjun Pu and Guiguang Ding}, + year={2023}, + eprint={2307.09283}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} + +Adapted from official impl at https://github.com/jameslahm/RepViT +""" + +__all__ = ['RepVit'] +from typing import Optional + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import SqueezeExcite, trunc_normal_, to_ntuple, to_2tuple +from ._builder import build_model_with_cfg +from ._manipulate import checkpoint_seq +from ._registry import register_model, generate_default_cfgs + + +class ConvNorm(nn.Sequential): + def __init__(self, in_dim, out_dim, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1): + super().__init__() + self.add_module('c', nn.Conv2d(in_dim, out_dim, ks, stride, pad, dilation, groups, bias=False)) + self.add_module('bn', nn.BatchNorm2d(out_dim)) + nn.init.constant_(self.bn.weight, bn_weight_init) + nn.init.constant_(self.bn.bias, 0) + + @torch.no_grad() + def fuse(self): + c, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.Conv2d( + w.size(1) * self.c.groups, + w.size(0), + w.shape[2:], + stride=self.c.stride, + padding=self.c.padding, + dilation=self.c.dilation, + groups=self.c.groups, + device=c.weight.device, + ) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +class NormLinear(nn.Sequential): + def __init__(self, in_dim, out_dim, bias=True, std=0.02): + super().__init__() + self.add_module('bn', nn.BatchNorm1d(in_dim)) + self.add_module('l', nn.Linear(in_dim, out_dim, bias=bias)) + trunc_normal_(self.l.weight, std=std) + if bias: + nn.init.constant_(self.l.bias, 0) + + @torch.no_grad() + def fuse(self): + bn, l = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = l.weight * w[None, :] + if l.bias is None: + b = b @ self.l.weight.T + else: + b = (l.weight @ b[:, None]).view(-1) + self.l.bias + m = nn.Linear(w.size(1), w.size(0), device=l.weight.device) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +class RepVggDw(nn.Module): + def __init__(self, ed, kernel_size, legacy=False): + super().__init__() + self.conv = ConvNorm(ed, ed, kernel_size, 1, (kernel_size - 1) // 2, groups=ed) + if legacy: + self.conv1 = ConvNorm(ed, ed, 1, 1, 0, groups=ed) + # Make torchscript happy. 
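+ # In legacy mode the 1x1 branch already carries its own BN (ConvNorm above), so the
+ # shared post-sum BN below is a no-op Identity, kept only so forward() scripts identically.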
+ self.bn = nn.Identity() + else: + self.conv1 = nn.Conv2d(ed, ed, 1, 1, 0, groups=ed) + self.bn = nn.BatchNorm2d(ed) + self.dim = ed + self.legacy = legacy + + def forward(self, x): + return self.bn(self.conv(x) + self.conv1(x) + x) + + @torch.no_grad() + def fuse(self): + conv = self.conv.fuse() + + if self.legacy: + conv1 = self.conv1.fuse() + else: + conv1 = self.conv1 + + conv_w = conv.weight + conv_b = conv.bias + conv1_w = conv1.weight + conv1_b = conv1.bias + + conv1_w = nn.functional.pad(conv1_w, [1, 1, 1, 1]) + + identity = nn.functional.pad( + torch.ones(conv1_w.shape[0], conv1_w.shape[1], 1, 1, device=conv1_w.device), [1, 1, 1, 1] + ) + + final_conv_w = conv_w + conv1_w + identity + final_conv_b = conv_b + conv1_b + + conv.weight.data.copy_(final_conv_w) + conv.bias.data.copy_(final_conv_b) + + if not self.legacy: + bn = self.bn + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = conv.weight * w[:, None, None, None] + b = bn.bias + (conv.bias - bn.running_mean) * bn.weight / (bn.running_var + bn.eps) ** 0.5 + conv.weight.data.copy_(w) + conv.bias.data.copy_(b) + return conv + + +class RepVitMlp(nn.Module): + def __init__(self, in_dim, hidden_dim, act_layer): + super().__init__() + self.conv1 = ConvNorm(in_dim, hidden_dim, 1, 1, 0) + self.act = act_layer() + self.conv2 = ConvNorm(hidden_dim, in_dim, 1, 1, 0, bn_weight_init=0) + + def forward(self, x): + return self.conv2(self.act(self.conv1(x))) + + +class RepViTBlock(nn.Module): + def __init__(self, in_dim, mlp_ratio, kernel_size, use_se, act_layer, legacy=False): + super(RepViTBlock, self).__init__() + + self.token_mixer = RepVggDw(in_dim, kernel_size, legacy) + self.se = SqueezeExcite(in_dim, 0.25) if use_se else nn.Identity() + self.channel_mixer = RepVitMlp(in_dim, in_dim * mlp_ratio, act_layer) + + def forward(self, x): + x = self.token_mixer(x) + x = self.se(x) + identity = x + x = self.channel_mixer(x) + return identity + x + + +class RepVitStem(nn.Module): + def __init__(self, in_chs, out_chs, act_layer): + super().__init__() + self.conv1 = ConvNorm(in_chs, out_chs // 2, 3, 2, 1) + self.act1 = act_layer() + self.conv2 = ConvNorm(out_chs // 2, out_chs, 3, 2, 1) + self.stride = 4 + + def forward(self, x): + return self.conv2(self.act1(self.conv1(x))) + + +class RepVitDownsample(nn.Module): + def __init__(self, in_dim, mlp_ratio, out_dim, kernel_size, act_layer, legacy=False): + super().__init__() + self.pre_block = RepViTBlock(in_dim, mlp_ratio, kernel_size, use_se=False, act_layer=act_layer, legacy=legacy) + self.spatial_downsample = ConvNorm(in_dim, in_dim, kernel_size, 2, (kernel_size - 1) // 2, groups=in_dim) + self.channel_downsample = ConvNorm(in_dim, out_dim, 1, 1) + self.ffn = RepVitMlp(out_dim, out_dim * mlp_ratio, act_layer) + + def forward(self, x): + x = self.pre_block(x) + x = self.spatial_downsample(x) + x = self.channel_downsample(x) + identity = x + x = self.ffn(x) + return x + identity + + +class RepVitClassifier(nn.Module): + def __init__(self, dim, num_classes, distillation=False, drop=0.0): + super().__init__() + self.head_drop = nn.Dropout(drop) + self.head = NormLinear(dim, num_classes) if num_classes > 0 else nn.Identity() + self.distillation = distillation + self.distilled_training = False + self.num_classes = num_classes + if distillation: + self.head_dist = NormLinear(dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward(self, x): + x = self.head_drop(x) + if self.distillation: + x1, x2 = self.head(x), self.head_dist(x) + if self.training and self.distilled_training and not 
torch.jit.is_scripting(): + return x1, x2 + else: + return (x1 + x2) / 2 + else: + x = self.head(x) + return x + + @torch.no_grad() + def fuse(self): + if not self.num_classes > 0: + return nn.Identity() + head = self.head.fuse() + if self.distillation: + head_dist = self.head_dist.fuse() + head.weight += head_dist.weight + head.bias += head_dist.bias + head.weight /= 2 + head.bias /= 2 + return head + else: + return head + + +class RepVitStage(nn.Module): + def __init__(self, in_dim, out_dim, depth, mlp_ratio, act_layer, kernel_size=3, downsample=True, legacy=False): + super().__init__() + if downsample: + self.downsample = RepVitDownsample(in_dim, mlp_ratio, out_dim, kernel_size, act_layer, legacy) + else: + assert in_dim == out_dim + self.downsample = nn.Identity() + + blocks = [] + use_se = True + for _ in range(depth): + blocks.append(RepViTBlock(out_dim, mlp_ratio, kernel_size, use_se, act_layer, legacy)) + use_se = not use_se + + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + x = self.downsample(x) + x = self.blocks(x) + return x + + +class RepVit(nn.Module): + def __init__( + self, + in_chans=3, + img_size=224, + embed_dim=(48,), + depth=(2,), + mlp_ratio=2, + global_pool='avg', + kernel_size=3, + num_classes=1000, + act_layer=nn.GELU, + distillation=True, + drop_rate=0.0, + legacy=False, + ): + super(RepVit, self).__init__() + self.grad_checkpointing = False + self.global_pool = global_pool + self.embed_dim = embed_dim + self.num_classes = num_classes + + in_dim = embed_dim[0] + self.stem = RepVitStem(in_chans, in_dim, act_layer) + stride = self.stem.stride + resolution = tuple([i // p for i, p in zip(to_2tuple(img_size), to_2tuple(stride))]) + + num_stages = len(embed_dim) + mlp_ratios = to_ntuple(num_stages)(mlp_ratio) + + self.feature_info = [] + stages = [] + for i in range(num_stages): + downsample = True if i != 0 else False + stages.append( + RepVitStage( + in_dim, + embed_dim[i], + depth[i], + mlp_ratio=mlp_ratios[i], + act_layer=act_layer, + kernel_size=kernel_size, + downsample=downsample, + legacy=legacy, + ) + ) + stage_stride = 2 if downsample else 1 + stride *= stage_stride + resolution = tuple([(r - 1) // stage_stride + 1 for r in resolution]) + self.feature_info += [dict(num_chs=embed_dim[i], reduction=stride, module=f'stages.{i}')] + in_dim = embed_dim[i] + self.stages = nn.Sequential(*stages) + + self.num_features = self.head_hidden_size = embed_dim[-1] + self.head_drop = nn.Dropout(drop_rate) + self.head = RepVitClassifier(embed_dim[-1], num_classes, distillation) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict(stem=r'^stem', blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]) # stem and embed + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None, distillation: bool = False): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = RepVitClassifier(self.embed_dim[-1], num_classes, distillation) + + @torch.jit.ignore + def set_distilled_training(self, enable=True): + self.head.distilled_training = enable + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x) + else: + x = self.stages(x) + return x + + def forward_head(self, x, 
pre_logits: bool = False): + if self.global_pool == 'avg': + x = x.mean((2, 3), keepdim=False) + x = self.head_drop(x) + if pre_logits: + return x + return self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + @torch.no_grad() + def fuse(self): + def fuse_children(net): + for child_name, child in net.named_children(): + if hasattr(child, 'fuse'): + fused = child.fuse() + setattr(net, child_name, fused) + fuse_children(fused) + else: + fuse_children(child) + + fuse_children(self) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, + 'input_size': (3, 224, 224), + 'pool_size': (7, 7), + 'crop_pct': 0.95, + 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, + 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.c', + 'classifier': ('head.head.l', 'head.head_dist.l'), + **kwargs, + } + + +default_cfgs = generate_default_cfgs( + { + 'repvit_m1.dist_in1k': _cfg( + hf_hub_id='timm/', + ), + 'repvit_m2.dist_in1k': _cfg( + hf_hub_id='timm/', + ), + 'repvit_m3.dist_in1k': _cfg( + hf_hub_id='timm/', + ), + 'repvit_m0_9.dist_300e_in1k': _cfg( + hf_hub_id='timm/', + ), + 'repvit_m0_9.dist_450e_in1k': _cfg( + hf_hub_id='timm/', + ), + 'repvit_m1_0.dist_300e_in1k': _cfg( + hf_hub_id='timm/', + ), + 'repvit_m1_0.dist_450e_in1k': _cfg( + hf_hub_id='timm/', + ), + 'repvit_m1_1.dist_300e_in1k': _cfg( + hf_hub_id='timm/', + ), + 'repvit_m1_1.dist_450e_in1k': _cfg( + hf_hub_id='timm/', + ), + 'repvit_m1_5.dist_300e_in1k': _cfg( + hf_hub_id='timm/', + ), + 'repvit_m1_5.dist_450e_in1k': _cfg( + hf_hub_id='timm/', + ), + 'repvit_m2_3.dist_300e_in1k': _cfg( + hf_hub_id='timm/', + ), + 'repvit_m2_3.dist_450e_in1k': _cfg( + hf_hub_id='timm/', + ), + } +) + + +def _create_repvit(variant, pretrained=False, **kwargs): + out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) + model = build_model_with_cfg( + RepVit, + variant, + pretrained, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs, + ) + return model + + +@register_model +def repvit_m1(pretrained=False, **kwargs): + """ + Constructs a RepViT-M1 model + """ + model_args = dict(embed_dim=(48, 96, 192, 384), depth=(2, 2, 14, 2), legacy=True) + return _create_repvit('repvit_m1', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def repvit_m2(pretrained=False, **kwargs): + """ + Constructs a RepViT-M2 model + """ + model_args = dict(embed_dim=(64, 128, 256, 512), depth=(2, 2, 12, 2), legacy=True) + return _create_repvit('repvit_m2', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def repvit_m3(pretrained=False, **kwargs): + """ + Constructs a RepViT-M3 model + """ + model_args = dict(embed_dim=(64, 128, 256, 512), depth=(4, 4, 18, 2), legacy=True) + return _create_repvit('repvit_m3', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def repvit_m0_9(pretrained=False, **kwargs): + """ + Constructs a RepViT-M0.9 model + """ + model_args = dict(embed_dim=(48, 96, 192, 384), depth=(2, 2, 14, 2)) + return _create_repvit('repvit_m0_9', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def repvit_m1_0(pretrained=False, **kwargs): + """ + Constructs a RepViT-M1.0 model + """ + model_args = dict(embed_dim=(56, 112, 224, 448), depth=(2, 2, 14, 2)) + return _create_repvit('repvit_m1_0', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def repvit_m1_1(pretrained=False, **kwargs): + """ + Constructs a RepViT-M1.1 model + """ 
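+ # Minimal deployment sketch (assumed usage, using only names defined in this file):
+ #   model = repvit_m1_1(pretrained=False).eval()
+ #   model.fuse()  # folds ConvNorm/NormLinear/RepVggDw branches into plain conv/linear layers
+ #   out = model(torch.randn(1, 3, 224, 224))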
+ model_args = dict(embed_dim=(64, 128, 256, 512), depth=(2, 2, 12, 2)) + return _create_repvit('repvit_m1_1', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def repvit_m1_5(pretrained=False, **kwargs): + """ + Constructs a RepViT-M1.5 model + """ + model_args = dict(embed_dim=(64, 128, 256, 512), depth=(4, 4, 24, 4)) + return _create_repvit('repvit_m1_5', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def repvit_m2_3(pretrained=False, **kwargs): + """ + Constructs a RepViT-M2.3 model + """ + model_args = dict(embed_dim=(80, 160, 320, 640), depth=(6, 6, 34, 2)) + return _create_repvit('repvit_m2_3', pretrained=pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/res2net.py b/pytorch-image-models/timm/models/res2net.py new file mode 100644 index 0000000000000000000000000000000000000000..691f929b91db626f09f30e28a48b1cc5c8ab200e --- /dev/null +++ b/pytorch-image-models/timm/models/res2net.py @@ -0,0 +1,227 @@ +""" Res2Net and Res2NeXt +Adapted from Official Pytorch impl at: https://github.com/gasvn/Res2Net/ +Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 +""" +import math + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs +from .resnet import ResNet + +__all__ = [] + + +class Bottle2neck(nn.Module): + """ Res2Net/Res2NeXT Bottleneck + Adapted from https://github.com/gasvn/Res2Net/blob/master/res2net.py + """ + expansion = 4 + + def __init__( + self, + inplanes, + planes, + stride=1, + downsample=None, + cardinality=1, + base_width=26, + scale=4, + dilation=1, + first_dilation=None, + act_layer=nn.ReLU, + norm_layer=None, + attn_layer=None, + **_, + ): + super(Bottle2neck, self).__init__() + self.scale = scale + self.is_first = stride > 1 or downsample is not None + self.num_scales = max(1, scale - 1) + width = int(math.floor(planes * (base_width / 64.0))) * cardinality + self.width = width + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False) + self.bn1 = norm_layer(width * scale) + + convs = [] + bns = [] + for i in range(self.num_scales): + convs.append(nn.Conv2d( + width, width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, bias=False)) + bns.append(norm_layer(width)) + self.convs = nn.ModuleList(convs) + self.bns = nn.ModuleList(bns) + if self.is_first: + # FIXME this should probably have count_include_pad=False, but hurts original weights + self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) + else: + self.pool = None + + self.conv3 = nn.Conv2d(width * scale, outplanes, kernel_size=1, bias=False) + self.bn3 = norm_layer(outplanes) + self.se = attn_layer(outplanes) if attn_layer is not None else None + + self.relu = act_layer(inplace=True) + self.downsample = downsample + + def zero_init_last(self): + if getattr(self.bn3, 'weight', None) is not None: + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + spo = [] + sp = spx[0] # redundant, for torchscript + for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): + if i == 0 or self.is_first: + sp = spx[i] + else: + sp = sp + spx[i] + sp 
= conv(sp) + sp = bn(sp) + sp = self.relu(sp) + spo.append(sp) + if self.scale > 1: + if self.pool is not None: # self.is_first == True, None check for torchscript + spo.append(self.pool(spx[-1])) + else: + spo.append(spx[-1]) + out = torch.cat(spo, 1) + + out = self.conv3(out) + out = self.bn3(out) + + if self.se is not None: + out = self.se(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out += shortcut + out = self.relu(out) + + return out + + +def _create_res2net(variant, pretrained=False, **kwargs): + return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'res2net50_26w_4s.in1k': _cfg(hf_hub_id='timm/'), + 'res2net50_48w_2s.in1k': _cfg(hf_hub_id='timm/'), + 'res2net50_14w_8s.in1k': _cfg(hf_hub_id='timm/'), + 'res2net50_26w_6s.in1k': _cfg(hf_hub_id='timm/'), + 'res2net50_26w_8s.in1k': _cfg(hf_hub_id='timm/'), + 'res2net101_26w_4s.in1k': _cfg(hf_hub_id='timm/'), + 'res2next50.in1k': _cfg(hf_hub_id='timm/'), + 'res2net50d.in1k': _cfg(hf_hub_id='timm/', first_conv='conv1.0'), + 'res2net101d.in1k': _cfg(hf_hub_id='timm/', first_conv='conv1.0'), +}) + + +@register_model +def res2net50_26w_4s(pretrained=False, **kwargs) -> ResNet: + """Constructs a Res2Net-50 26w4s model. + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=4)) + return _create_res2net('res2net50_26w_4s', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def res2net101_26w_4s(pretrained=False, **kwargs) -> ResNet: + """Constructs a Res2Net-101 26w4s model. + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, block_args=dict(scale=4)) + return _create_res2net('res2net101_26w_4s', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def res2net50_26w_6s(pretrained=False, **kwargs) -> ResNet: + """Constructs a Res2Net-50 26w6s model. + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=6)) + return _create_res2net('res2net50_26w_6s', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def res2net50_26w_8s(pretrained=False, **kwargs) -> ResNet: + """Constructs a Res2Net-50 26w8s model. + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=8)) + return _create_res2net('res2net50_26w_8s', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def res2net50_48w_2s(pretrained=False, **kwargs) -> ResNet: + """Constructs a Res2Net-50 48w2s model. + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=48, block_args=dict(scale=2)) + return _create_res2net('res2net50_48w_2s', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def res2net50_14w_8s(pretrained=False, **kwargs) -> ResNet: + """Constructs a Res2Net-50 14w8s model. 
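+ The 14w8s naming maps to base_width=14 and scale=8 feature splits per Bottle2neck block.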
+ """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=14, block_args=dict(scale=8)) + return _create_res2net('res2net50_14w_8s', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def res2next50(pretrained=False, **kwargs) -> ResNet: + """Construct Res2NeXt-50 4s + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=4, cardinality=8, block_args=dict(scale=4)) + return _create_res2net('res2next50', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def res2net50d(pretrained=False, **kwargs) -> ResNet: + """Construct Res2Net-50 + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, stem_type='deep', + avg_down=True, stem_width=32, block_args=dict(scale=4)) + return _create_res2net('res2net50d', pretrained, **dict(model_args, **kwargs)) + + +@register_model +def res2net101d(pretrained=False, **kwargs) -> ResNet: + """Construct Res2Net-50 + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, stem_type='deep', + avg_down=True, stem_width=32, block_args=dict(scale=4)) + return _create_res2net('res2net101d', pretrained, **dict(model_args, **kwargs)) diff --git a/pytorch-image-models/timm/models/resnest.py b/pytorch-image-models/timm/models/resnest.py new file mode 100644 index 0000000000000000000000000000000000000000..5b1438017ea11dbd98968e07f64120bcb66a6aac --- /dev/null +++ b/pytorch-image-models/timm/models/resnest.py @@ -0,0 +1,251 @@ +""" ResNeSt Models + +Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 + +Adapted from original PyTorch impl w/ weights at https://github.com/zhanghang1989/ResNeSt by Hang Zhang + +Modified for torchscript compat, and consistency with timm by Ross Wightman +""" +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import SplitAttn +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs +from .resnet import ResNet + + +class ResNestBottleneck(nn.Module): + """ResNet Bottleneck + """ + # pylint: disable=unused-argument + expansion = 4 + + def __init__( + self, + inplanes, + planes, + stride=1, + downsample=None, + radix=1, + cardinality=1, + base_width=64, + avd=False, + avd_first=False, + is_first=False, + reduce_first=1, + dilation=1, + first_dilation=None, + act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, + attn_layer=None, + aa_layer=None, + drop_block=None, + drop_path=None, + ): + super(ResNestBottleneck, self).__init__() + assert reduce_first == 1 # not supported + assert attn_layer is None # not supported + assert aa_layer is None # TODO not yet supported + assert drop_path is None # TODO not yet supported + + group_width = int(planes * (base_width / 64.)) * cardinality + first_dilation = first_dilation or dilation + if avd and (stride > 1 or is_first): + avd_stride = stride + stride = 1 + else: + avd_stride = 0 + self.radix = radix + + self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False) + self.bn1 = norm_layer(group_width) + self.act1 = act_layer(inplace=True) + self.avd_first = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None + + if self.radix >= 1: + self.conv2 = SplitAttn( + group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_layer=drop_block) + self.bn2 = nn.Identity() + self.drop_block = nn.Identity() + self.act2 = 
nn.Identity() + else: + self.conv2 = nn.Conv2d( + group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, bias=False) + self.bn2 = norm_layer(group_width) + self.drop_block = drop_block() if drop_block is not None else nn.Identity() + self.act2 = act_layer(inplace=True) + self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and not avd_first else None + + self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False) + self.bn3 = norm_layer(planes*4) + self.act3 = act_layer(inplace=True) + self.downsample = downsample + + def zero_init_last(self): + if getattr(self.bn3, 'weight', None) is not None: + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.act1(out) + + if self.avd_first is not None: + out = self.avd_first(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.drop_block(out) + out = self.act2(out) + + if self.avd_last is not None: + out = self.avd_last(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out += shortcut + out = self.act3(out) + return out + + +def _create_resnest(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, + variant, + pretrained, + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1.0', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'resnest14d.gluon_in1k': _cfg(hf_hub_id='timm/'), + 'resnest26d.gluon_in1k': _cfg(hf_hub_id='timm/'), + 'resnest50d.in1k': _cfg(hf_hub_id='timm/'), + 'resnest101e.in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 256, 256), pool_size=(8, 8)), + 'resnest200e.in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'), + 'resnest269e.in1k': _cfg( + hf_hub_id='timm/', + input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'), + 'resnest50d_4s2x40d.in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic'), + 'resnest50d_1s4x24d.in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic') +}) + + +@register_model +def resnest14d(pretrained=False, **kwargs) -> ResNet: + """ ResNeSt-14d model. Weights ported from GluonCV. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[1, 1, 1, 1], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False)) + return _create_resnest('resnest14d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) + + +@register_model +def resnest26d(pretrained=False, **kwargs) -> ResNet: + """ ResNeSt-26d model. Weights ported from GluonCV. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[2, 2, 2, 2], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False)) + return _create_resnest('resnest26d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) + + +@register_model +def resnest50d(pretrained=False, **kwargs) -> ResNet: + """ ResNeSt-50d model. 
Matches paper ResNeSt-50 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'd' for deep stem, stem_width 32, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False)) + return _create_resnest('resnest50d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) + + +@register_model +def resnest101e(pretrained=False, **kwargs) -> ResNet: + """ ResNeSt-101e model. Matches paper ResNeSt-101 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 23, 3], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False)) + return _create_resnest('resnest101e', pretrained=pretrained, **dict(model_kwargs, **kwargs)) + + +@register_model +def resnest200e(pretrained=False, **kwargs) -> ResNet: + """ ResNeSt-200e model. Matches paper ResNeSt-200 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 24, 36, 3], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False)) + return _create_resnest('resnest200e', pretrained=pretrained, **dict(model_kwargs, **kwargs)) + + +@register_model +def resnest269e(pretrained=False, **kwargs) -> ResNet: + """ ResNeSt-269e model. Matches paper ResNeSt-269 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. 
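+ The default pretrained config evaluates at 416x416 inputs with crop_pct 0.928.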
+ """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 30, 48, 8], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False)) + return _create_resnest('resnest269e', pretrained=pretrained, **dict(model_kwargs, **kwargs)) + + +@register_model +def resnest50d_4s2x40d(pretrained=False, **kwargs) -> ResNet: + """ResNeSt-50 4s2x40d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2, + block_args=dict(radix=4, avd=True, avd_first=True)) + return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) + + +@register_model +def resnest50d_1s4x24d(pretrained=False, **kwargs) -> ResNet: + """ResNeSt-50 1s4x24d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4, + block_args=dict(radix=1, avd=True, avd_first=True)) + return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) diff --git a/pytorch-image-models/timm/models/selecsls.py b/pytorch-image-models/timm/models/selecsls.py new file mode 100644 index 0000000000000000000000000000000000000000..fdfa16c3186b4fa3f1d434c50232019f361fc56b --- /dev/null +++ b/pytorch-image-models/timm/models/selecsls.py @@ -0,0 +1,378 @@ +"""PyTorch SelecSLS Net example for ImageNet Classification +License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode) +Author: Dushyant Mehta (@mehtadushy) + +SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D +Human Pose Estimation with a Single RGB Camera, Mehta et al." 
+https://arxiv.org/abs/1907.00837 + +Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models +and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch +""" +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import create_classifier +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs + +__all__ = ['SelecSls'] # model_registry will add each entrypoint fn to this + + +class SequentialList(nn.Sequential): + + def __init__(self, *args): + super(SequentialList, self).__init__(*args) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (List[torch.Tensor]) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (List[torch.Tensor]) + pass + + def forward(self, x) -> List[torch.Tensor]: + for module in self: + x = module(x) + return x + + +class SelectSeq(nn.Module): + def __init__(self, mode='index', index=0): + super(SelectSeq, self).__init__() + self.mode = mode + self.index = index + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor]) -> (torch.Tensor) + pass + + def forward(self, x) -> torch.Tensor: + if self.mode == 'index': + return x[self.index] + else: + return torch.cat(x, dim=1) + + +def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1): + if padding is None: + padding = ((stride - 1) + dilation * (k - 1)) // 2 + return nn.Sequential( + nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False), + nn.BatchNorm2d(out_chs), + nn.ReLU(inplace=True) + ) + + +class SelecSlsBlock(nn.Module): + def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1): + super(SelecSlsBlock, self).__init__() + self.stride = stride + self.is_first = is_first + assert stride in [1, 2] + + # Process input with 4 conv blocks with the same number of input and output channels + self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation) + self.conv2 = conv_bn(mid_chs, mid_chs, 1) + self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3) + self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1) + self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3) + self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1) + + def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: + if not isinstance(x, list): + x = [x] + assert len(x) in [1, 2] + + d1 = self.conv1(x[0]) + d2 = self.conv3(self.conv2(d1)) + d3 = self.conv5(self.conv4(d2)) + if self.is_first: + out = self.conv6(torch.cat([d1, d2, d3], 1)) + return [out, out] + else: + return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]] + + +class SelecSls(nn.Module): + """SelecSls42 / SelecSls60 / SelecSls84 + + Parameters + ---------- + cfg : network config dictionary specifying block type, feature, and head args + num_classes : int, default 1000 + Number of classification classes. + in_chans : int, default 3 + Number of input (color) channels. + drop_rate : float, default 0. + Dropout probability before classifier, for training + global_pool : str, default 'avg' + Global pooling type. 
One of 'avg', 'max', 'avgmax', 'catavgmax' + """ + + def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'): + self.num_classes = num_classes + super(SelecSls, self).__init__() + + self.stem = conv_bn(in_chans, 32, stride=2) + self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']]) + self.from_seq = SelectSeq() # from List[tensor] -> Tensor in module compatible way + self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']]) + self.num_features = self.head_hidden_size = cfg['num_features'] + self.feature_info = cfg['feature_info'] + + self.global_pool, self.head_drop, self.fc = create_classifier( + self.num_features, + self.num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + ) + + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^features\.(\d+)', + blocks_head=r'^head' + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.fc + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.stem(x) + x = self.features(x) + x = self.head(self.from_seq(x)) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.head_drop(x) + return x if pre_logits else self.fc(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_selecsls(variant, pretrained, **kwargs): + cfg = {} + feature_info = [dict(num_chs=32, reduction=2, module='stem.2')] + if variant.startswith('selecsls42'): + cfg['block'] = SelecSlsBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 128, False, 1), + (128, 0, 144, 144, True, 2), + (144, 144, 144, 288, False, 1), + (288, 0, 304, 304, True, 2), + (304, 304, 304, 480, False, 1), + ] + feature_info.extend([ + dict(num_chs=128, reduction=4, module='features.1'), + dict(num_chs=288, reduction=8, module='features.3'), + dict(num_chs=480, reduction=16, module='features.5'), + ]) + # Head can be replaced with alternative configurations depending on the problem + feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) + if variant == 'selecsls42b': + cfg['head'] = [ + (480, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1280, 3, 2), + (1280, 1024, 1, 1), + ] + feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) + cfg['num_features'] = 1024 + else: + cfg['head'] = [ + (480, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 1, 1), + ] + feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) + cfg['num_features'] = 1280 + + elif variant.startswith('selecsls60'): + cfg['block'] = SelecSlsBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 128, False, 1), + (128, 0, 128, 128, True, 2), + (128, 128, 128, 128, 
False, 1), + (128, 128, 128, 288, False, 1), + (288, 0, 288, 288, True, 2), + (288, 288, 288, 288, False, 1), + (288, 288, 288, 288, False, 1), + (288, 288, 288, 416, False, 1), + ] + feature_info.extend([ + dict(num_chs=128, reduction=4, module='features.1'), + dict(num_chs=288, reduction=8, module='features.4'), + dict(num_chs=416, reduction=16, module='features.8'), + ]) + # Head can be replaced with alternative configurations depending on the problem + feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) + if variant == 'selecsls60b': + cfg['head'] = [ + (416, 756, 3, 2), + (756, 1024, 3, 1), + (1024, 1280, 3, 2), + (1280, 1024, 1, 1), + ] + feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) + cfg['num_features'] = 1024 + else: + cfg['head'] = [ + (416, 756, 3, 2), + (756, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 1, 1), + ] + feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) + cfg['num_features'] = 1280 + + elif variant == 'selecsls84': + cfg['block'] = SelecSlsBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 144, False, 1), + (144, 0, 144, 144, True, 2), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 304, False, 1), + (304, 0, 304, 304, True, 2), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 512, False, 1), + ] + feature_info.extend([ + dict(num_chs=144, reduction=4, module='features.1'), + dict(num_chs=304, reduction=8, module='features.6'), + dict(num_chs=512, reduction=16, module='features.12'), + ]) + # Head can be replaced with alternative configurations depending on the problem + cfg['head'] = [ + (512, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 3, 1), + ] + cfg['num_features'] = 1280 + feature_info.extend([ + dict(num_chs=1024, reduction=32, module='head.1'), + dict(num_chs=1280, reduction=64, module='head.3') + ]) + else: + raise ValueError('Invalid net configuration ' + variant + ' !!!') + cfg['feature_info'] = feature_info + + # this model can do 6 feature levels by default, unlike most others, leave as 0-4 to avoid surprises? + return build_model_with_cfg( + SelecSls, + variant, + pretrained, + model_cfg=cfg, + feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True), + **kwargs, + ) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'selecsls42.untrained': _cfg( + interpolation='bicubic'), + 'selecsls42b.in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic'), + 'selecsls60.in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic'), + 'selecsls60b.in1k': _cfg( + hf_hub_id='timm/', + interpolation='bicubic'), + 'selecsls84.untrained': _cfg( + interpolation='bicubic'), +}) + + +@register_model +def selecsls42(pretrained=False, **kwargs) -> SelecSls: + """Constructs a SelecSls42 model. 
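+ No pretrained weights are published for this variant ('selecsls42.untrained'); selecsls42b provides an ImageNet-1k checkpoint.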
+ """ + return _create_selecsls('selecsls42', pretrained, **kwargs) + + +@register_model +def selecsls42b(pretrained=False, **kwargs) -> SelecSls: + """Constructs a SelecSls42_B model. + """ + return _create_selecsls('selecsls42b', pretrained, **kwargs) + + +@register_model +def selecsls60(pretrained=False, **kwargs) -> SelecSls: + """Constructs a SelecSls60 model. + """ + return _create_selecsls('selecsls60', pretrained, **kwargs) + + +@register_model +def selecsls60b(pretrained=False, **kwargs) -> SelecSls: + """Constructs a SelecSls60_B model. + """ + return _create_selecsls('selecsls60b', pretrained, **kwargs) + + +@register_model +def selecsls84(pretrained=False, **kwargs) -> SelecSls: + """Constructs a SelecSls84 model. + """ + return _create_selecsls('selecsls84', pretrained, **kwargs) diff --git a/pytorch-image-models/timm/models/senet.py b/pytorch-image-models/timm/models/senet.py new file mode 100644 index 0000000000000000000000000000000000000000..dd9b149b3d7f1d98344ab554e06d56fbb39596c9 --- /dev/null +++ b/pytorch-image-models/timm/models/senet.py @@ -0,0 +1,465 @@ +""" +SEResNet implementation from Cadene's pretrained models +https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py +Additional credit to https://github.com/creafz + +Original model: https://github.com/hujie-frank/SENet + +ResNet code gently borrowed from +https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py + +FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate +support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here. +""" +import math +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import create_classifier +from ._builder import build_model_with_cfg +from ._registry import register_model, generate_default_cfgs + +__all__ = ['SENet'] + + +def _weight_init(m): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) + + +class SEModule(nn.Module): + + def __init__(self, channels, reduction): + super(SEModule, self).__init__() + self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1) + self.relu = nn.ReLU(inplace=True) + self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + module_input = x + x = x.mean((2, 3), keepdim=True) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.sigmoid(x) + return module_input * x + + +class Bottleneck(nn.Module): + """ + Base class for bottlenecks that implements `forward()` method. + """ + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out = self.se_module(out) + shortcut + out = self.relu(out) + + return out + + +class SEBottleneck(Bottleneck): + """ + Bottleneck for SENet154. 
+ """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): + super(SEBottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes * 2) + self.conv2 = nn.Conv2d( + planes * 2, planes * 4, kernel_size=3, stride=stride, + padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes * 4) + self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNetBottleneck(Bottleneck): + """ + ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe + implementation and uses `stride=stride` in `conv1` and not in `conv2` + (the latter is used in the torchvision implementation of ResNet). + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): + super(SEResNetBottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNeXtBottleneck(Bottleneck): + """ + ResNeXt bottleneck type C with a Squeeze-and-Excitation module. + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4): + super(SEResNeXtBottleneck, self).__init__() + width = math.floor(planes * (base_width / 64)) * groups + self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1) + self.bn1 = nn.BatchNorm2d(width) + self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(width) + self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNetBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): + super(SEResNetBlock, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes, reduction=reduction) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out = self.se_module(out) + shortcut + out = self.relu(out) + + return out + + +class SENet(nn.Module): + + def __init__( + self, block, layers, groups, reduction, drop_rate=0.2, + in_chans=3, inplanes=64, input_3x3=False, 
downsample_kernel_size=1, + downsample_padding=0, num_classes=1000, global_pool='avg'): + """ + Parameters + ---------- + block (nn.Module): Bottleneck class. + - For SENet154: SEBottleneck + - For SE-ResNet models: SEResNetBottleneck + - For SE-ResNeXt models: SEResNeXtBottleneck + layers (list of ints): Number of residual blocks for 4 layers of the + network (layer1...layer4). + groups (int): Number of groups for the 3x3 convolution in each + bottleneck block. + - For SENet154: 64 + - For SE-ResNet models: 1 + - For SE-ResNeXt models: 32 + reduction (int): Reduction ratio for Squeeze-and-Excitation modules. + - For all models: 16 + dropout_p (float or None): Drop probability for the Dropout layer. + If `None` the Dropout layer is not used. + - For SENet154: 0.2 + - For SE-ResNet models: None + - For SE-ResNeXt models: None + inplanes (int): Number of input channels for layer1. + - For SENet154: 128 + - For SE-ResNet models: 64 + - For SE-ResNeXt models: 64 + input_3x3 (bool): If `True`, use three 3x3 convolutions instead of + a single 7x7 convolution in layer0. + - For SENet154: True + - For SE-ResNet models: False + - For SE-ResNeXt models: False + downsample_kernel_size (int): Kernel size for downsampling convolutions + in layer2, layer3 and layer4. + - For SENet154: 3 + - For SE-ResNet models: 1 + - For SE-ResNeXt models: 1 + downsample_padding (int): Padding for downsampling convolutions in + layer2, layer3 and layer4. + - For SENet154: 1 + - For SE-ResNet models: 0 + - For SE-ResNeXt models: 0 + num_classes (int): Number of outputs in `last_linear` layer. + - For all models: 1000 + """ + super(SENet, self).__init__() + self.inplanes = inplanes + self.num_classes = num_classes + self.drop_rate = drop_rate + if input_3x3: + layer0_modules = [ + ('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)), + ('bn1', nn.BatchNorm2d(64)), + ('relu1', nn.ReLU(inplace=True)), + ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)), + ('bn2', nn.BatchNorm2d(64)), + ('relu2', nn.ReLU(inplace=True)), + ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)), + ('bn3', nn.BatchNorm2d(inplanes)), + ('relu3', nn.ReLU(inplace=True)), + ] + else: + layer0_modules = [ + ('conv1', nn.Conv2d( + in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)), + ('bn1', nn.BatchNorm2d(inplanes)), + ('relu1', nn.ReLU(inplace=True)), + ] + self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) + # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`. 
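+ # layer0 halves the input resolution; this max pool brings the total stride to 4 before layer1.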
+ self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')] + self.layer1 = self._make_layer( + block, + planes=64, + blocks=layers[0], + groups=groups, + reduction=reduction, + downsample_kernel_size=1, + downsample_padding=0 + ) + self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')] + self.layer2 = self._make_layer( + block, + planes=128, + blocks=layers[1], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')] + self.layer3 = self._make_layer( + block, + planes=256, + blocks=layers[2], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')] + self.layer4 = self._make_layer( + block, + planes=512, + blocks=layers[3], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')] + self.num_features = self.head_hidden_size = 512 * block.expansion + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + for m in self.modules(): + _weight_init(m) + + def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, + downsample_kernel_size=1, downsample_padding=0): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, + stride=stride, padding=downsample_padding, bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)] + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, groups, reduction)) + + return nn.Sequential(*layers) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict(stem=r'^layer0', blocks=r'^layer(\d+)' if coarse else r'^layer(\d+)\.(\d+)') + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.last_linear + + def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.layer0(x) + x = self.pool0(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return x if pre_logits else self.last_linear(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_senet(variant, pretrained=False, **kwargs): + return build_model_with_cfg(SENet, variant, pretrained, **kwargs) + + +def _cfg(url='', **kwargs): + return { + 
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'layer0.conv1', 'classifier': 'last_linear', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'legacy_senet154.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_senet154-e9eb9fe6.pth'), + 'legacy_seresnet18.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth', + interpolation='bicubic'), + 'legacy_seresnet34.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'), + 'legacy_seresnet50.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'), + 'legacy_seresnet101.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'), + 'legacy_seresnet152.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'), + 'legacy_seresnext26_32x4d.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth', + interpolation='bicubic'), + 'legacy_seresnext50_32x4d.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext50_32x4d-f3651bad.pth'), + 'legacy_seresnext101_32x4d.in1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext101_32x4d-37725eac.pth'), +}) + + +@register_model +def legacy_seresnet18(pretrained=False, **kwargs) -> SENet: + model_args = dict( + block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet18', pretrained, **model_args) + + +@register_model +def legacy_seresnet34(pretrained=False, **kwargs) -> SENet: + model_args = dict( + block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet34', pretrained, **model_args) + + +@register_model +def legacy_seresnet50(pretrained=False, **kwargs) -> SENet: + model_args = dict( + block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet50', pretrained, **model_args) + + +@register_model +def legacy_seresnet101(pretrained=False, **kwargs) -> SENet: + model_args = dict( + block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet101', pretrained, **model_args) + + +@register_model +def legacy_seresnet152(pretrained=False, **kwargs) -> SENet: + model_args = dict( + block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet152', pretrained, **model_args) + + +@register_model +def legacy_senet154(pretrained=False, **kwargs) -> SENet: + model_args = dict( + block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16, + downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs) + return _create_senet('legacy_senet154', pretrained, **model_args) + + +@register_model +def legacy_seresnext26_32x4d(pretrained=False, **kwargs) -> SENet: + model_args = dict( + block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], 
groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args) + + +@register_model +def legacy_seresnext50_32x4d(pretrained=False, **kwargs) -> SENet: + model_args = dict( + block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def legacy_seresnext101_32x4d(pretrained=False, **kwargs) -> SENet: + model_args = dict( + block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args) diff --git a/pytorch-image-models/timm/models/sequencer.py b/pytorch-image-models/timm/models/sequencer.py new file mode 100644 index 0000000000000000000000000000000000000000..86c4b1df4d1180f83bea257de2c3009f5ae9e932 --- /dev/null +++ b/pytorch-image-models/timm/models/sequencer.py @@ -0,0 +1,540 @@ +""" Sequencer + +Paper: `Sequencer: Deep LSTM for Image Classification` - https://arxiv.org/pdf/2205.01972.pdf + +""" +# Copyright (c) 2022. Yuki Tatsunami +# Licensed under the Apache License, Version 2.0 (the "License"); + +import math +from functools import partial +from itertools import accumulate +from typing import Optional, Tuple + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT +from timm.layers import lecun_normal_, DropPath, Mlp, PatchEmbed, ClassifierHead +from ._builder import build_model_with_cfg +from ._manipulate import named_apply +from ._registry import register_model, generate_default_cfgs + +__all__ = ['Sequencer2d'] # model_registry will add each entrypoint fn to this + + +def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + else: + if flax: + # Flax defaults + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + else: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.RNN, nn.GRU, nn.LSTM)): + stdv = 1.0 / math.sqrt(module.hidden_size) + for weight in module.parameters(): + nn.init.uniform_(weight, -stdv, stdv) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +class RNNIdentity(nn.Module): + def __init__(self, *args, **kwargs): + super(RNNIdentity, self).__init__() + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, None]: + return x, None + + +class RNN2dBase(nn.Module): + + def __init__( + self, + input_size: int, + hidden_size: int, + num_layers: int = 1, + bias: bool = True, + bidirectional: bool = True, + union="cat", + with_fc=True, + ): + super().__init__() + + self.input_size = input_size + self.hidden_size = hidden_size + self.output_size = 2 * hidden_size if bidirectional else hidden_size + self.union = union + + self.with_vertical = True + self.with_horizontal = True + self.with_fc = with_fc + + self.fc = None + if with_fc: + if union == "cat": + self.fc = nn.Linear(2 * self.output_size, input_size) 
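# Shape-bookkeeping sketch for the "cat" union handled above: a bidirectional LSTM
# with hidden_size units yields output_size = 2 * hidden_size features per token, and
# concatenating the vertical and horizontal passes doubles that again, so the fc
# projects 2 * output_size = 4 * hidden_size channels back to input_size.
import torch
import torch.nn as nn

input_size, hidden_size = 192, 48
output_size = 2 * hidden_size                     # bidirectional
fc = nn.Linear(2 * output_size, input_size)       # cat of vertical + horizontal outputs
tokens = torch.randn(4, 14, 14, 2 * output_size)  # (B, H, W, 4 * hidden_size)
assert fc(tokens).shape == (4, 14, 14, input_size)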
+ elif union == "add": + self.fc = nn.Linear(self.output_size, input_size) + elif union == "vertical": + self.fc = nn.Linear(self.output_size, input_size) + self.with_horizontal = False + elif union == "horizontal": + self.fc = nn.Linear(self.output_size, input_size) + self.with_vertical = False + else: + raise ValueError("Unrecognized union: " + union) + elif union == "cat": + pass + if 2 * self.output_size != input_size: + raise ValueError(f"The output channel {2 * self.output_size} is different from the input channel {input_size}.") + elif union == "add": + pass + if self.output_size != input_size: + raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") + elif union == "vertical": + if self.output_size != input_size: + raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") + self.with_horizontal = False + elif union == "horizontal": + if self.output_size != input_size: + raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") + self.with_vertical = False + else: + raise ValueError("Unrecognized union: " + union) + + self.rnn_v = RNNIdentity() + self.rnn_h = RNNIdentity() + + def forward(self, x): + B, H, W, C = x.shape + + if self.with_vertical: + v = x.permute(0, 2, 1, 3) + v = v.reshape(-1, H, C) + v, _ = self.rnn_v(v) + v = v.reshape(B, W, H, -1) + v = v.permute(0, 2, 1, 3) + else: + v = None + + if self.with_horizontal: + h = x.reshape(-1, W, C) + h, _ = self.rnn_h(h) + h = h.reshape(B, H, W, -1) + else: + h = None + + if v is not None and h is not None: + if self.union == "cat": + x = torch.cat([v, h], dim=-1) + else: + x = v + h + elif v is not None: + x = v + elif h is not None: + x = h + + if self.fc is not None: + x = self.fc(x) + + return x + + +class LSTM2d(RNN2dBase): + + def __init__( + self, + input_size: int, + hidden_size: int, + num_layers: int = 1, + bias: bool = True, + bidirectional: bool = True, + union="cat", + with_fc=True, + ): + super().__init__(input_size, hidden_size, num_layers, bias, bidirectional, union, with_fc) + if self.with_vertical: + self.rnn_v = nn.LSTM( + input_size, + hidden_size, + num_layers, + batch_first=True, + bias=bias, + bidirectional=bidirectional, + ) + if self.with_horizontal: + self.rnn_h = nn.LSTM( + input_size, + hidden_size, + num_layers, + batch_first=True, + bias=bias, + bidirectional=bidirectional, + ) + + +class Sequencer2dBlock(nn.Module): + def __init__( + self, + dim, + hidden_size, + mlp_ratio=3.0, + rnn_layer=LSTM2d, + mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + num_layers=1, + bidirectional=True, + union="cat", + with_fc=True, + drop=0., + drop_path=0., + ): + super().__init__() + channels_dim = int(mlp_ratio * dim) + self.norm1 = norm_layer(dim) + self.rnn_tokens = rnn_layer( + dim, + hidden_size, + num_layers=num_layers, + bidirectional=bidirectional, + union=union, + with_fc=with_fc, + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.rnn_tokens(self.norm1(x))) + x = x + self.drop_path(self.mlp_channels(self.norm2(x))) + return x + + +class Shuffle(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + if self.training: + B, H, W, C = x.shape + r = torch.randperm(H * W) + x = x.reshape(B, -1, C) + x = x[:, r, :].reshape(B, H, W, -1) + return x + + +class Downsample2d(nn.Module): + def __init__(self, input_dim, output_dim, patch_size): + super().__init__() + self.down = nn.Conv2d(input_dim, output_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + x = x.permute(0, 3, 1, 2) + x = self.down(x) + x = x.permute(0, 2, 3, 1) + return x + + +class Sequencer2dStage(nn.Module): + def __init__( + self, + dim, + dim_out, + depth, + patch_size, + hidden_size, + mlp_ratio, + downsample=False, + block_layer=Sequencer2dBlock, + rnn_layer=LSTM2d, + mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + num_layers=1, + bidirectional=True, + union="cat", + with_fc=True, + drop=0., + drop_path=0., + ): + super().__init__() + if downsample: + self.downsample = Downsample2d(dim, dim_out, patch_size) + else: + assert dim == dim_out + self.downsample = nn.Identity() + + blocks = [] + for block_idx in range(depth): + blocks.append(block_layer( + dim_out, + hidden_size, + mlp_ratio=mlp_ratio, + rnn_layer=rnn_layer, + mlp_layer=mlp_layer, + norm_layer=norm_layer, + act_layer=act_layer, + num_layers=num_layers, + bidirectional=bidirectional, + union=union, + with_fc=with_fc, + drop=drop, + drop_path=drop_path[block_idx] if isinstance(drop_path, (list, tuple)) else drop_path, + )) + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + x = self.downsample(x) + x = self.blocks(x) + return x + + +class Sequencer2d(nn.Module): + def __init__( + self, + num_classes=1000, + img_size=224, + in_chans=3, + global_pool='avg', + layers=(4, 3, 8, 3), + patch_sizes=(7, 2, 2, 1), + embed_dims=(192, 384, 384, 384), + hidden_sizes=(48, 96, 96, 96), + mlp_ratios=(3.0, 3.0, 3.0, 3.0), + block_layer=Sequencer2dBlock, + rnn_layer=LSTM2d, + mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + num_rnn_layers=1, + bidirectional=True, + union="cat", + with_fc=True, + drop_rate=0., + drop_path_rate=0., + nlhb=False, + stem_norm=False, + ): + super().__init__() + assert global_pool in ('', 'avg') + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.head_hidden_size = embed_dims[-1] # for consistency with other models + self.feature_dim = -1 # channel dim index for feature outputs (rank 4, NHWC) + self.output_fmt = 'NHWC' + self.feature_info = [] + + self.stem = PatchEmbed( + img_size=None, + patch_size=patch_sizes[0], + in_chans=in_chans, + embed_dim=embed_dims[0], + norm_layer=norm_layer if stem_norm else None, + flatten=False, + output_fmt='NHWC', + ) + + assert len(layers) == len(patch_sizes) == len(embed_dims) == len(hidden_sizes) == len(mlp_ratios) + reductions = list(accumulate(patch_sizes, lambda x, y: x * y)) + stages = [] + prev_dim = embed_dims[0] + for i, _ in enumerate(embed_dims): + stages += [Sequencer2dStage( + prev_dim, + embed_dims[i], + depth=layers[i], + downsample=i > 0, + patch_size=patch_sizes[i], + hidden_size=hidden_sizes[i], + mlp_ratio=mlp_ratios[i], + block_layer=block_layer, + rnn_layer=rnn_layer, + 
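# Downsample2d sketch matching the module above: NHWC features are permuted to NCHW
# for a strided conv (kernel = stride = patch_size), then permuted back, so
# patch_size=2 halves the spatial grid while changing the channel count.
import torch
import torch.nn as nn

x = torch.randn(1, 28, 28, 192)                      # (B, H, W, C)
down = nn.Conv2d(192, 384, kernel_size=2, stride=2)
y = down(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)  # (1, 14, 14, 384)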
mlp_layer=mlp_layer, + norm_layer=norm_layer, + act_layer=act_layer, + num_layers=num_rnn_layers, + bidirectional=bidirectional, + union=union, + with_fc=with_fc, + drop=drop_rate, + drop_path=drop_path_rate, + )] + prev_dim = embed_dims[i] + self.feature_info += [dict(num_chs=prev_dim, reduction=reductions[i], module=f'stages.{i}')] + + self.stages = nn.Sequential(*stages) + self.norm = norm_layer(embed_dims[-1]) + self.head = ClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + input_fmt=self.output_fmt, + ) + + self.init_weights(nlhb=nlhb) + + def init_weights(self, nlhb=False): + head_bias = -math.log(self.num_classes) if nlhb else 0. + named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=[ + (r'^stages\.(\d+)', None), + (r'^norm', (99999,)) + ] if coarse else [ + (r'^stages\.(\d+)\.blocks\.(\d+)', None), + (r'^stages\.(\d+)\.downsample', (0,)), + (r'^norm', (99999,)) + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=True) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + """ Remap original checkpoints -> timm """ + if 'stages.0.blocks.0.norm1.weight' in state_dict: + return state_dict # already translated checkpoint + if 'model' in state_dict: + state_dict = state_dict['model'] + + import re + out_dict = {} + for k, v in state_dict.items(): + k = re.sub(r'blocks.([0-9]+).([0-9]+).down', lambda x: f'stages.{int(x.group(1)) + 1}.downsample.down', k) + k = re.sub(r'blocks.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) + k = k.replace('head.', 'head.fc.') + out_dict[k] = v + + return out_dict + + +def _create_sequencer2d(variant, pretrained=False, **kwargs): + default_out_indices = tuple(range(3)) + out_indices = kwargs.pop('out_indices', default_out_indices) + + model = build_model_with_cfg( + Sequencer2d, + variant, + pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs, + ) + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': DEFAULT_CROP_PCT, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.proj', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'sequencer2d_s.in1k': _cfg(hf_hub_id='timm/'), + 'sequencer2d_m.in1k': _cfg(hf_hub_id='timm/'), + 'sequencer2d_l.in1k': _cfg(hf_hub_id='timm/'), +}) + + +@register_model +def sequencer2d_s(pretrained=False, **kwargs) -> Sequencer2d: + model_args = dict( + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 
3.0], + rnn_layer=LSTM2d, + bidirectional=True, + union="cat", + with_fc=True, + ) + model = _create_sequencer2d('sequencer2d_s', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def sequencer2d_m(pretrained=False, **kwargs) -> Sequencer2d: + model_args = dict( + layers=[4, 3, 14, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2d, + bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_m', pretrained=pretrained, **dict(model_args, **kwargs)) + return model + + +@register_model +def sequencer2d_l(pretrained=False, **kwargs) -> Sequencer2d: + model_args = dict( + layers=[8, 8, 16, 4], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2d, + bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_l', pretrained=pretrained, **dict(model_args, **kwargs)) + return model diff --git a/pytorch-image-models/timm/models/swin_transformer.py b/pytorch-image-models/timm/models/swin_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..54d57f81489780cc196b15c90ac004703d9b8e9a --- /dev/null +++ b/pytorch-image-models/timm/models/swin_transformer.py @@ -0,0 +1,1069 @@ +""" Swin Transformer +A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` + - https://arxiv.org/pdf/2103.14030 + +Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below + +S3 (AutoFormerV2, https://arxiv.org/abs/2111.14725) Swin weights from + - https://github.com/microsoft/Cream/tree/main/AutoFormerV2 + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" +# -------------------------------------------------------- +# Swin Transformer +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# -------------------------------------------------------- +import logging +import math +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.layers import PatchEmbed, Mlp, DropPath, ClassifierHead, to_2tuple, to_ntuple, trunc_normal_, \ + _assert, use_fused_attn, resize_rel_pos_bias_table, resample_patch_embed, ndgrid +from ._builder import build_model_with_cfg +from ._features import feature_take_indices +from ._features_fx import register_notrace_function +from ._manipulate import checkpoint_seq, named_apply +from ._registry import generate_default_cfgs, register_model, register_model_deprecations +from .vision_transformer import get_init_weights_vit + +__all__ = ['SwinTransformer'] # model_registry will add each entrypoint fn to this + +_logger = logging.getLogger(__name__) + +_int_or_tuple_2_t = Union[int, Tuple[int, int]] + + +def window_partition( + x: torch.Tensor, + window_size: Tuple[int, int], +) -> torch.Tensor: + """ + Partition into non-overlapping windows with padding if needed. + Args: + x (tensor): input tokens with [B, H, W, C]. + window_size (int): window size. + + Returns: + windows: windows after partition with [B * num_windows, window_size, window_size, C]. 
+ (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) + return windows + + +@register_notrace_function # reason: int argument is a Proxy +def window_reverse(windows, window_size: Tuple[int, int], H: int, W: int): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + + Returns: + x: (B, H, W, C) + """ + C = windows.shape[-1] + x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) + return x + + +def get_relative_position_index(win_h: int, win_w: int): + # get pair-wise relative position index for each token inside the window + coords = torch.stack(ndgrid(torch.arange(win_h), torch.arange(win_w))) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += win_h - 1 # shift to start from 0 + relative_coords[:, :, 1] += win_w - 1 + relative_coords[:, :, 0] *= 2 * win_w - 1 + return relative_coords.sum(-1) # Wh*Ww, Wh*Ww + + +class WindowAttention(nn.Module): + """ Window based multi-head self attention (W-MSA) module with relative position bias. + It supports shifted and non-shifted windows. + """ + fused_attn: torch.jit.Final[bool] + + def __init__( + self, + dim: int, + num_heads: int, + head_dim: Optional[int] = None, + window_size: _int_or_tuple_2_t = 7, + qkv_bias: bool = True, + attn_drop: float = 0., + proj_drop: float = 0., + ): + """ + Args: + dim: Number of input channels. + num_heads: Number of attention heads. + head_dim: Number of channels per head (dim // num_heads if not set) + window_size: The height and width of the window. + qkv_bias: If True, add a learnable bias to query, key, value. + attn_drop: Dropout ratio of attention weight. + proj_drop: Dropout ratio of output. 
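# Round-trip sketch for window_partition / window_reverse defined above (assumes the
# module is importable as timm.models.swin_transformer): a (B, H, W, C) map splits
# into (B * num_windows, Wh, Ww, C) windows and reassembles exactly when H and W are
# divisible by the window size.
import torch
from timm.models.swin_transformer import window_partition, window_reverse

x = torch.randn(2, 14, 14, 96)
windows = window_partition(x, (7, 7))                  # (2 * 4, 7, 7, 96)
assert torch.equal(window_reverse(windows, (7, 7), 14, 14), x)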
+ """ + super().__init__() + self.dim = dim + self.window_size = to_2tuple(window_size) # Wh, Ww + win_h, win_w = self.window_size + self.window_area = win_h * win_w + self.num_heads = num_heads + head_dim = head_dim or dim // num_heads + attn_dim = head_dim * num_heads + self.scale = head_dim ** -0.5 + self.fused_attn = use_fused_attn(experimental=True) # NOTE not tested for prime-time yet + + # define a parameter table of relative position bias, shape: 2*Wh-1 * 2*Ww-1, nH + self.relative_position_bias_table = nn.Parameter(torch.zeros((2 * win_h - 1) * (2 * win_w - 1), num_heads)) + + # get pair-wise relative position index for each token inside the window + self.register_buffer("relative_position_index", get_relative_position_index(win_h, win_w), persistent=False) + + self.qkv = nn.Linear(dim, attn_dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(attn_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=.02) + self.softmax = nn.Softmax(dim=-1) + + def set_window_size(self, window_size: Tuple[int, int]) -> None: + """Update window size & interpolate position embeddings + Args: + window_size (int): New window size + """ + window_size = to_2tuple(window_size) + if window_size == self.window_size: + return + self.window_size = window_size + win_h, win_w = self.window_size + self.window_area = win_h * win_w + with torch.no_grad(): + new_bias_shape = (2 * win_h - 1) * (2 * win_w - 1), self.num_heads + self.relative_position_bias_table = nn.Parameter( + resize_rel_pos_bias_table( + self.relative_position_bias_table, + new_window_size=self.window_size, + new_bias_shape=new_bias_shape, + )) + self.register_buffer("relative_position_index", get_relative_position_index(win_h, win_w), persistent=False) + + def _get_rel_pos_bias(self) -> torch.Tensor: + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view(self.window_area, self.window_area, -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + return relative_position_bias.unsqueeze(0) + + def forward(self, x, mask: Optional[torch.Tensor] = None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + + if self.fused_attn: + attn_mask = self._get_rel_pos_bias() + if mask is not None: + num_win = mask.shape[0] + mask = mask.view(1, num_win, 1, N, N).expand(B_ // num_win, -1, self.num_heads, -1, -1) + attn_mask = attn_mask + mask.reshape(-1, self.num_heads, N, N) + x = torch.nn.functional.scaled_dot_product_attention( + q, k, v, + attn_mask=attn_mask, + dropout_p=self.attn_drop.p if self.training else 0., + ) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + attn = attn + self._get_rel_pos_bias() + if mask is not None: + num_win = mask.shape[0] + attn = attn.view(-1, num_win, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.transpose(1, 2).reshape(B_, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerBlock(nn.Module): + """ Swin Transformer Block. 
+ """ + + def __init__( + self, + dim: int, + input_resolution: _int_or_tuple_2_t, + num_heads: int = 4, + head_dim: Optional[int] = None, + window_size: _int_or_tuple_2_t = 7, + shift_size: int = 0, + always_partition: bool = False, + dynamic_mask: bool = False, + mlp_ratio: float = 4., + qkv_bias: bool = True, + proj_drop: float = 0., + attn_drop: float = 0., + drop_path: float = 0., + act_layer: Callable = nn.GELU, + norm_layer: Callable = nn.LayerNorm, + ): + """ + Args: + dim: Number of input channels. + input_resolution: Input resolution. + window_size: Window size. + num_heads: Number of attention heads. + head_dim: Enforce the number of channels per head + shift_size: Shift size for SW-MSA. + always_partition: Always partition into full windows and shift + mlp_ratio: Ratio of mlp hidden dim to embedding dim. + qkv_bias: If True, add a learnable bias to query, key, value. + proj_drop: Dropout rate. + attn_drop: Attention dropout rate. + drop_path: Stochastic depth rate. + act_layer: Activation layer. + norm_layer: Normalization layer. + """ + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.target_shift_size = to_2tuple(shift_size) # store for later resize + self.always_partition = always_partition + self.dynamic_mask = dynamic_mask + self.window_size, self.shift_size = self._calc_window_shift(window_size, shift_size) + self.window_area = self.window_size[0] * self.window_size[1] + self.mlp_ratio = mlp_ratio + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, + num_heads=num_heads, + head_dim=head_dim, + window_size=self.window_size, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + ) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=proj_drop, + ) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.register_buffer( + "attn_mask", + None if self.dynamic_mask else self.get_attn_mask(), + persistent=False, + ) + + def get_attn_mask(self, x: Optional[torch.Tensor] = None) -> Optional[torch.Tensor]: + if any(self.shift_size): + # calculate attention mask for SW-MSA + if x is not None: + H, W = x.shape[1], x.shape[2] + device = x.device + dtype = x.dtype + else: + H, W = self.input_resolution + device = None + dtype = None + H = math.ceil(H / self.window_size[0]) * self.window_size[0] + W = math.ceil(W / self.window_size[1]) * self.window_size[1] + img_mask = torch.zeros((1, H, W, 1), dtype=dtype, device=device) # 1 H W 1 + cnt = 0 + for h in ( + (0, -self.window_size[0]), + (-self.window_size[0], -self.shift_size[0]), + (-self.shift_size[0], None), + ): + for w in ( + (0, -self.window_size[1]), + (-self.window_size[1], -self.shift_size[1]), + (-self.shift_size[1], None), + ): + img_mask[:, h[0]:h[1], w[0]:w[1], :] = cnt + cnt += 1 + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_area) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + return attn_mask + + def _calc_window_shift( + self, + target_window_size: Union[int, Tuple[int, int]], + target_shift_size: Optional[Union[int, Tuple[int, int]]] = None, + ) -> Tuple[Tuple[int, int], Tuple[int, int]]: + target_window_size = to_2tuple(target_window_size) + if target_shift_size is None: + # if passed value is None, recalculate from default window_size // 2 if it was previously non-zero + target_shift_size = self.target_shift_size + if any(target_shift_size): + target_shift_size = (target_window_size[0] // 2, target_window_size[1] // 2) + else: + target_shift_size = to_2tuple(target_shift_size) + + if self.always_partition: + return target_window_size, target_shift_size + + window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)] + shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)] + return tuple(window_size), tuple(shift_size) + + def set_input_size( + self, + feat_size: Tuple[int, int], + window_size: Tuple[int, int], + always_partition: Optional[bool] = None, + ): + """ + Args: + feat_size: New input resolution + window_size: New window size + always_partition: Change always_partition attribute if not None + """ + self.input_resolution = feat_size + if always_partition is not None: + self.always_partition = always_partition + self.window_size, self.shift_size = self._calc_window_shift(window_size) + self.window_area = self.window_size[0] * self.window_size[1] + self.attn.set_window_size(self.window_size) + self.register_buffer( + "attn_mask", + None if self.dynamic_mask else self.get_attn_mask(), + persistent=False, + ) + + def _attn(self, x): + B, H, W, C = x.shape + + # cyclic shift + has_shift = any(self.shift_size) + if has_shift: + shifted_x = torch.roll(x, shifts=(-self.shift_size[0], -self.shift_size[1]), dims=(1, 2)) + else: + shifted_x = x + + # pad for resolution not divisible by window size + pad_h = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0] + pad_w = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1] + shifted_x = torch.nn.functional.pad(shifted_x, (0, 0, 0, pad_w, 0, pad_h)) + _, Hp, Wp, _ = shifted_x.shape + + # 
partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_area, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + if getattr(self, 'dynamic_mask', False): + attn_mask = self.get_attn_mask(shifted_x) + else: + attn_mask = self.attn_mask + attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) + shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C + shifted_x = shifted_x[:, :H, :W, :].contiguous() + + # reverse cyclic shift + if has_shift: + x = torch.roll(shifted_x, shifts=self.shift_size, dims=(1, 2)) + else: + x = shifted_x + return x + + def forward(self, x): + B, H, W, C = x.shape + x = x + self.drop_path1(self._attn(self.norm1(x))) + x = x.reshape(B, -1, C) + x = x + self.drop_path2(self.mlp(self.norm2(x))) + x = x.reshape(B, H, W, C) + return x + + +class PatchMerging(nn.Module): + """ Patch Merging Layer. + """ + + def __init__( + self, + dim: int, + out_dim: Optional[int] = None, + norm_layer: Callable = nn.LayerNorm, + ): + """ + Args: + dim: Number of input channels. + out_dim: Number of output channels (or 2 * dim if None) + norm_layer: Normalization layer. + """ + super().__init__() + self.dim = dim + self.out_dim = out_dim or 2 * dim + self.norm = norm_layer(4 * dim) + self.reduction = nn.Linear(4 * dim, self.out_dim, bias=False) + + def forward(self, x): + B, H, W, C = x.shape + + pad_values = (0, 0, 0, W % 2, 0, H % 2) + x = nn.functional.pad(x, pad_values) + _, H, W, _ = x.shape + + x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3) + x = self.norm(x) + x = self.reduction(x) + return x + + +class SwinTransformerStage(nn.Module): + """ A basic Swin Transformer layer for one stage. + """ + + def __init__( + self, + dim: int, + out_dim: int, + input_resolution: Tuple[int, int], + depth: int, + downsample: bool = True, + num_heads: int = 4, + head_dim: Optional[int] = None, + window_size: _int_or_tuple_2_t = 7, + always_partition: bool = False, + dynamic_mask: bool = False, + mlp_ratio: float = 4., + qkv_bias: bool = True, + proj_drop: float = 0., + attn_drop: float = 0., + drop_path: Union[List[float], float] = 0., + norm_layer: Callable = nn.LayerNorm, + ): + """ + Args: + dim: Number of input channels. + out_dim: Number of output channels. + input_resolution: Input resolution. + depth: Number of blocks. + downsample: Downsample layer at the end of the layer. + num_heads: Number of attention heads. + head_dim: Channels per head (dim // num_heads if not set) + window_size: Local window size. + mlp_ratio: Ratio of mlp hidden dim to embedding dim. + qkv_bias: If True, add a learnable bias to query, key, value. + proj_drop: Projection dropout rate. + attn_drop: Attention dropout rate. + drop_path: Stochastic depth rate. + norm_layer: Normalization layer. 
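# PatchMerging sketch for the layer above (assumes the module is importable as
# timm.models.swin_transformer): each 2x2 neighborhood is flattened to 4 * dim
# channels, normalized, then linearly reduced to out_dim, halving H and W.
import torch
from timm.models.swin_transformer import PatchMerging

merge = PatchMerging(dim=96, out_dim=192)
x = torch.randn(1, 56, 56, 96)
y = merge(x)                                           # (1, 28, 28, 192)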
+ """ + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.output_resolution = tuple(i // 2 for i in input_resolution) if downsample else input_resolution + self.depth = depth + self.grad_checkpointing = False + window_size = to_2tuple(window_size) + shift_size = tuple([w // 2 for w in window_size]) + + # patch merging layer + if downsample: + self.downsample = PatchMerging( + dim=dim, + out_dim=out_dim, + norm_layer=norm_layer, + ) + else: + assert dim == out_dim + self.downsample = nn.Identity() + + # build blocks + self.blocks = nn.Sequential(*[ + SwinTransformerBlock( + dim=out_dim, + input_resolution=self.output_resolution, + num_heads=num_heads, + head_dim=head_dim, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else shift_size, + always_partition=always_partition, + dynamic_mask=dynamic_mask, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_drop=proj_drop, + attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + ) + for i in range(depth)]) + + def set_input_size( + self, + feat_size: Tuple[int, int], + window_size: int, + always_partition: Optional[bool] = None, + ): + """ Updates the resolution, window size and so the pair-wise relative positions. + + Args: + feat_size: New input (feature) resolution + window_size: New window size + always_partition: Always partition / shift the window + """ + self.input_resolution = feat_size + if isinstance(self.downsample, nn.Identity): + self.output_resolution = feat_size + else: + self.output_resolution = tuple(i // 2 for i in feat_size) + for block in self.blocks: + block.set_input_size( + feat_size=self.output_resolution, + window_size=window_size, + always_partition=always_partition, + ) + + def forward(self, x): + x = self.downsample(x) + + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class SwinTransformer(nn.Module): + """ Swin Transformer + + A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + """ + + def __init__( + self, + img_size: _int_or_tuple_2_t = 224, + patch_size: int = 4, + in_chans: int = 3, + num_classes: int = 1000, + global_pool: str = 'avg', + embed_dim: int = 96, + depths: Tuple[int, ...] = (2, 2, 6, 2), + num_heads: Tuple[int, ...] = (3, 6, 12, 24), + head_dim: Optional[int] = None, + window_size: _int_or_tuple_2_t = 7, + always_partition: bool = False, + strict_img_size: bool = True, + mlp_ratio: float = 4., + qkv_bias: bool = True, + drop_rate: float = 0., + proj_drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0.1, + embed_layer: Callable = PatchEmbed, + norm_layer: Union[str, Callable] = nn.LayerNorm, + weight_init: str = '', + **kwargs, + ): + """ + Args: + img_size: Input image size. + patch_size: Patch size. + in_chans: Number of input image channels. + num_classes: Number of classes for classification head. + embed_dim: Patch embedding dimension. + depths: Depth of each Swin Transformer layer. + num_heads: Number of attention heads in different layers. + head_dim: Dimension of self-attention heads. + window_size: Window size. + mlp_ratio: Ratio of mlp hidden dim to embedding dim. + qkv_bias: If True, add a learnable bias to query, key, value. + drop_rate: Dropout rate. + attn_drop_rate (float): Attention dropout rate. + drop_path_rate (float): Stochastic depth rate. 
+ embed_layer: Patch embedding layer. + norm_layer (nn.Module): Normalization layer. + """ + super().__init__() + assert global_pool in ('', 'avg') + self.num_classes = num_classes + self.global_pool = global_pool + self.output_fmt = 'NHWC' + + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (self.num_layers - 1)) + self.feature_info = [] + + if not isinstance(embed_dim, (tuple, list)): + embed_dim = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] + + # split image into non-overlapping patches + self.patch_embed = embed_layer( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim[0], + norm_layer=norm_layer, + strict_img_size=strict_img_size, + output_fmt='NHWC', + ) + patch_grid = self.patch_embed.grid_size + + # build layers + head_dim = to_ntuple(self.num_layers)(head_dim) + if not isinstance(window_size, (list, tuple)): + window_size = to_ntuple(self.num_layers)(window_size) + elif len(window_size) == 2: + window_size = (window_size,) * self.num_layers + assert len(window_size) == self.num_layers + mlp_ratio = to_ntuple(self.num_layers)(mlp_ratio) + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + layers = [] + in_dim = embed_dim[0] + scale = 1 + for i in range(self.num_layers): + out_dim = embed_dim[i] + layers += [SwinTransformerStage( + dim=in_dim, + out_dim=out_dim, + input_resolution=( + patch_grid[0] // scale, + patch_grid[1] // scale + ), + depth=depths[i], + downsample=i > 0, + num_heads=num_heads[i], + head_dim=head_dim[i], + window_size=window_size[i], + always_partition=always_partition, + dynamic_mask=not strict_img_size, + mlp_ratio=mlp_ratio[i], + qkv_bias=qkv_bias, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + )] + in_dim = out_dim + if i > 0: + scale *= 2 + self.feature_info += [dict(num_chs=out_dim, reduction=patch_size * scale, module=f'layers.{i}')] + self.layers = nn.Sequential(*layers) + + self.norm = norm_layer(self.num_features) + self.head = ClassifierHead( + self.num_features, + num_classes, + pool_type=global_pool, + drop_rate=drop_rate, + input_fmt=self.output_fmt, + ) + if weight_init != 'skip': + self.init_weights(weight_init) + + @torch.jit.ignore + def init_weights(self, mode=''): + assert mode in ('jax', 'jax_nlhb', 'moco', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. + named_apply(get_init_weights_vit(mode, head_bias=head_bias), self) + + @torch.jit.ignore + def no_weight_decay(self): + nwd = set() + for n, _ in self.named_parameters(): + if 'relative_position_bias_table' in n: + nwd.add(n) + return nwd + + def set_input_size( + self, + img_size: Optional[Tuple[int, int]] = None, + patch_size: Optional[Tuple[int, int]] = None, + window_size: Optional[Tuple[int, int]] = None, + window_ratio: int = 8, + always_partition: Optional[bool] = None, + ) -> None: + """ Updates the image resolution and window size. 
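# Sketch of the per-stage widths and stochastic depth schedule constructed above:
# embed_dim doubles every stage, and drop-path rates ramp linearly over all blocks
# before being split back into per-stage lists.
import torch

embed_dim, depths, drop_path_rate = 96, (2, 2, 6, 2), 0.1
dims = [int(embed_dim * 2 ** i) for i in range(len(depths))]   # [96, 192, 384, 768]
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
# dpr[0][0] == 0.0 and dpr[-1][-1] == 0.1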
+ + Args: + img_size: New input resolution, if None current resolution is used + patch_size (Optional[Tuple[int, int]): New patch size, if None use current patch size + window_size: New window size, if None based on new_img_size // window_div + window_ratio: divisor for calculating window size from grid size + always_partition: always partition into windows and shift (even if window size < feat size) + """ + if img_size is not None or patch_size is not None: + self.patch_embed.set_input_size(img_size=img_size, patch_size=patch_size) + patch_grid = self.patch_embed.grid_size + + if window_size is None: + window_size = tuple([pg // window_ratio for pg in patch_grid]) + + for index, stage in enumerate(self.layers): + stage_scale = 2 ** max(index - 1, 0) + stage.set_input_size( + feat_size=(patch_grid[0] // stage_scale, patch_grid[1] // stage_scale), + window_size=window_size, + always_partition=always_partition, + ) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^patch_embed', # stem and embed + blocks=r'^layers\.(\d+)' if coarse else [ + (r'^layers\.(\d+).downsample', (0,)), + (r'^layers\.(\d+)\.\w+\.(\d+)', None), + (r'^norm', (99999,)), + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for l in self.layers: + l.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self) -> nn.Module: + return self.head.fc + + def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): + self.num_classes = num_classes + self.head.reset(num_classes, pool_type=global_pool) + + def forward_intermediates( + self, + x: torch.Tensor, + indices: Optional[Union[int, List[int]]] = None, + norm: bool = False, + stop_early: bool = False, + output_fmt: str = 'NCHW', + intermediates_only: bool = False, + ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: + """ Forward features that returns intermediates. + + Args: + x: Input image tensor + indices: Take last n blocks if int, all if None, select matching indices if sequence + norm: Apply norm layer to compatible intermediates + stop_early: Stop iterating over blocks when last desired intermediate hit + output_fmt: Shape of intermediate feature outputs + intermediates_only: Only return intermediate features + Returns: + + """ + assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' + intermediates = [] + take_indices, max_index = feature_take_indices(len(self.layers), indices) + + # forward pass + x = self.patch_embed(x) + + num_stages = len(self.layers) + if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript + stages = self.layers + else: + stages = self.layers[:max_index + 1] + for i, stage in enumerate(stages): + x = stage(x) + if i in take_indices: + if norm and i == num_stages - 1: + x_inter = self.norm(x) # applying final norm last intermediate + else: + x_inter = x + x_inter = x_inter.permute(0, 3, 1, 2).contiguous() + intermediates.append(x_inter) + + if intermediates_only: + return intermediates + + x = self.norm(x) + + return x, intermediates + + def prune_intermediate_layers( + self, + indices: Union[int, List[int]] = 1, + prune_norm: bool = False, + prune_head: bool = True, + ): + """ Prune layers not required for specified intermediates. 
+ """ + take_indices, max_index = feature_take_indices(len(self.layers), indices) + self.layers = self.layers[:max_index + 1] # truncate blocks + if prune_norm: + self.norm = nn.Identity() + if prune_head: + self.reset_classifier(0, '') + return take_indices + + def forward_features(self, x): + x = self.patch_embed(x) + x = self.layers(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=True) if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + old_weights = True + if 'head.fc.weight' in state_dict: + old_weights = False + import re + out_dict = {} + state_dict = state_dict.get('model', state_dict) + state_dict = state_dict.get('state_dict', state_dict) + for k, v in state_dict.items(): + if any([n in k for n in ('relative_position_index', 'attn_mask')]): + continue # skip buffers that should not be persistent + + if 'patch_embed.proj.weight' in k: + _, _, H, W = model.patch_embed.proj.weight.shape + if v.shape[-2] != H or v.shape[-1] != W: + v = resample_patch_embed( + v, + (H, W), + interpolation='bicubic', + antialias=True, + verbose=True, + ) + + if k.endswith('relative_position_bias_table'): + m = model.get_submodule(k[:-29]) + if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]: + v = resize_rel_pos_bias_table( + v, + new_window_size=m.window_size, + new_bias_shape=m.relative_position_bias_table.shape, + ) + + if old_weights: + k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k) + k = k.replace('head.', 'head.fc.') + + out_dict[k] = v + return out_dict + + +def _create_swin_transformer(variant, pretrained=False, **kwargs): + default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1)))) + out_indices = kwargs.pop('out_indices', default_out_indices) + + model = build_model_with_cfg( + SwinTransformer, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs) + + return model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', + 'license': 'mit', **kwargs + } + + +default_cfgs = generate_default_cfgs({ + 'swin_small_patch4_window7_224.ms_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_small_patch4_window7_224_22kto1k_finetune.pth', ), + 'swin_base_patch4_window7_224.ms_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth',), + 'swin_base_patch4_window12_384.ms_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + 'swin_large_patch4_window7_224.ms_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + 
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth',), + 'swin_large_patch4_window12_384.ms_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + + 'swin_tiny_patch4_window7_224.ms_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth',), + 'swin_small_patch4_window7_224.ms_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth',), + 'swin_base_patch4_window7_224.ms_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth',), + 'swin_base_patch4_window12_384.ms_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + + # tiny 22k pretrain is worse than 1k, so moved after (untagged priority is based on order) + 'swin_tiny_patch4_window7_224.ms_in22k_ft_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_tiny_patch4_window7_224_22kto1k_finetune.pth',), + + 'swin_tiny_patch4_window7_224.ms_in22k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_tiny_patch4_window7_224_22k.pth', + num_classes=21841), + 'swin_small_patch4_window7_224.ms_in22k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_small_patch4_window7_224_22k.pth', + num_classes=21841), + 'swin_base_patch4_window7_224.ms_in22k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth', + num_classes=21841), + 'swin_base_patch4_window12_384.ms_in22k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21841), + 'swin_large_patch4_window7_224.ms_in22k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth', + num_classes=21841), + 'swin_large_patch4_window12_384.ms_in22k': _cfg( + hf_hub_id='timm/', + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21841), + + 'swin_s3_tiny_224.ms_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_t-1d53f6a8.pth'), + 'swin_s3_small_224.ms_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_s-3bb4c69d.pth'), + 'swin_s3_base_224.ms_in1k': _cfg( + hf_hub_id='timm/', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_b-a1e95db4.pth'), +}) + + +@register_model +def swin_tiny_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer: + """ Swin-T @ 224x224, trained ImageNet-1k + """ + model_args = dict(patch_size=4, window_size=7, 
embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) + return _create_swin_transformer( + 'swin_tiny_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swin_small_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer: + """ Swin-S @ 224x224 + """ + model_args = dict(patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) + return _create_swin_transformer( + 'swin_small_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swin_base_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer: + """ Swin-B @ 224x224 + """ + model_args = dict(patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) + return _create_swin_transformer( + 'swin_base_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swin_base_patch4_window12_384(pretrained=False, **kwargs) -> SwinTransformer: + """ Swin-B @ 384x384 + """ + model_args = dict(patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) + return _create_swin_transformer( + 'swin_base_patch4_window12_384', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swin_large_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer: + """ Swin-L @ 224x224 + """ + model_args = dict(patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48)) + return _create_swin_transformer( + 'swin_large_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swin_large_patch4_window12_384(pretrained=False, **kwargs) -> SwinTransformer: + """ Swin-L @ 384x384 + """ + model_args = dict(patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48)) + return _create_swin_transformer( + 'swin_large_patch4_window12_384', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swin_s3_tiny_224(pretrained=False, **kwargs) -> SwinTransformer: + """ Swin-S3-T @ 224x224, https://arxiv.org/abs/2111.14725 + """ + model_args = dict( + patch_size=4, window_size=(7, 7, 14, 7), embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) + return _create_swin_transformer('swin_s3_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swin_s3_small_224(pretrained=False, **kwargs) -> SwinTransformer: + """ Swin-S3-S @ 224x224, https://arxiv.org/abs/2111.14725 + """ + model_args = dict( + patch_size=4, window_size=(14, 14, 14, 7), embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) + return _create_swin_transformer('swin_s3_small_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +@register_model +def swin_s3_base_224(pretrained=False, **kwargs) -> SwinTransformer: + """ Swin-S3-B @ 224x224, https://arxiv.org/abs/2111.14725 + """ + model_args = dict( + patch_size=4, window_size=(7, 7, 14, 7), embed_dim=96, depths=(2, 2, 30, 2), num_heads=(3, 6, 12, 24)) + return _create_swin_transformer('swin_s3_base_224', pretrained=pretrained, **dict(model_args, **kwargs)) + + +register_model_deprecations(__name__, { + 'swin_base_patch4_window7_224_in22k': 'swin_base_patch4_window7_224.ms_in22k', + 'swin_base_patch4_window12_384_in22k': 'swin_base_patch4_window12_384.ms_in22k', + 'swin_large_patch4_window7_224_in22k': 'swin_large_patch4_window7_224.ms_in22k', + 'swin_large_patch4_window12_384_in22k': 
'swin_large_patch4_window12_384.ms_in22k', +}) diff --git a/pytorch-image-models/timm/utils/model_ema.py b/pytorch-image-models/timm/utils/model_ema.py new file mode 100644 index 0000000000000000000000000000000000000000..3cc7e88fc2397a023ee45df164bb81216c6413c4 --- /dev/null +++ b/pytorch-image-models/timm/utils/model_ema.py @@ -0,0 +1,260 @@ +""" Exponential Moving Average (EMA) of model updates + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +from collections import OrderedDict +from copy import deepcopy +from typing import Optional + +import torch +import torch.nn as nn + +_logger = logging.getLogger(__name__) + + +class ModelEma: + """ Model Exponential Moving Average (DEPRECATED) + + Keep a moving average of everything in the model state_dict (parameters and buffers). + This version is deprecated, it does not work with scripted models. Will be removed eventually. + + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + + A smoothed version of the weights is necessary for some training schemes to perform well. + E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use + RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA + smoothing of weights to match results. Pay attention to the decay constant you are using + relative to your update count per epoch. + + To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but + disable validation of the EMA weights. Validation will have to be done manually in a separate + process, or after the training stops converging. + + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. + """ + def __init__(self, model, decay=0.9999, device='', resume=''): + # make a copy of the model for accumulating moving average of weights + self.ema = deepcopy(model) + self.ema.eval() + self.decay = decay + self.device = device # perform ema on different device from model if set + if device: + self.ema.to(device=device) + self.ema_has_module = hasattr(self.ema, 'module') + if resume: + self._load_checkpoint(resume) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def _load_checkpoint(self, checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + assert isinstance(checkpoint, dict) + if 'state_dict_ema' in checkpoint: + new_state_dict = OrderedDict() + for k, v in checkpoint['state_dict_ema'].items(): + # ema model may have been wrapped by DataParallel, and need module prefix + if self.ema_has_module: + name = 'module.' + k if not k.startswith('module') else k + else: + name = k + new_state_dict[name] = v + self.ema.load_state_dict(new_state_dict) + _logger.info("Loaded state_dict_ema") + else: + _logger.warning("Failed to find state_dict_ema, starting from loaded model weights") + + def update(self, model): + # correct a mismatch in state dict keys + needs_module = hasattr(model, 'module') and not self.ema_has_module + with torch.no_grad(): + msd = model.state_dict() + for k, ema_v in self.ema.state_dict().items(): + if needs_module: + k = 'module.' + k + model_v = msd[k].detach() + if self.device: + model_v = model_v.to(device=self.device) + ema_v.copy_(ema_v * self.decay + (1. 
- self.decay) * model_v) + + +class ModelEmaV2(nn.Module): + """ Model Exponential Moving Average V2 + + Keep a moving average of everything in the model state_dict (parameters and buffers). + V2 of this module is simpler, it does not match params/buffers based on name but simply + iterates in order. It works with torchscript (JIT of full model). + + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + + A smoothed version of the weights is necessary for some training schemes to perform well. + E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use + RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA + smoothing of weights to match results. Pay attention to the decay constant you are using + relative to your update count per epoch. + + To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but + disable validation of the EMA weights. Validation will have to be done manually in a separate + process, or after the training stops converging. + + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. + """ + def __init__(self, model, decay=0.9999, device=None): + super().__init__() + # make a copy of the model for accumulating moving average of weights + self.module = deepcopy(model) + self.module.eval() + self.decay = decay + self.device = device # perform ema on different device from model if set + if self.device is not None: + self.module.to(device=device) + + def _update(self, model, update_fn): + with torch.no_grad(): + for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): + if self.device is not None: + model_v = model_v.to(device=self.device) + ema_v.copy_(update_fn(ema_v, model_v)) + + def update(self, model): + self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m) + + def set(self, model): + self._update(model, update_fn=lambda e, m: m) + + def forward(self, *args, **kwargs): + return self.module(*args, **kwargs) + + +class ModelEmaV3(nn.Module): + """ Model Exponential Moving Average V3 + + Keep a moving average of everything in the model state_dict (parameters and buffers). + V3 of this module leverages for_each and in-place operations for faster performance. + + Decay warmup based on code by @crowsonkb, her comments: + If inv_gamma=1 and power=1, implements a simple average. inv_gamma=1, power=2/3 are + good values for models you plan to train for a million or more steps (reaches decay + factor 0.999 at 31.6K steps, 0.9999 at 1M steps), inv_gamma=1, power=3/4 for models + you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 at + 215.4k steps). + + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + + To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but + disable validation of the EMA weights. Validation will have to be done manually in a separate + process, or after the training stops converging. + + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. 
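# Decay-warmup sketch for the schedule described in the docstring above:
# decay(step) = 1 - (1 + step / inv_gamma) ** -power, clamped to [min_decay, decay].
def warmup_decay(step, inv_gamma=1.0, power=2 / 3, max_decay=0.9999, min_decay=0.0):
    d = 1 - (1 + step / inv_gamma) ** -power
    return max(min(d, max_decay), min_decay)

# warmup_decay(31_600) ~= 0.999 and warmup_decay(1_000_000) ~= 0.9999, matching the
# numbers quoted above for inv_gamma=1, power=2/3.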
+ """ + def __init__( + self, + model, + decay: float = 0.9999, + min_decay: float = 0.0, + update_after_step: int = 0, + use_warmup: bool = False, + warmup_gamma: float = 1.0, + warmup_power: float = 2/3, + device: Optional[torch.device] = None, + foreach: bool = True, + exclude_buffers: bool = False, + ): + super().__init__() + # make a copy of the model for accumulating moving average of weights + self.module = deepcopy(model) + self.module.eval() + self.decay = decay + self.min_decay = min_decay + self.update_after_step = update_after_step + self.use_warmup = use_warmup + self.warmup_gamma = warmup_gamma + self.warmup_power = warmup_power + self.foreach = foreach + self.device = device # perform ema on different device from model if set + self.exclude_buffers = exclude_buffers + if self.device is not None and device != next(model.parameters()).device: + self.foreach = False # cannot use foreach methods with different devices + self.module.to(device=device) + + def get_decay(self, step: Optional[int] = None) -> float: + """ + Compute the decay factor for the exponential moving average. + """ + if step is None: + return self.decay + + step = max(0, step - self.update_after_step - 1) + if step <= 0: + return 0.0 + + if self.use_warmup: + decay = 1 - (1 + step / self.warmup_gamma) ** -self.warmup_power + decay = max(min(decay, self.decay), self.min_decay) + else: + decay = self.decay + + return decay + + @torch.no_grad() + def update(self, model, step: Optional[int] = None): + decay = self.get_decay(step) + if self.exclude_buffers: + self.apply_update_no_buffers_(model, decay) + else: + self.apply_update_(model, decay) + + def apply_update_(self, model, decay: float): + # interpolate parameters and buffers + if self.foreach: + ema_lerp_values = [] + model_lerp_values = [] + for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): + if ema_v.is_floating_point(): + ema_lerp_values.append(ema_v) + model_lerp_values.append(model_v) + else: + ema_v.copy_(model_v) + + if hasattr(torch, '_foreach_lerp_'): + torch._foreach_lerp_(ema_lerp_values, model_lerp_values, weight=1. - decay) + else: + torch._foreach_mul_(ema_lerp_values, scalar=decay) + torch._foreach_add_(ema_lerp_values, model_lerp_values, alpha=1. - decay) + else: + for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): + if ema_v.is_floating_point(): + ema_v.lerp_(model_v.to(device=self.device), weight=1. - decay) + else: + ema_v.copy_(model_v.to(device=self.device)) + + def apply_update_no_buffers_(self, model, decay: float): + # interpolate parameters, copy buffers + ema_params = tuple(self.module.parameters()) + model_params = tuple(model.parameters()) + if self.foreach: + if hasattr(torch, '_foreach_lerp_'): + torch._foreach_lerp_(ema_params, model_params, weight=1. - decay) + else: + torch._foreach_mul_(ema_params, scalar=decay) + torch._foreach_add_(ema_params, model_params, alpha=1 - decay) + else: + for ema_p, model_p in zip(ema_params, model_params): + ema_p.lerp_(model_p.to(device=self.device), weight=1. - decay) + + for ema_b, model_b in zip(self.module.buffers(), model.buffers()): + ema_b.copy_(model_b.to(device=self.device)) + + @torch.no_grad() + def set(self, model): + for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): + ema_v.copy_(model_v.to(device=self.device)) + + def forward(self, *args, **kwargs): + return self.module(*args, **kwargs) \ No newline at end of file