"""
Modified from https://github.com/facebookresearch/deit/blob/main/cait_models.py
"""
from functools import partial
import oneflow as flow
import oneflow.nn as nn
from ..layers import Mlp, PatchEmbed, trunc_normal_, DropPath
from .registry import ModelCreator
from .utils import load_state_dict_from_url

__all__ = [
"cait_M48_448",
"cait_M36_384",
"cait_S36_384",
"cait_S24_384",
"cait_S24_224",
"cait_XS24_384",
]

model_urls = {
"cait_XS24": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/CaiT/XS24_384.zip",
"cait_S24_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/CaiT/S24_224.zip",
"cait_S24": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/CaiT/S24_384.zip",
"cait_S36": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/CaiT/S36_384.zip",
"cait_M36": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/CaiT/M36_384.zip",
"cait_M48": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/CaiT/M48_448.zip",
}
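
# Typical usage of the builders registered below (a sketch; pretrained=True
# downloads the checkpoint from the matching model_urls entry, which requires
# network access):
#
#     from flowvision.models import cait_S24_224
#     model = cait_S24_224(pretrained=True)
#     model.eval()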


class Class_Attention(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to do CA
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
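        # Class attention: only the class token (index 0) forms the query,
        # while keys and values are computed over all N tokens, so the
        # attention map is (B, heads, 1, N) and the cost is linear in N.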
q = (
self.q(x[:, 0])
.unsqueeze(1)
.reshape(B, 1, self.num_heads, C // self.num_heads)
.permute(0, 2, 1, 3)
)
k = (
self.k(x)
.reshape(B, N, self.num_heads, C // self.num_heads)
.permute(0, 2, 1, 3)
)
q = q * self.scale
v = (
self.v(x)
.reshape(B, N, self.num_heads, C // self.num_heads)
.permute(0, 2, 1, 3)
)
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x_cls = (attn @ v).transpose(1, 2).reshape(B, 1, C)
x_cls = self.proj(x_cls)
x_cls = self.proj_drop(x_cls)
return x_cls
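
# Shape sketch for Class_Attention (illustrative values, not a released
# configuration): with dim=192 and num_heads=4, an input of shape
# (2, 197, 192) ([cls] token at index 0 plus 196 patch tokens) yields
# attention weights of shape (2, 4, 1, 197) and an updated class token of
# shape (2, 1, 192):
#
#     ca = Class_Attention(dim=192, num_heads=4)
#     out = ca(flow.randn(2, 197, 192))  # out.shape == (2, 1, 192)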


class LayerScale_Block_CA(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to add CA and LayerScale
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
Attention_block=Class_Attention,
Mlp_block=Mlp,
init_values=1e-4,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention_block(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp_block(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
        self.gamma_1 = nn.Parameter(init_values * flow.ones(dim), requires_grad=True)
        self.gamma_2 = nn.Parameter(init_values * flow.ones(dim), requires_grad=True)
def forward(self, x, x_cls):
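        # The class token attends over [cls; patches], but only the class
        # token is updated; the patch tokens pass through unchanged. gamma_1
        # and gamma_2 are the per-channel LayerScale parameters.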
u = flow.cat((x_cls, x), dim=1)
x_cls = x_cls + self.drop_path(self.gamma_1 * self.attn(self.norm1(u)))
x_cls = x_cls + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x_cls)))
return x_cls


class Attention_talking_head(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to add Talking Heads Attention (https://arxiv.org/pdf/2003.02436v1.pdf)
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_l = nn.Linear(num_heads, num_heads)
self.proj_w = nn.Linear(num_heads, num_heads)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1)
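        # Talking heads: mix the attention logits across the head dimension
        # before the softmax (permute so the head axis is last for nn.Linear).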
attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
attn = attn.softmax(dim=-1)
attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x


class LayerScale_Block(nn.Module):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    # with slight modifications to add LayerScale
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
Attention_block=Attention_talking_head,
Mlp_block=Mlp,
init_values=1e-4,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention_block(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp_block(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
        self.gamma_1 = nn.Parameter(init_values * flow.ones(dim), requires_grad=True)
        self.gamma_2 = nn.Parameter(init_values * flow.ones(dim), requires_grad=True)
def forward(self, x):
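        # LayerScale: each residual branch is scaled by a learnable
        # per-channel factor initialized to a small value (init_values),
        # which keeps very deep stacks trainable.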
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x)))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x


class cait_models(nn.Module):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to adapt to our cait models
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.0,
norm_layer=nn.LayerNorm,
global_pool=None,
block_layers=LayerScale_Block,
block_layers_token=LayerScale_Block_CA,
Patch_layer=PatchEmbed,
act_layer=nn.GELU,
Attention_block=Attention_talking_head,
Mlp_block=Mlp,
init_scale=1e-4,
Attention_block_token_only=Class_Attention,
Mlp_block_token_only=Mlp,
depth_token_only=2,
mlp_ratio_clstk=4.0,
):
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim
self.patch_embed = Patch_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(flow.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(flow.zeros(1, num_patches, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
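        # CaiT uses a constant stochastic-depth rate for every block
        # (no linear decay across depth).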
        dpr = [drop_path_rate for _ in range(depth)]
self.blocks = nn.ModuleList(
[
block_layers(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
Attention_block=Attention_block,
Mlp_block=Mlp_block,
init_values=init_scale,
)
for i in range(depth)
]
)
self.blocks_token_only = nn.ModuleList(
[
block_layers_token(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio_clstk,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
norm_layer=norm_layer,
act_layer=act_layer,
Attention_block=Attention_block_token_only,
Mlp_block=Mlp_block_token_only,
init_values=init_scale,
)
for i in range(depth_token_only)
]
)
self.norm = norm_layer(embed_dim)
self.feature_info = [dict(num_chs=embed_dim, reduction=0, module="head")]
self.head = (
nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
)
trunc_normal_(self.pos_embed, std=0.02)
trunc_normal_(self.cls_token, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = x + self.pos_embed
x = self.pos_drop(x)
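        # Stage 1: self-attention blocks process the patch tokens only; the
        # class token is not inserted yet.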
        for blk in self.blocks:
            x = blk(x)
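        # Stage 2: class-attention blocks insert the class token late; this
        # is the only place it interacts with the patch tokens.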
        for blk in self.blocks_token_only:
            cls_tokens = blk(x, cls_tokens)
x = flow.cat((cls_tokens, x), dim=1)
x = self.norm(x)
return x[:, 0]
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x


@ModelCreator.register_model
def cait_XS24_384(pretrained=False, progress=True, **kwargs):
"""
Constructs the CaiT-XS24-384 model.
.. note::
CaiT-XS24-384 model from `"Going Deeper With Image Transformers" <https://arxiv.org/pdf/2103.17239.pdf>`_.
The required input size of the model is 384x384.
Args:
pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False``
progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``
For example:
.. code-block:: python
>>> import flowvision
>>> cait_XS24_384 = flowvision.models.cait_XS24_384(pretrained=False, progress=True)
"""
model = cait_models(
img_size=384,
patch_size=16,
embed_dim=288,
depth=24,
num_heads=6,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
init_scale=1e-5,
depth_token_only=2,
**kwargs
)
if pretrained:
state_dict = load_state_dict_from_url(
model_urls["cait_XS24"], progress=progress
)
model.load_state_dict(state_dict)
return model


@ModelCreator.register_model
def cait_S24_224(pretrained=False, progress=True, **kwargs):
"""
Constructs the CaiT-S24-224 model.
.. note::
CaiT-S24-224 model from `"Going Deeper With Image Transformers" <https://arxiv.org/pdf/2103.17239.pdf>`_.
The required input size of the model is 224x224.
Args:
pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False``
progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``
For example:
.. code-block:: python
>>> import flowvision
>>> cait_S24_224 = flowvision.models.cait_S24_224(pretrained=False, progress=True)
"""
model = cait_models(
img_size=224,
patch_size=16,
embed_dim=384,
depth=24,
num_heads=8,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
init_scale=1e-5,
depth_token_only=2,
**kwargs
)
if pretrained:
state_dict = load_state_dict_from_url(
model_urls["cait_S24_224"], progress=progress
)
model.load_state_dict(state_dict)
return model


@ModelCreator.register_model
def cait_S24_384(pretrained=False, progress=True, **kwargs):
"""
Constructs the CaiT-S24-384 model.
.. note::
CaiT-S24-384 model from `"Going Deeper With Image Transformers" <https://arxiv.org/pdf/2103.17239.pdf>`_.
The required input size of the model is 384x384.
Args:
pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False``
progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``
For example:
.. code-block:: python
>>> import flowvision
>>> cait_S24_384 = flowvision.models.cait_S24_384(pretrained=False, progress=True)
"""
model = cait_models(
img_size=384,
patch_size=16,
embed_dim=384,
depth=24,
num_heads=8,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
init_scale=1e-5,
depth_token_only=2,
**kwargs
)
if pretrained:
state_dict = load_state_dict_from_url(model_urls["cait_S24"], progress=progress)
model.load_state_dict(state_dict)
return model


@ModelCreator.register_model
def cait_S36_384(pretrained=False, progress=True, **kwargs):
"""
Constructs the CaiT-S36-384 model.
.. note::
CaiT-S36-384 model from `"Going Deeper With Image Transformers" <https://arxiv.org/pdf/2103.17239.pdf>`_.
The required input size of the model is 384x384.
Args:
pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False``
progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``
For example:
.. code-block:: python
>>> import flowvision
>>> cait_S36_384 = flowvision.models.cait_S36_384(pretrained=False, progress=True)
"""
model = cait_models(
img_size=384,
patch_size=16,
embed_dim=384,
depth=36,
num_heads=8,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
init_scale=1e-6,
depth_token_only=2,
**kwargs
)
if pretrained:
state_dict = load_state_dict_from_url(model_urls["cait_S36"], progress=progress)
model.load_state_dict(state_dict)
return model


@ModelCreator.register_model
def cait_M36_384(pretrained=False, progress=True, **kwargs):
"""
Constructs the CaiT-M36-384 model.
.. note::
CaiT-M36-384 model from `"Going Deeper With Image Transformers" <https://arxiv.org/pdf/2103.17239.pdf>`_.
The required input size of the model is 384x384.
Args:
pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False``
progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``
For example:
.. code-block:: python
>>> import flowvision
>>> cait_M36_384 = flowvision.models.cait_M36_384(pretrained=False, progress=True)
"""
model = cait_models(
img_size=384,
patch_size=16,
embed_dim=768,
depth=36,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
init_scale=1e-6,
depth_token_only=2,
**kwargs
)
if pretrained:
state_dict = load_state_dict_from_url(model_urls["cait_M36"], progress=progress)
model.load_state_dict(state_dict)
return model


@ModelCreator.register_model
def cait_M48_448(pretrained=False, progress=True, **kwargs):
"""
Constructs the CaiT-M48-448 model.
.. note::
CaiT-M48-448 model from `"Going Deeper With Image Transformers" <https://arxiv.org/pdf/2103.17239.pdf>`_.
The required input size of the model is 448x448.
Args:
pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False``
progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``
For example:
.. code-block:: python
>>> import flowvision
>>> cait_M48_448 = flowvision.models.cait_M48_448(pretrained=False, progress=True)
"""
model = cait_models(
img_size=448,
patch_size=16,
embed_dim=768,
depth=48,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
init_scale=1e-6,
depth_token_only=2,
**kwargs
)
if pretrained:
state_dict = load_state_dict_from_url(model_urls["cait_M48"], progress=progress)
model.load_state_dict(state_dict)
return model
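

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the released API): build a
    # small CaiT-style model with illustrative hyper-parameters and verify
    # that a random batch yields logits of the expected shape.
    tiny = cait_models(
        img_size=224,
        patch_size=16,
        embed_dim=192,
        depth=2,
        num_heads=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
    )
    tiny.eval()
    with flow.no_grad():
        logits = tiny(flow.randn(2, 3, 224, 224))
    print(logits.shape)  # expected: (2, 1000)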