"""
@File : vit.py
@Author : CodeCat
@Time : 2021/7/8 2:13 PM
"""
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
class PatchEmbed(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_c=3, embed_dim=768, norm_layer=None):
super(PatchEmbed, self).__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size, stride=patch_size)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}, {W}) does not match model ({self.img_size[0]}, {self.img_size[1]})"
# flatten: [B, C, H, W] -> [B, C, HW]
# transpose: [B, C, HW] -> [B, HW, C]
x = self.proj(x).flatten(2).transpose(1, 2)
x = self.norm(x)
return x
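

# Shape walk-through for PatchEmbed (illustrative comment, not in the original):
#   input           -> [B, 3, 224, 224]
#   proj            -> [B, 768, 14, 14]   (Conv2d with kernel = stride = patch_size)
#   flatten(2)      -> [B, 768, 196]
#   transpose(1, 2) -> [B, 196, 768]      (196 = (224 // 16) ** 2 patches)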
class Attention(nn.Module):
def __init__(self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop_ratio=0.,
proj_drop_ratio=0.):
super(Attention, self).__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim*3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop_ratio)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop_ratio)
def forward(self, x):
# [batch_size, num_patches+1, total_embed_dim]
B, N, C = x.shape
# qkv(): -> [batch_size, num_patches+1, 3*total_embed_dim]
        # reshape: -> [batch_size, num_patches+1, 3, num_heads, embed_dim_per_head]
# permute: -> [3, batch_size, num_heads, num_patches+1, embed_dim_per_head]
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
# transpose: -> [batch_size, num_heads, embed_dim_per_head, num_patches+1]
        # @: -> [batch_size, num_heads, num_patches+1, num_patches+1]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
# @: -> [batch_size, num_heads, num_patches+1, embed_dim_per_head]
# transpose: -> [batch_size, num_patches+1, num_heads, embed_dim_per_head]
# reshape: -> [batch_size, num_patches+1, total_embed_dim]
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
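

# Note (added comment): Attention is shape-preserving, mapping [B, N, C] to
# [B, N, C]. C must be divisible by num_heads; the default scale is
# (C // num_heads) ** -0.5, the scaled dot-product factor from
# "Attention Is All You Need".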
class MLP(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super(MLP, self).__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
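

# Note (added comment): the MLP expands the channel dimension to
# hidden_features (Block passes int(dim * mlp_ratio), i.e. 4x by default)
# and projects back, applying GELU after fc1 and dropout after each stage.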
class Block(nn.Module):
def __init__(self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop_ratio=0.,
attn_drop_ratio=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm):
super(Block, self).__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim=dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop_ratio=attn_drop_ratio, proj_drop_ratio=drop_ratio)
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = MLP(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop_ratio)
def forward(self, x):
x = x + self.attn(self.norm1(x))
x = x + self.mlp(self.norm2(x))
return x
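

# Note (added comment): Block uses the pre-norm Transformer layout from the
# ViT paper: x + Attn(LN(x)) followed by x + MLP(LN(x)), with a residual
# connection around each sub-layer.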
class VisionTransformer(nn.Module):
def __init__(self,
img_size=224, patch_size=16, in_c=3, embed_dim=768,
num_classes=1000, depth=12,
num_heads=12, qkv_bias=True, qk_scale=None, mlp_ratio=4.0,
representation_size=None, drop_ratio=0., attn_drop_ratio=0.,
embed_layer=PatchEmbed, norm_layer=None, act_layer=None):
super(VisionTransformer, self).__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim
self.num_tokens = 1
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
# Patch layer
self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_c=in_c,
embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches+self.num_tokens, embed_dim))
self.pos_drop = nn.Dropout(drop_ratio)
        # Transformer Encoder layer
self.blocks = nn.Sequential(*[
Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop_ratio=drop_ratio, attn_drop_ratio=attn_drop_ratio, norm_layer=norm_layer, act_layer=act_layer)
for _ in range(depth)
])
self.norm = norm_layer(embed_dim)
# Representation layer
if representation_size:
self.has_logits = True
self.num_features = representation_size
self.pre_logits = nn.Sequential(OrderedDict([
("fc", nn.Linear(embed_dim, representation_size)),
("act", nn.Tanh())
]))
else:
self.has_logits = False
self.pre_logits = nn.Identity()
# Classifier layer
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# weight init
nn.init.trunc_normal_(self.pos_embed, std=0.02)
nn.init.trunc_normal_(self.cls_token, std=0.02)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.LayerNorm):
nn.init.zeros_(m.bias)
nn.init.ones_(m.weight)
def forward(self, x):
# [B, C, H, W] -> [B, num_patches, embed_dim]
x = self.patch_embed(x)
# [1, 1, embed_dim] -> [B, 1, embed_dim]
cls_token = self.cls_token.expand(x.shape[0], -1, -1)
# [B, num_patches+1, embed_dim]
x = torch.cat((cls_token, x), dim=1)
x = self.pos_drop(x + self.pos_embed)
x = self.blocks(x)
x = self.norm(x)
        x = self.pre_logits(x[:, 0])
        # apply the classification head to the class-token representation
        x = self.head(x)
        return x
def vit_base_patch16_224_in21k(num_classes: int = 21843, has_logits: bool = True):
"""
paper: https://arxiv.org/abs/2010.11929
weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch16_224_in21k-e5005f0a.pth
"""
    model = VisionTransformer(img_size=224,
                              patch_size=16,
                              embed_dim=768,
                              depth=12,
                              num_heads=12,
                              representation_size=768 if has_logits else None,
                              num_classes=num_classes)
    return model
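

# Minimal smoke test (illustrative addition, not part of the original file):
# builds the ViT-Base/16 model defined above and checks the output shape on a
# random batch. The 5-class head is an arbitrary choice for demonstration.
if __name__ == '__main__':
    net = vit_base_patch16_224_in21k(num_classes=5, has_logits=False)
    out = net(torch.randn(2, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([2, 5])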