Notes on my understanding of how the Transformer approach is applied to computer vision tasks.
Reference blog: https://blog.csdn.net/weixin_42392454/article/details/122667271
Reference code: https://gitcode.net/mirrors/Runist/torch_vision_transformer?utm_source=csdn_github_accelerator
import math
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
# params: the model's trainable parameters, e.g. [p for p in model.parameters() if p.requires_grad]
optimizer = optim.SGD(params, lr=args.lr, momentum=0.9, weight_decay=5e-5)
# cosine learning-rate schedule: decays from args.lr to args.lr * args.lrf over args.epochs
lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf  # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# one training step
optimizer.zero_grad()
logits = model(images)
loss = loss_function(logits, labels)
loss.backward()
optimizer.step()
scheduler.step()
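To show where these calls sit relative to each other, a minimal epoch-loop sketch; it assumes `model`, `train_loader`, `device`, `loss_function`, and the optimizer/scheduler defined above, and steps the cosine schedule once per epoch (the reference repo may organize this loop differently):
for epoch in range(args.epochs):
    model.train()
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        logits = model(images)                # [B, num_classes]
        loss = loss_function(logits, labels)
        loss.backward()
        optimizer.step()                      # parameter update per batch
    scheduler.step()                          # cosine LR update once per epoch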
Model architecture diagram
Assume the input has shape [B, C, H, W], with C=3, H=224, W=224,
and patch_size=16.
Then: num_patches = (H/patch_size) * (W/patch_size) = (224/16) * (224/16) = 14 * 14 = 196
embed_dim = C * patch_size * patch_size = 3 * 16 * 16 = 768
# [B, C, H, W] -> [B, num_patches, embed_dim]=[B, 196, 768]
The detailed flow:
image_size=224, patch_size=16, in_c=3, embed_dim=768
# The input tensor is divided into patches using 16x16 convolution
self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size, stride=patch_size)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
# forward
# self.proj: [B, C, image_size, image_size] = [B, 3, 224, 224] -> [B, embed_dim, H, W] = [B, 768, 14, 14]; in what follows, C = embed_dim = 768 and H = W = 14
# flatten: [B, C, H, W] -> [B, C, HW] = [B, 768, 14*14] = [B, 768, 196]
# transpose: [B, C, HW] -> [B, HW, C] = [B, 196, 768]; LayerNorm is applied over the last dimension (embed_dim = 768), hence the transpose
x = self.proj(x).flatten(2).transpose(1, 2)
x = self.norm(x)
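Putting the pieces above together, a minimal self-contained sketch of the patch-embedding module (the names mirror the reference code; the shape check at the end is illustrative):
import torch
import torch.nn as nn

class PatchEmbed(nn.Module):
    """Split an image into patches and project each patch to embed_dim."""
    def __init__(self, image_size=224, patch_size=16, in_c=3, embed_dim=768, norm_layer=None):
        super().__init__()
        self.num_patches = (image_size // patch_size) * (image_size // patch_size)  # 14*14 = 196
        self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        # [B, 3, 224, 224] -> [B, 768, 14, 14] -> [B, 768, 196] -> [B, 196, 768]
        x = self.proj(x).flatten(2).transpose(1, 2)
        return self.norm(x)

x = torch.randn(2, 3, 224, 224)
print(PatchEmbed()(x).shape)  # torch.Size([2, 196, 768])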
Add a class token (for classification). Its format is the same as the other tokens: a vector of length 768. Unlike the positional encoding it is fused by concatenation rather than addition, because the classification information has to be taken out on its own for the final prediction, so it cannot be merged by adding; the shape therefore goes from [196, 768] to [197, 768].
self.pos_embed #[1, num_patches + self.num_tokens, embed_dim] = [1, 196+1, 768] = [1, 197, 768]
Tensor broadcasting: the first dimension of self.pos_embed is 1 while the first dimension of x is B, so when they are added self.pos_embed is broadcast and added to every sample in the batch.
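A minimal sketch of the class-token concatenation and the broadcast addition of the positional embedding, under the shapes assumed above (the variable names here are illustrative):
import torch
import torch.nn as nn

B, num_patches, embed_dim = 2, 196, 768
cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))                 # learnable, [1, 1, 768]
pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))   # [1, 197, 768]

x = torch.randn(B, num_patches, embed_dim)   # output of the patch embedding
cls = cls_token.expand(B, -1, -1)            # [B, 1, 768]
x = torch.cat((cls, x), dim=1)               # concat -> [B, 197, 768]
x = x + pos_embed                            # broadcast add over the batch dimension
print(x.shape)                               # torch.Size([2, 197, 768])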
norm_layer=nn.LayerNorm
self.norm1 = norm_layer(dim)
self.drop_path: stochastic depth, which randomly drops the entire block (its residual branch) for some samples in the batch
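A minimal sketch of what drop_path (stochastic depth) does, using the common per-sample formulation rather than the exact reference implementation:
import torch

def drop_path(x, drop_prob=0.1, training=True):
    """Randomly zero the whole residual branch for some samples in the batch."""
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    # one keep/drop decision per sample, broadcast over all remaining dims
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = (torch.rand(shape, device=x.device) < keep_prob).to(x.dtype)
    return x / keep_prob * mask  # rescale kept samples to preserve the expectation
# usage inside a block: x = x + drop_path(self.attn(self.norm1(x)), 0.1, self.training)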
self.attn: # [B, 197, 768]
[batch_size, num_patches + 1, total_embed_dim]
B, N, C = x.shape #B=batch_size, N=num_patches + 1=196+1=197, C=total_embed_dim=768
# 1) obtain q, k, v
# qkv(): -> [batch_size, num_patches + 1, 3 * total_embed_dim] = [B, 197, 3*768]
# reshape: -> [batch_size, num_patches + 1, 3, num_heads, embed_dim_per_head] = [B, 197, 3, 8, 768/8], assuming num_heads = 8
# permute: -> [3, batch_size, num_heads, num_patches + 1, embed_dim_per_head] = [3, B, 8, 197, 768/8]
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
# [batch_size, num_heads, num_patches + 1, embed_dim_per_head] = [B, 8, 197, 768/8]
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
# 2) compute the attention weights: w = softmax((q @ k^T) * scale)
# transpose: -> [batch_size, num_heads, embed_dim_per_head, num_patches + 1] = [B, 8, 768/8, 197]
# @: matrix multiply -> [batch_size, num_heads, num_patches + 1, num_patches + 1] = [B, 8, 197, 197]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
# 3) compute the attention output: s = w @ v
# @: matrix multiply -> [batch_size, num_heads, num_patches + 1, embed_dim_per_head] = [B, 8, 197, 768/8]
# transpose: -> [batch_size, num_patches + 1, num_heads, embed_dim_per_head] = [B, 197, 8, 768/8]
# reshape: -> [batch_size, num_patches + 1, total_embed_dim] = [B, 197, 768]
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
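Assembling the steps above into a self-contained multi-head self-attention module (a sketch that follows the shapes in these comments; num_heads=8 is kept to match the example, although ViT-Base actually uses 12 heads):
import torch
import torch.nn as nn

class Attention(nn.Module):
    def __init__(self, dim=768, num_heads=8, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5        # 1/sqrt(d_k)
        self.qkv = nn.Linear(dim, dim * 3)   # q, k, v in one projection
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape                                  # [B, 197, 768]
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]                   # each [B, 8, 197, 768/8]
        attn = (q @ k.transpose(-2, -1)) * self.scale      # [B, 8, 197, 197]
        attn = self.attn_drop(attn.softmax(dim=-1))
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)    # back to [B, 197, 768]
        return self.proj_drop(self.proj(x))

x = torch.randn(2, 197, 768)
print(Attention()(x).shape)  # torch.Size([2, 197, 768])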
self.norm2 = norm_layer(dim)
self.drop_path: stochastic depth again, randomly dropping the entire block (its residual branch) for some samples
self.mlp: # [B, 197, 768]
in_features=dim=embed_dim #768
mlp_ratio=4.
mlp_hidden_dim = int(dim * mlp_ratio) #768*4 = 3072
hidden_features=mlp_hidden_dim
out_features = in_features #768
self.fc1 = nn.Linear(in_features, hidden_features) #(768, 768*4)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features) #(768*4, 768)
self.drop = nn.Dropout(drop)
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
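A sketch of how one encoder block chains norm1 / attn / drop_path / norm2 / mlp with residual connections (pre-norm, as in the code above); it reuses the Attention sketch from the attention section and stands in nn.Identity() for drop_path:
import torch
import torch.nn as nn

class Mlp(nn.Module):
    def __init__(self, in_features=768, mlp_ratio=4., drop=0.):
        super().__init__()
        hidden = int(in_features * mlp_ratio)      # 768*4 = 3072
        self.fc1 = nn.Linear(in_features, hidden)
        self.act = nn.GELU()
        self.fc2 = nn.Linear(hidden, in_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        return self.drop(self.fc2(self.drop(self.act(self.fc1(x)))))

class Block(nn.Module):
    def __init__(self, dim=768, num_heads=8, mlp_ratio=4.):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = Attention(dim, num_heads)      # the Attention sketch above
        self.drop_path = nn.Identity()             # stands in for stochastic depth
        self.norm2 = nn.LayerNorm(dim)
        self.mlp = Mlp(dim, mlp_ratio)

    def forward(self, x):
        # pre-norm residual blocks: x + Attn(LN(x)), then x + MLP(LN(x))
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x

print(Block()(torch.randn(2, 197, 768)).shape)  # torch.Size([2, 197, 768])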
norm_layer = nn.LayerNorm
self.norm = norm_layer(embed_dim)
self.num_features = self.embed_dim = embed_dim # 768
num_classes=1000
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
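Tying everything together, a sketch of the full forward pass under the shapes assumed in this note; it reuses the PatchEmbed and Block sketches above (depth=12 matches ViT-Base, while num_heads=8 keeps this note's assumption):
import torch
import torch.nn as nn

class VisionTransformer(nn.Module):
    def __init__(self, image_size=224, patch_size=16, in_c=3, embed_dim=768,
                 depth=12, num_heads=8, num_classes=1000):
        super().__init__()
        self.patch_embed = PatchEmbed(image_size, patch_size, in_c, embed_dim)   # sketch above
        num_patches = self.patch_embed.num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.blocks = nn.Sequential(*[Block(embed_dim, num_heads) for _ in range(depth)])
        self.norm = nn.LayerNorm(embed_dim)
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward(self, x):
        x = self.patch_embed(x)                            # [B, 196, 768]
        cls = self.cls_token.expand(x.shape[0], -1, -1)    # [B, 1, 768]
        x = torch.cat((cls, x), dim=1) + self.pos_embed    # [B, 197, 768]
        x = self.norm(self.blocks(x))
        return self.head(x[:, 0])                          # classify from the class token only

print(VisionTransformer()(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 1000])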
with torch.no_grad():
    # predict class
    output = torch.squeeze(model(image.to(device))).cpu()
    predict = torch.softmax(output, dim=0)  # normalize the outputs into a probability distribution that sums to 1
    index = torch.argmax(predict).numpy()   # index of the highest probability, i.e. which of the 1000 classes it belongs to
    # print the top class
    print("prediction: {}   prob: {:.3}\n".format(args.label_name[index], predict[index].numpy()))
    # print the score of every class
    for i in range(len(predict)):
        print("class: {}   prob: {:.3}".format(args.label_name[i], predict[i].numpy()))
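The inference snippet assumes `image` is already a preprocessed tensor of shape [1, 3, 224, 224]; a minimal preprocessing sketch using torchvision (the mean/std are the common ImageNet statistics and may differ from the reference repo; "test.jpg" is a placeholder path):
import torch
from PIL import Image
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),  # ImageNet stats
])
image = preprocess(Image.open("test.jpg").convert("RGB")).unsqueeze(0)   # [1, 3, 224, 224]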