A walkthrough of a simplified version of the ResNet implementation from the official PyTorch source.
Imports
import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
Download URLs for the pretrained weights of each network variant
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
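Each entry maps an architecture name to its checkpoint file. As a quick sanity check (a sketch, assuming network access), you can download one of these state dicts directly and inspect it:

state_dict = load_state_dict_from_url(model_urls['resnet18'], progress=True)
print(len(state_dict))               # number of parameter/buffer tensors
print(list(state_dict.keys())[:4])   # e.g. conv1.weight, bn1.weight, bn1.bias, ...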
The 3 * 3 convolution is used frequently below, so we wrap it in a helper up front
def conv3x3(in_planes, out_planes, stride=1, padding=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=padding, bias=False)
The 1 * 1 convolution is also used frequently below, so it gets a helper too
def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
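A minimal shape check for the two helpers (the sizes here are made up for illustration): with padding=1 and stride 1, conv3x3 preserves H x W, while conv1x1 only mixes channels, and a stride of 2 halves the spatial size.

x = torch.randn(1, 64, 56, 56)  # (N, C, H, W), hypothetical input
print(conv3x3(64, 128)(x).shape)            # torch.Size([1, 128, 56, 56])
print(conv1x1(64, 256, stride=2)(x).shape)  # torch.Size([1, 256, 28, 28])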
Building BasicBlock (important)
- ResNets shallower than ResNet-50 use the BasicBlock module. Its structure is 3 * 3 conv – BN – ReLU – 3 * 3 conv – BN, followed by the residual addition and a final ReLU. Pay attention to downsample: because this is a residual network computing x + F(x), the original features and the convolved features must have the same spatial shape and channel count before they can be added together (a quick shape check follows the class definition below).
class BasicBlock(nn.Module):
    expansion = 1  # factor by which the block changes the channel count
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):
        # downsample: projects the identity so its shape matches F(x) before the addition
        # norm_layer: normalization layer (BatchNorm2d if not specified)
super(BasicBlock, self).__init__()
if norm_layer is None:
            norm_layer = nn.BatchNorm2d  # fall back to the standard BN layer if none is given
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
        identity = x  # keep the input for the shortcut branch
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
            identity = self.downsample(x)  # reshape x so that F(x) + x is well-defined
        # the residual addition
        out += identity
        out = self.relu(out)  # add first, then activate
return out
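A small sketch of a BasicBlock that changes both stride and channel count, with the matching downsample assembled by hand (inside the real network, _make_layer builds this for you):

ds = nn.Sequential(conv1x1(64, 128, stride=2), nn.BatchNorm2d(128))
block = BasicBlock(64, 128, stride=2, downsample=ds)
x = torch.randn(1, 64, 56, 56)
print(block(x).shape)  # torch.Size([1, 128, 28, 28])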
Building Bottleneck (important)
- ResNet-50 and deeper use the Bottleneck module. Its structure is 1 * 1 conv – BN – ReLU – 3 * 3 conv – BN – ReLU – 1 * 1 conv – BN, followed by the residual addition and a final ReLU. downsample works the same way as above (a shape sketch follows the class definition below).
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = norm_layer(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = norm_layer(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)  # output channels: planes * self.expansion
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
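The same kind of sketch for Bottleneck: with expansion = 4, a block constructed with planes=64 outputs 256 channels, so the downsample must project the identity to 256 as well (this mirrors the first block of layer1 in ResNet-50, where the channel count changes even though stride stays 1):

ds = nn.Sequential(conv1x1(64, 256), nn.BatchNorm2d(256))
block = Bottleneck(64, 64, downsample=ds)
x = torch.randn(1, 64, 56, 56)
print(block(x).shape)  # torch.Size([1, 256, 56, 56])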
Building ResNet (core)
class ResNet(nn.Module):
def __init__(self, block, layers, num_class=1000, norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
        # conv1: the 7 * 7 stem convolution from the paper
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))  # output size (1, 1) is equivalent to global average pooling (GAP)
self.fc = nn.Linear(512 * block.expansion, num_class)
        # parameter initialization: self.modules() iterates over every submodule; check its type and apply the matching init scheme
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
    # how each stage (layer1..layer4) is built; note the use of downsample here
def _make_layer(self, block, planes, blocks, stride=1):
        # generates one stage/layer of the network
        # block: block type (BasicBlock / Bottleneck)
        # blocks: number of blocks in this stage
norm_layer = self._norm_layer
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
            # the identity branch needs its dimensions adjusted
downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),  # adjusts both spatial size (H x W) and channel count
norm_layer(planes * block.expansion)
)
        # build the stage as a Python list of blocks, then unpack it into nn.Sequential
layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, norm_layer))  # the first block is handled separately: it may change stride and channels
        self.inplanes = planes * block.expansion  # track the channel count for the remaining blocks (see the ResNet table in the paper)
        for _ in range(1, blocks):  # start from 1: the first block was already appended above
            layers.append(block(self.inplanes, planes, norm_layer=norm_layer))
        return nn.Sequential(*layers)  # combine the blocks into one stage; *layers unpacks the list so each block becomes a separate positional argument
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
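To see everything fit together, here is a ResNet-18 assembled directly from the class above (BasicBlock, two blocks per stage) and run on a dummy ImageNet-sized batch:

net = ResNet(BasicBlock, [2, 2, 2, 2])
x = torch.randn(2, 3, 224, 224)
print(net(x).shape)  # torch.Size([2, 1000])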
The ResNet factory function
- This function is a wrapper for constructing a ResNet: depending on its arguments it produces different models (depth, whether to load pretrained weights, which block type to use).
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
resnet18
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
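The **kwargs pass-through is what lets you customize the classifier head; for example, a randomly initialized 10-class variant (this only works with pretrained=False, since the pretrained checkpoint's fc layer has 1000 outputs):

model10 = resnet18(pretrained=False, num_class=10)
print(model10.fc)  # Linear(in_features=512, out_features=10, bias=True)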
Instantiating and inspecting the model
model = resnet18(pretrained=True)
model.eval()
Using the network bundled with PyTorch, loaded directly via torch.hub
import torch
model2 = torch.hub.load('pytorch/vision:v0.4.2', 'resnet18', pretrained=True)
# or any of these variants
# model = torch.hub.load('pytorch/vision:v0.4.2', 'resnet34', pretrained=True)
# model = torch.hub.load('pytorch/vision:v0.4.2', 'resnet50', pretrained=True)
# model = torch.hub.load('pytorch/vision:v0.4.2', 'resnet101', pretrained=True)
# model = torch.hub.load('pytorch/vision:v0.4.2', 'resnet152', pretrained=True)
model2.eval()
# the pretrained=True path above and torch.hub.load load identical weights; compare the two state dicts below to confirm
model.state_dict()
model2.state_dict()
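Rather than eyeballing the two state dicts, a small comparison sketch verifies they really hold the same weights:

sd1, sd2 = model.state_dict(), model2.state_dict()
assert sd1.keys() == sd2.keys()
print(all(torch.equal(sd1[k], sd2[k]) for k in sd1))  # True if every tensor matches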
Testing with an image
# Download an example image from the pytorch website
import urllib
url, filename = ("https://github.com/pytorch/hub/raw/master/dog.jpg", "dog.jpg")
try:
urllib.URLopener().retrieve(url, filename)
except:
urllib.request.urlretrieve(url, filename)
# sample execution (requires torchvision)
from PIL import Image
from torchvision import transforms
input_image = Image.open(filename)
# preprocessing: resize, center-crop, convert to a tensor, normalize with ImageNet statistics
preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_tensor = preprocess(input_image)
# add a batch dimension
input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
# move the input and model to the GPU if one is available (this is inference, not training)
if torch.cuda.is_available():
input_batch = input_batch.to('cuda')
model.to('cuda')
with torch.no_grad():
output = model(input_batch)
# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes
print(output[0])
# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
print(torch.nn.functional.softmax(output[0], dim=0))
result = torch.nn.functional.softmax(output[0], dim=0)
result.argmax()
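argmax only gives the index of the top class; to turn it into a human-readable label you can fetch the class list that accompanies the official hub example (URL from the pytorch/hub repository):

import urllib.request
urllib.request.urlretrieve('https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt', 'imagenet_classes.txt')
with open('imagenet_classes.txt') as f:
    categories = [line.strip() for line in f]
print(categories[result.argmax().item()])  # e.g. 'Samoyed' for the sample dog image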
The complete code
# imports
import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
# download URLs for the pretrained weights of each network variant
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
# 3 * 3 convolution helper, used frequently below
def conv3x3(in_planes, out_planes, stride=1, padding=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=padding, bias=False)
# Why no bias? The conv is normally followed by a BN layer whose affine shift absorbs any bias, so a bias term would only add useless parameters and make training harder
# 1 * 1 convolution helper, used frequently below
def conv1x1(in_planes, out_planes, stride=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                     bias=False)  # Why no bias: when a BN layer follows, the bias is redundant, so dropping it saves parameters
# ResNets shallower than ResNet-50 use the BasicBlock module: 3 * 3 conv - BN - ReLU - 3 * 3 conv - BN, followed by the residual addition and a final ReLU. Note downsample: because the network computes x + F(x), the original features and the convolved features must have the same spatial shape and channel count before they can be added together
class BasicBlock(nn.Module):
    expansion = 1  # factor by which the block changes the channel count
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):
        # downsample: projects the identity so its shape matches F(x) before the addition
        # norm_layer: normalization layer (BatchNorm2d if not specified)
super(BasicBlock, self).__init__()
if norm_layer is None:
            norm_layer = nn.BatchNorm2d  # fall back to the standard BN layer if none is given
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
        identity = x  # keep the input for the shortcut branch
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
            identity = self.downsample(x)  # reshape x so that F(x) + x is well-defined
        # the residual addition
        out += identity
        out = self.relu(out)  # add first, then activate
return out
# ResNet-50 and deeper use the Bottleneck module: 1 * 1 conv - BN - ReLU - 3 * 3 conv - BN - ReLU - 1 * 1 conv - BN, followed by the residual addition and a final ReLU. downsample works the same way as above
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = norm_layer(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = norm_layer(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)  # output channels: planes * self.expansion
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
# This is the main body of ResNet. The stem is a 7 * 7 convolution - BN - ReLU - max pooling, followed by stage2 - stage3 - stage4 - stage5 (layer1..layer4), each built with _make_layer, then AdaptiveAvgPool2d and a fully connected layer. Before the fc layer, AdaptiveAvgPool2d((1, 1)) averages each channel's feature map down to a single value (global average pooling); concatenating those values gives a vector that feeds straight into the fully connected layer.
class ResNet(nn.Module):
def __init__(self, block, layers, num_class=1000, norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
        # conv1: the 7 * 7 stem convolution from the paper
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))  # output size (1, 1) is equivalent to global average pooling (GAP)
self.fc = nn.Linear(512 * block.expansion, num_class)
        # parameter initialization: self.modules() iterates over every submodule; check its type and apply the matching init scheme
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
    # how each stage (layer1..layer4) is built; note the use of downsample here
def _make_layer(self, block, planes, blocks, stride=1):
        # generates one stage/layer of the network
        # block: block type (BasicBlock / Bottleneck)
        # blocks: number of blocks in this stage
norm_layer = self._norm_layer
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
            # the identity branch needs its dimensions adjusted
downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),  # adjusts both spatial size (H x W) and channel count
norm_layer(planes * block.expansion)
)
        # build the stage as a Python list of blocks, then unpack it into nn.Sequential
layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, norm_layer))  # the first block is handled separately: it may change stride and channels
        self.inplanes = planes * block.expansion  # track the channel count for the remaining blocks (see the ResNet table in the paper)
        for _ in range(1, blocks):  # start from 1: the first block was already appended above
            layers.append(block(self.inplanes, planes, norm_layer=norm_layer))
        return nn.Sequential(*layers)  # combine the blocks into one stage; *layers unpacks the list so each block becomes a separate positional argument
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
# wrapper that builds a ResNet: different arguments produce different models (depth, pretrained or not, block type)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
# the resnet18 constructor
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
model = resnet18(pretrained=True)
model.eval()