import time
import sys

import torch
from torch import nn
import torchvision

# Use the GPU if one is available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# An earlier hand-rolled flatten layer, superseded by torch.nn.Flatten below:
# class FlattenLayer(torch.nn.Module):
#     def __init__(self):
#         super(FlattenLayer, self).__init__()
#     def forward(self, x):  # x shape: (batch, *, *, ...)
#         return x.view(x.shape[0], -1)
def load_data_fashion_mnist(batch_size, resize=None, root='../Datasets/'):
    """Download (if needed) and load Fashion-MNIST, returning train/test DataLoaders."""
    if sys.platform.startswith('win'):
        num_workers = 0  # multi-process data loading is brittle on Windows
    else:
        num_workers = 4
    trans = []
    if resize:  # only add a Resize transform when a target size is passed in
        trans.append(torchvision.transforms.Resize(size=resize))
    trans.append(torchvision.transforms.ToTensor())
    transform = torchvision.transforms.Compose(trans)  # trans is a list of transforms
    mnist_train = torchvision.datasets.FashionMNIST(root=root, train=True, download=True, transform=transform)
    mnist_test = torchvision.datasets.FashionMNIST(root=root, train=False, download=True, transform=transform)
    train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_iter, test_iter
batch_size = 64
train_iter, test_iter = load_data_fashion_mnist(batch_size, resize=224)
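# Sanity check (a minimal sketch, uncomment to run): with resize=224 the first
# batch should come out as (batch_size, 1, 224, 224) grayscale images.
# X, y = next(iter(train_iter))
# print(X.shape, y.shape)  # expected: torch.Size([64, 1, 224, 224]) torch.Size([64])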
# VGG block
def vgg_block(num_convs, in_channels, out_channels):
    """Build one VGG block.

    num_convs:    number of 3x3 convolutional layers in this block
    in_channels:  number of input channels
    out_channels: number of output channels
    The convolutions keep the spatial size (padding=1); only the final
    max-pool halves the height and width.
    """
    blk = []
    for i in range(num_convs):
        if i == 0:
            blk.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))  # padding=1 keeps H and W
        else:
            blk.append(nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1))
        blk.append(nn.ReLU())
    blk.append(nn.MaxPool2d(kernel_size=2, stride=2))  # halves the width and height
    return nn.Sequential(*blk)  # * unpacks the list into positional arguments
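# Quick shape check (a sketch, uncomment to run): the 3x3 convolutions keep
# the 224x224 size and the pool halves it, so a 2-conv block maps
# (1, 1, 224, 224) -> (1, 64, 112, 112).
# blk = vgg_block(2, 1, 64)
# print(blk(torch.rand(1, 1, 224, 224)).shape)  # torch.Size([1, 64, 112, 112])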
# Each tuple is (num_convs, in_channels, out_channels): the first element says
# how many convolutional layers the vgg_block gets (1 or 2 here).
conv_arch = ((1, 1, 64), (1, 64, 128), (2, 128, 256), (2, 256, 512), (2, 512, 512))
# After 5 vgg_blocks the width and height are halved 5 times: 224 / 2**5 = 7.
fc_features = 512 * 7 * 7  # c * w * h
fc_hidden_units = 4096  # arbitrary choice
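# Worked arithmetic: 224 halved five times is 224 // 2**5 = 7, so the tensor
# entering the classifier flattens to 512 channels * 7 * 7 = 25088 features.
assert fc_features == 512 * (224 // 2 ** 5) ** 2  # 25088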
def vgg(conv_arch, fc_features, fc_hidden_units=4096):
    """Assemble the full VGG network.

    conv_arch:       per-block (num_convs, in_channels, out_channels) tuples
    fc_features:     number of inputs to the first fully connected layer
    fc_hidden_units: width of the hidden fully connected layers
    """
    net = nn.Sequential()
    # Convolutional part: five blocks, each halving the width and height.
    for i, (num_convs, in_channels, out_channels) in enumerate(conv_arch):
        net.add_module("vgg_block_" + str(i + 1), vgg_block(num_convs, in_channels, out_channels))
    # Fully connected part.
    net.add_module("fc", nn.Sequential(
        nn.Flatten(),  # keeps dim 0 (the batch) and flattens the rest
        nn.Linear(fc_features, fc_hidden_units),
        nn.ReLU(),
        nn.Dropout(0.5),
        nn.Linear(fc_hidden_units, fc_hidden_units),
        nn.ReLU(),
        nn.Dropout(0.5),
        nn.Linear(fc_hidden_units, 10)
    ))
    return net
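# Layer-by-layer shape trace (a sketch, uncomment to run): push a dummy
# single-channel 224x224 image through each named block to watch the spatial
# size halve and the channel count grow.
# X = torch.rand(1, 1, 224, 224)
# for name, blk in vgg(conv_arch, fc_features).named_children():
#     X = blk(X)
#     print(name, 'output shape:', X.shape)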
# A stripped-down variant (single fully connected layer) kept for reference:
# def vgg(conv_arch, fc_features):
#     net = nn.Sequential()
#     # Convolutional part
#     for i, (num_convs, in_channels, out_channels) in enumerate(conv_arch):
#         # each vgg_block halves the width and height
#         net.add_module("vgg_block_" + str(i + 1), vgg_block(num_convs, in_channels, out_channels))
#     # Fully connected part
#     net.add_module("fc", nn.Sequential(nn.Flatten(),
#                                        nn.Linear(fc_features, 10)))
#     return net
net = vgg(conv_arch, fc_features)
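# If training is too slow (e.g. on a CPU), a common workaround -- an
# assumption here, not part of the original script -- is to shrink every
# channel count by a constant ratio before building the net:
# ratio = 8
# small_conv_arch = [(n, c_in if c_in == 1 else c_in // ratio, c_out // ratio)
#                    for (n, c_in, c_out) in conv_arch]
# net = vgg(small_conv_arch, fc_features // ratio, fc_hidden_units // ratio)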
def evaluate_accuracy(data_iter, net, device=None):
    """Compute the classification accuracy of net over data_iter."""
    if device is None and isinstance(net, torch.nn.Module):
        # If no device was given, use the one the net's parameters live on.
        device = list(net.parameters())[0].device
    acc_sum, n = 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            net.eval()  # evaluation mode: disables dropout
            acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()
            net.train()  # back to training mode
            n += y.shape[0]
    return acc_sum / n
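# Example call (a sketch, uncomment to run): before training, accuracy should
# hover near chance, i.e. roughly 0.1 for the 10 Fashion-MNIST classes.
# print(evaluate_accuracy(test_iter, net, device))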
def train(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs):
    """Train net on train_iter and report test accuracy after each epoch."""
    net = net.to(device)
    print("training on ", device)
    loss = torch.nn.CrossEntropyLoss()
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, batch_count, start = 0.0, 0.0, 0, 0, time.time()
        for X, y in train_iter:
            X = X.to(device)
            y = y.to(device)
            y_hat = net(X)  # one score per class, 10 in total
            # CrossEntropyLoss applies log-softmax to y_hat internally and
            # indexes it with the integer label y (0-9); no one-hot needed.
            l = loss(y_hat, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_l_sum += l.cpu().item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
            n += y.shape[0]
            batch_count += 1
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'
              % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))
lr, num_epochs = 0.001, 5
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
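# An alternative optimizer (an assumption, not from the original script):
# plain SGD with momentum, closer to the original VGG training recipe.
# optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)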
if __name__ == '__main__':
    train(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs)