import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np
import torchvision
from torchvision import datasets, transforms
# Runtime configuration shared by the rest of the script.
args = {
    "batch_size": 512,   # samples per mini-batch
    "epochs": 20,        # full passes over the training set
    # Prefer the GPU when one is available, otherwise fall back to the CPU.
    "device": torch.device("cuda" if torch.cuda.is_available() else "cpu"),
}
# Data loaders for the MNIST train and test splits.
# BUG FIX: the canonical MNIST per-channel mean is 0.1307 (std 0.3081); the
# original code used 0.1037, a digit transposition of the dataset statistic.
# Both loaders must use the same normalisation so train/test inputs match.
mnist_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])
# Training set
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data/', train=True, download=True,
                   transform=mnist_transform),
    batch_size=args['batch_size'],
    shuffle=True,
    drop_last=True,  # drop the final short batch so every batch has a fixed size
)
# Test set
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data/', train=False,
                   transform=mnist_transform),
    batch_size=args['batch_size'],
    shuffle=False,  # evaluation order does not matter; keep it deterministic
)
# Model architecture
class Net(nn.Module):
    """Small CNN for 28x28 single-channel MNIST digits (10 classes)."""

    def __init__(self):
        super(Net, self).__init__()
        self.net = nn.Sequential(
            nn.Conv2d(1, 10, kernel_size=5),   # 28x28 -> 24x24
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),                # 24x24 -> 12x12
            nn.Conv2d(10, 20, kernel_size=3),  # 12x12 -> 10x10
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),                # 10x10 -> 5x5
            nn.Flatten(),                      # 20 * 5 * 5 = 500 features
            nn.Linear(500, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, 10),                # one logit per digit class
        )

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, 10).

        BUG FIX: `dim=1` must be passed explicitly — the implicit-dim form of
        log_softmax is deprecated and only guesses the reduction axis.
        """
        return F.log_softmax(self.net(x), dim=1)
# Instantiate the network on the selected device and pair it with an Adam
# optimizer using the default hyper-parameters (lr=1e-3).
model = Net()
model = model.to(args['device'])
optimizer = optim.Adam(params=model.parameters())
# One epoch of optimisation.
def train(model, device, train_loader, optimizer, epoch):
    """Train `model` for a single epoch over `train_loader`.

    Performs a standard zero-grad / forward / NLL-loss / backward / step cycle
    per batch and prints a progress line every 30 batches.
    """
    model.train()  # enable training-mode behaviour (dropout/BN, if any)
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.to(device)
        target = target.to(device)
        optimizer.zero_grad()                   # clear previous gradients
        loss = F.nll_loss(model(data), target)  # forward pass + loss
        loss.backward()                         # back-propagate
        optimizer.step()                        # apply the update
        if (batch_idx + 1) % 30 == 0:           # periodic progress report
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
# 定义测试函数
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum') # 将一批的损失相加
pred = output.max(1, keepdim=True)[1] # 找到概率最大的下标
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print("\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%) \n".format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)
))
# Main loop: train for the configured number of epochs, evaluating after each.
for epoch in range(args['epochs']):
    train(model, args['device'], train_loader, optimizer, epoch + 1)
    test(model, args['device'], test_loader)
# Visualise the model's predictions on the first test batch.
# (A previously commented-out "ground truth" visualisation — a dead module-level
# string literal — was removed.)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
with torch.no_grad():
    # BUG FIX: the model lives on args['device'] (possibly CUDA) while the
    # DataLoader batch is on the CPU; move the batch to the model's device and
    # bring the output back to the CPU for matplotlib.
    output = model(example_data.to(args['device'])).cpu()
pred = output.max(1, keepdim=True)[1]  # predicted class index per example
fig = plt.figure()
for i in range(12):
    plt.subplot(3, 4, i + 1)
    plt.tight_layout()
    plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
    plt.title("Prediction: {}".format(pred[i].item()))
    plt.xticks([])
    plt.yticks([])
plt.show()
# NOTE(review): removed non-code residue — CSDN download-page boilerplate
# ("资源推荐 / 资源评论 / 上传资源 ..." navigation text) that was accidentally
# appended to the script and made the file a Python syntax error.