import scipy.io as sio
import numpy as np
import os
import random
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import argparse
#模型
from model import *
#数据
from traindataset import *
if __name__ == "__main__":
    # ---- Hyperparameters / CLI flags -------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int, default=80, help="number of epochs")
    parser.add_argument("--batch_size", type=int, default=64, help="size of each image batch")
    parser.add_argument("--gradient_accumulations", type=int, default=2, help="number of gradient accums before step")
    parser.add_argument("--n_cpu", type=int, default=0, help="number of cpu threads to use during batch generation")
    parser.add_argument("--data_size", type=int, default=10, help="size of each data dimension")
    parser.add_argument("--train_path", default="makeMat/train_data", help="the path of train data")
    parser.add_argument("--test_path", default="makeMat/test_data", help="the path of test data")
    parser.add_argument("--crop_size", default=[36, 36, 20], help="allow for multi-scale training")
    # FIX: type=float — without it a CLI-supplied threshold arrives as a str
    # and `predict >= opt.thresh` would raise TypeError at runtime.
    parser.add_argument("--thresh", type=float, default=0.5, help="i>thresh,i=1,else,i=0")
    opt = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # resnet3d(in_channels, out_channels) is provided by model.py (star import).
    model = resnet3d(1, 1).to(device)

    # ---- Dataloaders (fixed `trian_dataset` typo) ------------------------
    train_dataset = MyTrainData(opt.train_path, opt.crop_size)
    test_dataset = MyTrainData(opt.test_path, opt.crop_size)
    train_data = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.n_cpu,
        pin_memory=True,
    )
    test_data = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.n_cpu,
        pin_memory=True,
    )

    # ---- Optimizer and loss ---------------------------------------------
    optimizer = torch.optim.Adam(model.parameters())
    # BCELoss expects the model to emit probabilities in [0, 1].
    # Qualified torch.nn instead of relying on `nn` leaking from `from model import *`.
    criterion = torch.nn.BCELoss()

    for epoch in range(opt.epochs):
        # ---- Training phase ----------------------------------------------
        model.train()
        loss_sigma = 0.0
        correct = 0.0
        total = 0.0
        for batch_i, (imgs, targets) in enumerate(train_data):
            imgs = imgs.type(torch.FloatTensor).to(device)
            # Add the channel dimension: (N, D, H, W) -> (N, 1, D, H, W)
            # -- assumes the dataset yields 4-D volumes; TODO confirm.
            imgs = torch.unsqueeze(imgs, 1)
            targets = targets.type(torch.FloatTensor)  # .type() returns a CPU tensor
            targets = torch.squeeze(targets, 2)  # drop the singleton dim at index 2
            targets = targets.to(device)

            outputs = model(imgs)  # already on `device`; no extra .to() needed
            optimizer.zero_grad()
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            # FIX: accumulate the Python float, not the tensor — keeping the
            # tensor retained the whole autograd graph each step (memory leak).
            loss_sigma += loss.item()
            total += targets.size(0)
            # FIX: detach().clone() instead of deprecated torch.tensor(outputs).
            predict = outputs.detach().clone()
            predict[predict >= opt.thresh] = 1
            predict[predict < opt.thresh] = 0
            correct += (predict == targets).squeeze().sum().cpu().numpy()

            # Print every 10 iterations; loss is the 10-iteration average.
            if batch_i % 10 == 0:
                loss_avg = loss_sigma / 10.0
                loss_sigma = 0.0
                print("Training: Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}" \
                      .format(epoch + 1, opt.epochs, batch_i + 1, len(train_data), loss_avg, correct / total))

        # ---- Validation phase (every epoch) ------------------------------
        if epoch % 1 == 0:
            loss_sigma = 0.0
            model.eval()
            correct = 0
            total = 0.0
            # FIX: run evaluation under no_grad() instead of building the
            # graph and calling outputs.detach_() afterwards.
            with torch.no_grad():
                for batch_i, (imgs, targets) in enumerate(test_data):
                    imgs = imgs.type(torch.FloatTensor).to(device)
                    imgs = torch.unsqueeze(imgs, 1)  # add channel dimension
                    targets = targets.type(torch.FloatTensor)
                    targets = torch.squeeze(targets, 2)
                    targets = targets.to(device)

                    outputs = model(imgs)
                    loss = criterion(outputs, targets)
                    loss_sigma += loss.item()

                    total += targets.size(0)
                    predict = outputs.detach().clone()
                    predict[predict >= opt.thresh] = 1
                    predict[predict < opt.thresh] = 0
                    correct += (predict == targets).squeeze().sum().cpu().numpy()

                    if batch_i % 10 == 0:
                        loss_avg = loss_sigma / 10.0
                        loss_sigma = 0.0
                        print("test: Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}" \
                              .format(epoch + 1, opt.epochs, batch_i + 1, len(test_data), loss_avg, correct / total))

        # ---- Checkpoint once per epoch ------------------------------------
        torch.save(model, 'net_'+str(epoch)+'.pkl')  # whole model (structure + weights)
        torch.save(model.state_dict(), 'net_ALL_'+ str(epoch)+'.pkl')  # weights only
没有合适的资源?快使用搜索试试~ 我知道了~
温馨提示
使用pytorch搭建分类网络,针对Luna16数据集生成的疑似肺结节进行分类,实现假阳性剔除。其样本集保存为.Mat的形式(数据+类别),固定大小为24*40*40。 注意!!!!!原始getMat.py、traindataset.py有误(有bug),因为csdn无法修改资源,详情见https://blog.csdn.net/qq_24739717/article/details/101034728
资源推荐
资源详情
资源评论
收起资源包目录
Pytorch分类网络(肺结节假阳性剔除).zip (3个子文件)
Pytorch分类网络(肺结节假阳性剔除)
classfierMat.py 6KB
model.py 5KB
traindataset.py 3KB
共 3 条
- 1
资源评论
- 七小夕呀2019-12-15没有看到getmat.py啊 博主NotFound19112019-12-16getMat不在这里 https://download.csdn.net/download/qq_24739717/11782801
NotFound1911
- 粉丝: 465
- 资源: 3
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功