import os
import torch
import torch.nn as nn
import numpy as np
from tqdm import tqdm  # progress bar
import segmentation_models_pytorch as smp
from torch.utils.data import DataLoader
from dataset import *  # provides BrainMRIdataset, the train/test file lists, and transforms

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

train_data = BrainMRIdataset(train_img, train_label, train_transformer)
test_data = BrainMRIdataset(test_img, test_label, test_transformer)

dl_train = DataLoader(train_data, batch_size=4, shuffle=True)
dl_test = DataLoader(test_data, batch_size=4, shuffle=False)  # no need to shuffle for evaluation
model = smp.Unet(
    encoder_name="resnet34",     # choose encoder, e.g. mobilenet_v2 or efficientnet-b7
    encoder_weights="imagenet",  # use `imagenet` pre-trained weights for encoder initialization
    in_channels=3,               # model input channels (1 for gray-scale images, 3 for RGB, etc.)
    classes=2,                   # model output channels (number of classes in your dataset)
)
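# Note: with the 256x256 RGB inputs this script assumes, model(img) returns raw
# logits of shape (batch, 2, 256, 256), one channel per class, which is exactly
# what nn.CrossEntropyLoss expects below. A quick hypothetical self-check:
#   assert model(torch.randn(1, 3, 256, 256)).shape == (1, 2, 256, 256)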
# Quick sanity check: push one batch through the model and the loss.
model = model.to(device)
img, label = next(iter(dl_train))
img, label = img.to(device), label.to(device)
pred = model(img)

loss_fn = nn.CrossEntropyLoss()  # cross-entropy loss; expects logits plus integer (long) class masks
print(loss_fn(pred, label))

optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
def train_epoch(epoch, model, trainloader, testloader):
    correct = 0
    total = 0
    running_loss = 0
    epoch_iou = []  # per-batch intersection over union

    model.train()
    for x, y in tqdm(trainloader):
        x, y = x.to(device), y.to(device)
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        with torch.no_grad():
            y_pred = torch.argmax(y_pred, dim=1)  # logits -> per-pixel class labels
            correct += (y_pred == y).sum().item()
            total += y.size(0)
            running_loss += loss.item()
            # Binary IoU: any nonzero pixel counts as foreground.
            intersection = torch.logical_and(y, y_pred)
            union = torch.logical_or(y, y_pred)
            batch_iou = torch.sum(intersection) / torch.sum(union)
            epoch_iou.append(batch_iou.item())
    # loss.item() is already a per-batch mean, so dividing by the dataset size
    # gives a small value; it is still a consistent per-epoch trend indicator.
    epoch_loss = running_loss / len(trainloader.dataset)
    epoch_acc = correct / (total * 256 * 256)  # pixel accuracy, assuming 256x256 masks
    test_correct = 0
    test_total = 0
    test_running_loss = 0
    epoch_test_iou = []

    # Save a checkpoint every epoch; create the directory first so the save cannot fail.
    os.makedirs('./Results2/weights/unet_weight', exist_ok=True)
    torch.save(model.state_dict(), './Results2/weights/unet_weight/{}.pth'.format(epoch))
    model.eval()
    with torch.no_grad():
        for x, y in tqdm(testloader):
            x, y = x.to(device), y.to(device)
            y_pred = model(x)
            loss = loss_fn(y_pred, y)
            y_pred = torch.argmax(y_pred, dim=1)
            test_correct += (y_pred == y).sum().item()
            test_total += y.size(0)
            test_running_loss += loss.item()
            intersection = torch.logical_and(y, y_pred)  # overlap between prediction and ground truth
            union = torch.logical_or(y, y_pred)          # union of prediction and ground truth
            batch_iou = torch.sum(intersection) / torch.sum(union)
            epoch_test_iou.append(batch_iou.item())
    epoch_test_loss = test_running_loss / len(testloader.dataset)
    epoch_test_acc = test_correct / (test_total * 256 * 256)  # correct pixels over total pixels
    print('epoch:', epoch,
          'loss:', round(epoch_loss, 3),
          'accuracy:', round(epoch_acc, 3),
          'IOU:', round(np.mean(epoch_iou), 3),
          'test_loss:', round(epoch_test_loss, 3),
          'test_accuracy:', round(epoch_test_acc, 3),
          'test_iou:', round(np.mean(epoch_test_iou), 3))
    return epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc
if __name__ == "__main__":
    epochs = 5
    for epoch in range(epochs):
        train_epoch(epoch, model, dl_train, dl_test)
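After training, any of the saved checkpoints can be reloaded for inference. The snippet below is a minimal sketch rather than one of the packaged scripts (the archive ships its own testunet.py and test.py); it assumes the epoch-4 checkpoint path written by the loop above and the same dataset.py interface.

# Hypothetical inference sketch; paths and names assume the training setup above.
import torch
import segmentation_models_pytorch as smp
from torch.utils.data import DataLoader
from dataset import *

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

model = smp.Unet(encoder_name="resnet34", encoder_weights=None,
                 in_channels=3, classes=2)
model.load_state_dict(torch.load('./Results2/weights/unet_weight/4.pth',
                                 map_location=device))
model = model.to(device)
model.eval()

test_data = BrainMRIdataset(test_img, test_label, test_transformer)
dl_test = DataLoader(test_data, batch_size=4, shuffle=False)

with torch.no_grad():
    img, label = next(iter(dl_test))
    pred = torch.argmax(model(img.to(device)), dim=1)  # (batch, 256, 256) class map
    print(pred.shape, pred.unique())                   # classes should be {0, 1}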
U-Net brain tumor segmentation: complete code
The package contains 2000 files: 1989 .tif images, 7 .py scripts, and 3 .pyc caches.
Complete U-Net brain tumor segmentation code, including the dataset, network, training, and testing; the included weights were trained for only 20 epochs. Walkthrough: https://blog.csdn.net/qq_45845375/article/details/135588237
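Note that segmentation_models_pytorch is a pip package (pip install segmentation-models-pytorch), not a file inside the archive. The training script also star-imports from the packaged dataset.py, which is not reproduced on this page; the sketch below shows the interface the script relies on, under the assumption that images and masks are 256x256 .tif files. The real dataset.py may differ in detail.

# Hypothetical sketch of the dataset.py interface assumed by the training script.
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset

class BrainMRIdataset(Dataset):
    def __init__(self, img_paths, label_paths, transformer):
        self.img_paths = img_paths      # list of .tif image paths
        self.label_paths = label_paths  # list of matching mask paths
        self.transformer = transformer  # e.g. torchvision ToTensor (+ normalization)

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, i):
        img = self.transformer(Image.open(self.img_paths[i]))  # float tensor, (3, 256, 256)
        mask = np.array(Image.open(self.label_paths[i]))
        mask = (mask > 0).astype('int64')                      # binarize to class ids {0, 1}
        return img, torch.from_numpy(mask)                     # (256, 256) long mask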
Package contents (2000 files; the 1989 .tif data files are summarized at the end):
unet.png 96KB
trainunet.py 4KB
train.py 3KB
model.py 3KB
dataset.py 3KB
testunet.py 2KB
test.py 1KB
__init__.py 0B
model.cpython-311.pyc 6KB
dataset.cpython-311.pyc 5KB
__init__.cpython-311.pyc 147B
TCGA_*.tif: 1989 MRI slice images of roughly 198KB each, grouped by TCGA case ID, e.g. TCGA_FG_6690_20020226_*.tif and TCGA_FG_A60K_20040224_*.tif.