import os
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
from tqdm import tqdm
import torch.nn.functional as F
import matplotlib.pyplot as plt
# Dataset layout: <data_dir>/train and <data_dir>/val
data_dir = "./"
train_dir = os.path.join(data_dir, "train")
val_dir = os.path.join(data_dir, "val")
# Image size fed to the network and DataLoader batch size
IMG_HEIGHT = 256
IMG_WIDTH = 256
BATCH_SIZE = 32
# Built once at import time instead of on every call (the original
# reconstructed the Compose pipeline per image).
_IMAGE_TRANSFORM = transforms.Compose([
    transforms.ToTensor(),
    # Maps each channel from [0, 1] to [-1, 1].
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])


def preprocess_image(image_path):
    """Load an RGB image and return a normalized tensor.

    Args:
        image_path: Path to a .jpg/.png image file.

    Returns:
        Float tensor of shape (3, IMG_HEIGHT, IMG_WIDTH) with values in [-1, 1].
    """
    image = Image.open(image_path).convert("RGB").resize((IMG_WIDTH, IMG_HEIGHT))
    return _IMAGE_TRANSFORM(image)
# Built once at import time instead of on every call.
_LABEL_TRANSFORM = transforms.ToTensor()


def preprocess_label(label_path):
    """Load a grayscale segmentation mask and return it as a tensor.

    Args:
        label_path: Path to a .png mask file.

    Returns:
        Float tensor of shape (1, IMG_HEIGHT, IMG_WIDTH) with values in [0, 1].
    """
    # NEAREST resampling is required for masks: the default (bicubic)
    # interpolation would invent intermediate gray values between classes.
    label = Image.open(label_path).convert("L").resize(
        (IMG_WIDTH, IMG_HEIGHT), resample=Image.NEAREST
    )
    return _LABEL_TRANSFORM(label)
class MedicalDataset(Dataset):
    """Paired image/mask dataset.

    Images are the .jpg files under ``image_dir`` and masks the .png files
    under ``label_dir``; both lists are sorted so pairs line up by filename
    order.
    """

    def __init__(self, image_dir, label_dir):
        jpg_names = (name for name in os.listdir(image_dir) if name.endswith(".jpg"))
        png_names = (name for name in os.listdir(label_dir) if name.endswith(".png"))
        self.image_paths = sorted(os.path.join(image_dir, name) for name in jpg_names)
        self.label_paths = sorted(os.path.join(label_dir, name) for name in png_names)

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        return (
            preprocess_image(self.image_paths[idx]),
            preprocess_label(self.label_paths[idx]),
        )
# CBAM Attention Mechanism
class BasicConv(nn.Module):
    """Conv2d followed by optional BatchNorm2d and optional ReLU."""

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True,
                 bn=True, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding,
                              dilation=dilation, groups=groups, bias=bias)
        # Sub-modules are None (not Identity) when disabled, matching the
        # original state_dict layout.
        self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
        self.relu = nn.ReLU() if relu else None

    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        return out if self.relu is None else self.relu(out)
class ChannelGate(nn.Module):
    """Channel attention (CBAM): global avg/max pooled features pass through
    a shared bottleneck MLP; the summed logits are squashed with sigmoid and
    used to rescale each channel of the input."""

    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max']):
        super(ChannelGate, self).__init__()
        self.gate_channels = gate_channels
        hidden = gate_channels // reduction_ratio
        # nn.Flatten() is parameter-free, so the Sequential indices (and
        # hence state_dict keys) are unchanged.
        self.mlp = nn.Sequential(
            nn.Flatten(),
            nn.Linear(gate_channels, hidden),
            nn.ReLU(),
            nn.Linear(hidden, gate_channels),
        )
        self.pool_types = pool_types

    def forward(self, x):
        h, w = x.size(2), x.size(3)
        att_logits = None
        for pool_type in self.pool_types:
            if pool_type == 'avg':
                pooled = F.avg_pool2d(x, (h, w), stride=(h, w))
            elif pool_type == 'max':
                pooled = F.max_pool2d(x, (h, w), stride=(h, w))
            raw = self.mlp(pooled)
            att_logits = raw if att_logits is None else att_logits + raw
        scale = torch.sigmoid(att_logits).unsqueeze(2).unsqueeze(3).expand_as(x)
        return x * scale
class SpatialGate(nn.Module):
    """Spatial attention (CBAM): channel-pooled [max, mean] maps go through a
    7x7 conv; the sigmoid of the result rescales the input per pixel."""

    def __init__(self):
        super(SpatialGate, self).__init__()
        kernel_size = 7
        self.compress = ChannelPool()
        # relu=False: the conv output is fed straight into sigmoid.
        self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size - 1) // 2, relu=False)

    def forward(self, x):
        # (N, 1, H, W) attention map broadcasts over channels.
        attention = torch.sigmoid(self.spatial(self.compress(x)))
        return x * attention
class ChannelPool(nn.Module):
    """Collapse the channel dimension to a 2-channel [max, mean] map."""

    def forward(self, x):
        max_map = torch.max(x, dim=1, keepdim=True)[0]
        mean_map = torch.mean(x, dim=1, keepdim=True)
        return torch.cat((max_map, mean_map), dim=1)
class Flatten(nn.Module):
    """Flatten every dimension after the batch axis: (N, *) -> (N, prod(*))."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel gate followed by an
    optional spatial gate (disabled with ``no_spatial=True``)."""

    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max'], no_spatial=False):
        super(CBAM, self).__init__()
        self.ChannelGate = ChannelGate(gate_channels, reduction_ratio, pool_types)
        self.no_spatial = no_spatial
        if not no_spatial:
            self.SpatialGate = SpatialGate()

    def forward(self, x):
        out = self.ChannelGate(x)
        if self.no_spatial:
            return out
        return self.SpatialGate(out)
class UNet(nn.Module):
    """U-Net with CBAM attention after each encoder stage and the bottleneck.

    Input:  (N, 3, H, W) with H, W divisible by 16.
    Output: (N, 1, H, W) raw logits (no sigmoid applied here).
    """

    def __init__(self):
        super(UNet, self).__init__()

        def double_conv(c_in, c_out):
            # Two 3x3 convs; padding=1 preserves spatial size.
            return nn.Sequential(
                nn.Conv2d(c_in, c_out, kernel_size=3, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(c_out, c_out, kernel_size=3, padding=1),
                nn.ReLU(inplace=True),
            )

        def upsample(c_in, c_out):
            # Learned 2x upsampling.
            return nn.Sequential(
                nn.ConvTranspose2d(c_in, c_out, kernel_size=2, stride=2),
                nn.ReLU(inplace=True),
            )

        # Attribute names and creation order are kept identical to the
        # original so checkpoints remain loadable.
        self.enc1 = double_conv(3, 64)
        self.ca1 = CBAM(64)
        self.enc2 = double_conv(64, 128)
        self.ca2 = CBAM(128)
        self.enc3 = double_conv(128, 256)
        self.ca3 = CBAM(256)
        self.enc4 = double_conv(256, 512)
        self.ca4 = CBAM(512)
        self.pool = nn.MaxPool2d(2)
        self.bottleneck = double_conv(512, 1024)
        self.cabottleneck = CBAM(1024)
        # Decoder: each stage concatenates the upsampled features with the
        # matching encoder skip, doubling the channel count into dec*.
        self.up4 = upsample(1024, 512)
        self.dec4 = double_conv(1024, 512)
        self.up3 = upsample(512, 256)
        self.dec3 = double_conv(512, 256)
        self.up2 = upsample(256, 128)
        self.dec2 = double_conv(256, 128)
        self.up1 = upsample(128, 64)
        self.dec1 = double_conv(128, 64)
        self.final = nn.Conv2d(64, 1, kernel_size=1)

    def forward(self, x):
        # Encoder path; each skip is attention-refined before reuse.
        skip1 = self.ca1(self.enc1(x))
        skip2 = self.ca2(self.enc2(self.pool(skip1)))
        skip3 = self.ca3(self.enc3(self.pool(skip2)))
        skip4 = self.ca4(self.enc4(self.pool(skip3)))
        bottom = self.cabottleneck(self.bottleneck(self.pool(skip4)))
        # Decoder path: upsample, concat skip, fuse.
        up = self.dec4(torch.cat([self.up4(bottom), skip4], dim=1))
        up = self.dec3(torch.cat([self.up3(up), skip3], dim=1))
        up = self.dec2(torch.cat([self.up2(up), skip2], dim=1))
        up = self.dec1(torch.cat([self.up1(up), skip1], dim=1))
        return self.final(up)
def calculate_metrics(outputs, labels, threshold=0.5):
    """Compute pixel-wise binary segmentation metrics from raw logits.

    Args:
        outputs: Raw model logits, any shape.
        labels: Ground-truth tensor of the same shape with values in {0, 1}.
        threshold: Probability cutoff applied after sigmoid.

    Returns:
        Dict with float values: 'accuracy', 'dice', 'iou', 'sensitivity',
        'specificity'. Ratio metrics fall back to 0 when their denominator
        is empty (e.g. no positive pixels at all).
    """
    predictions = (torch.sigmoid(outputs) > threshold).float()
    correct = (predictions == labels).float().sum()
    total = labels.numel()
    accuracy = correct / total
    flat_predictions = predictions.view(-1)
    flat_labels = labels.view(-1)
    # Confusion-matrix counts over all pixels.
    tp = ((flat_predictions == 1) & (flat_labels == 1)).float().sum().item()
    fp = ((flat_predictions == 1) & (flat_labels == 0)).float().sum().item()
    fn = ((flat_predictions == 0) & (flat_labels == 1)).float().sum().item()
    tn = ((flat_predictions == 0) & (flat_labels == 0)).float().sum().item()
    dice = (2 * tp) / (2 * tp + fp + fn) if (2 * tp + fp + fn) > 0 else 0
    iou = tp / (tp + fp + fn) if (tp + fp + fn) > 0 else 0
    sensitivity = tp / (tp + fn) if (tp + fn) > 0 else 0
    specificity = tn / (tn + fp) if (tn + fp) > 0 else 0
    return {
        'accuracy': accuracy.item(),
        'dice': dice,
        'iou': iou,
        'sensitivity': sensitivity,
        'specificity': specificity,
    }