import torch
import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
from functools import partial
# import Constants
# Shared activation used by every block below: ReLU applied in-place
# (saves one intermediate allocation per call).
nonlinearity = partial(F.relu, inplace=True)
class DACblock(nn.Module):
    """Dense Atrous Convolution (DAC) block from CE-Net.

    Four parallel branches cascade 3x3 convolutions with dilation rates
    1, 3 and 5, so each branch covers a different receptive field; all
    branches are fused with the input via a residual sum.  Input and
    output both have ``channel`` feature maps and the same spatial size.
    """

    def __init__(self, channel):
        super(DACblock, self).__init__()
        self.dilate1 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
        self.dilate2 = nn.Conv2d(channel, channel, kernel_size=3, dilation=3, padding=3)
        self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=5, padding=5)
        self.conv1x1 = nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0)
        # Zero every conv bias; weights keep PyTorch's default initialisation.
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)) and module.bias is not None:
                module.bias.data.zero_()

    def forward(self, x):
        branch1 = F.relu(self.dilate1(x), inplace=True)
        branch2 = F.relu(self.conv1x1(self.dilate2(x)), inplace=True)
        branch3 = F.relu(self.conv1x1(self.dilate2(self.dilate1(x))), inplace=True)
        branch4 = F.relu(self.conv1x1(self.dilate3(self.dilate2(self.dilate1(x)))), inplace=True)
        return x + branch1 + branch2 + branch3 + branch4
class DACblock_without_atrous(nn.Module):
    """Ablation of the DAC block with every dilation rate set to 1.

    Same four-branch cascade-and-sum topology as the atrous version, but
    all 3x3 convolutions use ordinary (dilation=1) kernels, so receptive
    field growth comes only from stacking convolutions.
    """

    def __init__(self, channel):
        super(DACblock_without_atrous, self).__init__()
        self.dilate1 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
        self.dilate2 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
        self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
        self.conv1x1 = nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0)
        # Conv biases start at zero; weights keep the default init.
        for sub in self.modules():
            if isinstance(sub, (nn.Conv2d, nn.ConvTranspose2d)):
                if sub.bias is not None:
                    sub.bias.data.zero_()

    def forward(self, x):
        out1 = F.relu(self.dilate1(x), inplace=True)
        out2 = F.relu(self.conv1x1(self.dilate2(x)), inplace=True)
        two_stack = self.dilate2(self.dilate1(x))
        out3 = F.relu(self.conv1x1(two_stack), inplace=True)
        three_stack = self.dilate3(self.dilate2(self.dilate1(x)))
        out4 = F.relu(self.conv1x1(three_stack), inplace=True)
        return x + out1 + out2 + out3 + out4
class DACblock_with_inception(nn.Module):
    """Inception-style variant of the DAC block.

    A 1x1 branch and a 1x1->3x3 branch are concatenated, fused back to
    ``channel`` maps by a 1x1 convolution over 2*channel inputs, passed
    through the 1x1 conv once more, and added to the input residually.
    """

    def __init__(self, channel):
        super(DACblock_with_inception, self).__init__()
        self.dilate1 = nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0)
        self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
        # Fuses the concatenation of the two branches back to `channel` maps.
        self.conv1x1 = nn.Conv2d(2 * channel, channel, kernel_size=1, dilation=1, padding=0)
        # Conv biases start at zero.
        for layer in self.modules():
            if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d)) and layer.bias is not None:
                layer.bias.data.zero_()

    def forward(self, x):
        path_a = F.relu(self.dilate1(x), inplace=True)
        path_b = F.relu(self.dilate3(self.dilate1(x)), inplace=True)
        fused = F.relu(self.conv1x1(torch.cat([path_a, path_b], 1)), inplace=True)
        return x + F.relu(self.dilate1(fused), inplace=True)
class DACblock_with_inception_blocks(nn.Module):
    """Inception-blocks ablation of the DAC block.

    Runs 1x1, 1x1->3x3 and 1x1->5x5 convolution branches plus a
    stride-1 3x3 max-pool branch in parallel and sums the four results.
    Channel count and spatial size are preserved.  Note: no residual
    connection to ``x`` here, unlike the other DAC variants.
    """

    def __init__(self, channel):
        super(DACblock_with_inception_blocks, self).__init__()
        self.conv1x1 = nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0)
        self.conv3x3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
        self.conv5x5 = nn.Conv2d(channel, channel, kernel_size=5, dilation=1, padding=2)
        self.pooling = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        # Conv biases start at zero.
        for layer in self.modules():
            if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d)) and layer.bias is not None:
                layer.bias.data.zero_()

    def forward(self, x):
        # Each branch applies the shared 1x1 reduction to a fresh conv output
        # (the in-place ReLU forbids reusing a single conv1x1 result).
        branch_1 = F.relu(self.conv1x1(x), inplace=True)
        branch_3 = F.relu(self.conv3x3(self.conv1x1(x)), inplace=True)
        branch_5 = F.relu(self.conv5x5(self.conv1x1(x)), inplace=True)
        branch_pool = self.pooling(x)
        return branch_1 + branch_3 + branch_5 + branch_pool
class PSPModule(nn.Module):
    """Pyramid Scene Parsing pooling module.

    Pools the feature map to each size in ``sizes`` via adaptive average
    pooling, projects each pooled map with a bias-free 1x1 conv, upsamples
    all of them back to the input resolution, concatenates them with the
    original features and fuses everything with a 1x1 bottleneck + ReLU.

    Args:
        features: number of input channels.
        out_features: number of output channels after the bottleneck.
        sizes: pyramid pooling grid sizes.
    """

    def __init__(self, features, out_features=1024, sizes=(2, 3, 6, 14)):
        super().__init__()
        # NOTE: removed the dead `self.stages = []` that was immediately
        # overwritten by the ModuleList below.
        self.stages = nn.ModuleList([self._make_stage(features, size) for size in sizes])
        # One pooled map per pyramid size plus the original features.
        self.bottleneck = nn.Conv2d(features * (len(sizes) + 1), out_features, kernel_size=1)
        self.relu = nn.ReLU()

    def _make_stage(self, features, size):
        # Adaptive pooling to (size, size) followed by a 1x1 projection.
        prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
        conv = nn.Conv2d(features, features, kernel_size=1, bias=False)
        return nn.Sequential(prior, conv)

    def forward(self, feats):
        h, w = feats.size(2), feats.size(3)
        # F.interpolate replaces the deprecated F.upsample; align_corners=False
        # matches the behaviour F.upsample defaulted to.
        priors = [F.interpolate(stage(feats), size=(h, w), mode='bilinear',
                                align_corners=False) for stage in self.stages] + [feats]
        bottle = self.bottleneck(torch.cat(priors, 1))
        return self.relu(bottle)
class SPPblock(nn.Module):
    """Spatial Pyramid Pooling block from CE-Net.

    Max-pools the input at four scales (strides 2, 3, 5, 6), projects each
    level to a single channel with a shared 1x1 conv, upsamples each back
    to the input resolution, and concatenates all levels with the input.
    Output has ``in_channels + 4`` channels.
    """

    def __init__(self, in_channels):
        super(SPPblock, self).__init__()
        self.pool1 = nn.MaxPool2d(kernel_size=[2, 2], stride=2)
        self.pool2 = nn.MaxPool2d(kernel_size=[3, 3], stride=3)
        self.pool3 = nn.MaxPool2d(kernel_size=[5, 5], stride=5)
        self.pool4 = nn.MaxPool2d(kernel_size=[6, 6], stride=6)
        # Shared single-channel projection applied to every pyramid level.
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=1, padding=0)

    def forward(self, x):
        h, w = x.size(2), x.size(3)
        # Fixes: keep activations in locals instead of `self.layerN` (the old
        # code pinned them on the module, retaining memory after forward and
        # mutating module state per call), and use F.interpolate instead of
        # the deprecated F.upsample.
        levels = [
            F.interpolate(self.conv(pool(x)), size=(h, w), mode='bilinear',
                          align_corners=False)
            for pool in (self.pool1, self.pool2, self.pool3, self.pool4)
        ]
        return torch.cat(levels + [x], 1)
class DecoderBlock(nn.Module):
    """CE-Net decoder stage.

    1x1 channel squeeze to ``in_channels // 4``, a stride-2 transposed 3x3
    convolution that doubles the spatial resolution, then a 1x1 expansion
    to ``n_filters``.  Every convolution is followed by batch norm and an
    in-place ReLU.
    """

    def __init__(self, in_channels, n_filters):
        super(DecoderBlock, self).__init__()
        squeezed = in_channels // 4
        self.conv1 = nn.Conv2d(in_channels, squeezed, 1)
        self.norm1 = nn.BatchNorm2d(squeezed)
        self.relu1 = partial(F.relu, inplace=True)
        # output_padding=1 makes the transposed conv exactly double H and W.
        self.deconv2 = nn.ConvTranspose2d(squeezed, squeezed, 3, stride=2,
                                          padding=1, output_padding=1)
        self.norm2 = nn.BatchNorm2d(squeezed)
        self.relu2 = partial(F.relu, inplace=True)
        self.conv3 = nn.Conv2d(squeezed, n_filters, 1)
        self.norm3 = nn.BatchNorm2d(n_filters)
        self.relu3 = partial(F.relu, inplace=True)

    def forward(self, x):
        x = self.relu1(self.norm1(self.conv1(x)))
        x = self.relu2(self.norm2(self.deconv2(x)))
        x = self.relu3(self.norm3(self.conv3(x)))
        return x
class CE_Net_(nn.Module):
def __init__(self, num_classes=1, num_channels=3):
super(CE_Net_, self).__init__()
filters = [64, 128, 256, 512]
resnet = models.resnet34(pretrained=True)
self.firstconv = resnet.conv1
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
self.dblock = DACblock(512)
self.spp = SPPblock(512)
self.decoder4 = DecoderBlock(516, filters[2])
self.decoder3 = DecoderBlock(filters[2], filters[1])
se
没有合适的资源?快使用搜索试试~ 我知道了~
温馨提示
使用FCN、Unet、Unet++、Segnet、R2Unet、DenseNet、DenseUnet、Cenet、ChannelNet、AttentionUnet等网络对视网膜血管进行分割。
资源推荐
资源详情
资源评论
收起资源包目录
基于深度学习的视网膜分割 (163个子文件)
24_manual1.gif 13KB
36_manual1.gif 13KB
11_manual1.gif 13KB
13_manual1.gif 13KB
06_manual2.gif 13KB
19_manual2.gif 12KB
13_manual2.gif 12KB
06_manual1.gif 12KB
28_manual1.gif 12KB
22_manual1.gif 12KB
09_manual2.gif 12KB
20_manual2.gif 12KB
02_manual1.gif 12KB
05_manual1.gif 12KB
27_manual1.gif 12KB
16_manual2.gif 12KB
11_manual2.gif 12KB
02_manual2.gif 12KB
10_manual1.gif 12KB
01_manual2.gif 12KB
39_manual1.gif 11KB
16_manual1.gif 11KB
03_manual1.gif 11KB
07_manual1.gif 11KB
01_manual1.gif 11KB
29_manual1.gif 11KB
38_manual1.gif 11KB
25_manual1.gif 11KB
19_manual1.gif 11KB
08_manual1.gif 11KB
12_manual1.gif 11KB
34_manual1.gif 11KB
09_manual1.gif 11KB
33_manual1.gif 11KB
12_manual2.gif 11KB
37_manual1.gif 11KB
18_manual2.gif 11KB
04_manual1.gif 11KB
35_manual1.gif 11KB
14_manual1.gif 11KB
32_manual1.gif 11KB
04_manual2.gif 11KB
17_manual1.gif 11KB
03_manual2.gif 10KB
14_manual2.gif 10KB
17_manual2.gif 10KB
40_manual1.gif 10KB
10_manual2.gif 10KB
05_manual2.gif 10KB
30_manual1.gif 10KB
26_manual1.gif 10KB
07_manual2.gif 10KB
21_manual1.gif 10KB
18_manual1.gif 10KB
08_manual2.gif 10KB
15_manual1.gif 10KB
15_manual2.gif 10KB
20_manual1.gif 9KB
31_manual1.gif 9KB
23_manual1.gif 8KB
23_training_mask.gif 3KB
36_training_mask.gif 3KB
33_training_mask.gif 3KB
24_training_mask.gif 3KB
04_test_mask.gif 3KB
16_test_mask.gif 3KB
07_test_mask.gif 3KB
35_training_mask.gif 3KB
22_training_mask.gif 3KB
09_test_mask.gif 3KB
03_test_mask.gif 3KB
28_training_mask.gif 3KB
39_training_mask.gif 3KB
37_training_mask.gif 3KB
11_test_mask.gif 3KB
20_test_mask.gif 3KB
40_training_mask.gif 3KB
27_training_mask.gif 3KB
21_training_mask.gif 3KB
30_training_mask.gif 3KB
15_test_mask.gif 3KB
14_test_mask.gif 3KB
18_test_mask.gif 3KB
38_training_mask.gif 3KB
31_training_mask.gif 3KB
10_test_mask.gif 3KB
02_test_mask.gif 3KB
17_test_mask.gif 3KB
05_test_mask.gif 3KB
12_test_mask.gif 3KB
34_training_mask.gif 3KB
32_training_mask.gif 3KB
19_test_mask.gif 3KB
08_test_mask.gif 3KB
25_training_mask.gif 3KB
06_test_mask.gif 3KB
01_test_mask.gif 3KB
29_training_mask.gif 3KB
26_training_mask.gif 3KB
13_test_mask.gif 3KB
共 163 条
- 1
- 2
只搬烫手的砖
- 粉丝: 1802
- 资源: 34
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功
- 1
- 2
- 3
- 4
前往页