import numbers
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.nn.parallel import data_parallel
from functools import wraps
import random
class _Residual_Block(nn.Module):
# 16 16+16 1
def __init__(self, in_planes, out_planes, groups=1, wide_width=True, downsample=False, upsample=False):
super().__init__()
self.downsample = downsample
self.upsample = upsample
middle_planes = (in_planes if in_planes > out_planes else out_planes) if wide_width else out_planes
self.conv1 = nn.Conv2d(in_planes, middle_planes, 3, 1, 1, bias=False, groups=groups)
self.relu1 = nn.LeakyReLU(0.02, inplace=True)
self.conv2 = nn.Conv2d(middle_planes, out_planes, 3, 1, 1, bias=False, groups=groups)
if in_planes != out_planes:
self.translation = nn.Conv2d(in_planes, out_planes, 1, 1, 0, bias=False, groups=groups)
else:
self.translation = None
def forward(self, x):
if self.upsample:
x = F.interpolate(x, scale_factor=2, mode='bilinear')
identity = x
out = self.conv1(x)
out = self.relu1(out)
out = self.conv2(out)
if self.translation is not None:
identity = self.translation(identity)
out += identity
if self.downsample:
out = F.avg_pool2d(out, 2)
return out
# 残差块 4 16 16+16 downsample: i>3? True: false
def make_layer(block, blocks, in_planes, out_planes, groups=1, norm_layer=nn.BatchNorm2d, downsample=False, upsample=False):
    """Stack `blocks` residual blocks into an nn.Sequential.

    The first block maps in_planes -> out_planes and carries the
    downsample/upsample flags; the remaining blocks-1 blocks keep
    out_planes channels and neither rescale nor resize.

    BUG FIX: the original passed `norm_layer` positionally into the
    block's `wide_width` parameter slot — a class object is truthy, so it
    merely acted as wide_width=True and was never used as a norm layer.
    `norm_layer` is kept in the signature for caller compatibility but is
    no longer forwarded; the block's default wide_width=True preserves
    the previous effective behavior.
    """
    assert blocks >= 1
    layers = [block(in_planes, out_planes, groups, downsample=downsample, upsample=upsample)]
    for _ in range(1, blocks):
        layers.append(block(out_planes, out_planes, groups))
    return nn.Sequential(*layers)
class _Memory_Block(nn.Module):
    """Memory bank of `kdim` prototype vectors, each of dimension `hdim`.

    forward() softly reconstructs every spatial feature vector as a
    convex combination of the memory slots; update() maintains the slots
    with an exponential moving average of the features hard-assigned
    (nearest slot by cosine similarity) to each slot.
    """

    # DerainNet instantiates this at the bottleneck width, kdim=512, rate=0.999.
    def __init__(self, hdim, kdim, moving_average_rate=0.999):
        super().__init__()
        self.c = hdim  # feature (channel) dimension of each slot
        self.k = kdim  # number of memory slots
        self.moving_average_rate = moving_average_rate  # EMA decay toward old values
        # Slot storage; overwritten manually via EMA, not trained by gradients.
        self.units = nn.Embedding(kdim, hdim)

    def update(self, x, score, m=None):
        '''
        EMA-update the memory from a batch of features.

        x: (n, c) flattened feature vectors (detached before use)
        score: (n, k) feature-to-slot similarities
        m: (k, c) current memory; defaults to self.units' weights
        Returns the (k, c) updated memory. NOTE: self.units is only
        overwritten when self.training is True; in eval mode the updated
        tensor is returned but not persisted.
        '''
        if m is None:
            m = self.units.weight.data
        x = x.detach()
        # Hard-assign each feature to its best-matching slot.
        embed_ind = torch.max(score, dim=1)[1]  # (n, )
        embed_onehot = F.one_hot(embed_ind, self.k).type(x.dtype)  # (n, k)
        embed_onehot_sum = embed_onehot.sum(0)  # features assigned per slot, (k, )
        embed_sum = x.transpose(0, 1) @ embed_onehot  # (c, k)
        # Mean assigned feature per slot; epsilon guards empty slots.
        embed_mean = embed_sum / (embed_onehot_sum + 1e-6)
        new_data = m * self.moving_average_rate + embed_mean.t() * (1 - self.moving_average_rate)
        if self.training:
            self.units.weight.data = new_data
        return new_data

    def forward(self, x, update_flag=True):
        '''
        x: (b, c, h, w) feature map with c == self.c
        Returns (out, score): out is the memory-reconstructed feature map,
        same shape as x; score is the (b*h*w, k) cosine-similarity matrix
        against the (possibly just-updated) memory.
        '''
        b, c, h, w = x.size()
        assert c == self.c
        k, c = self.k, self.c
        # Flatten spatial positions into a batch of feature vectors.
        x = x.permute(0, 2, 3, 1)
        x = x.reshape(-1, c)  # (n, c), n = b*h*w
        m = self.units.weight.data  # (k, c)
        # Cosine similarity between features and slots.
        xn = F.normalize(x, dim=1)  # (n, c)
        mn = F.normalize(m, dim=1)  # (k, c)
        score = torch.matmul(xn, mn.t())  # (n, k)
        if update_flag:
            # Re-score against the EMA-updated memory so the read-out
            # below uses the freshest slot contents.
            m = self.update(x, score, m)
            mn = F.normalize(m, dim=1)  # (k, c)
            score = torch.matmul(xn, mn.t())  # (n, k)
        # Soft read-out: each position becomes a softmax-weighted mix of slots.
        soft_label = F.softmax(score, dim=1)
        out = torch.matmul(soft_label, m)  # (n, c)
        out = out.view(b, h, w, c).permute(0, 3, 1, 2)
        return out, score
class _Fuse_Block(nn.Module):
def __init__(self, x_dim, res_dim):
super().__init__()
self.norm = nn.BatchNorm2d(res_dim, affine=False)
self.alpha = nn.Conv2d(res_dim, 1, 1, 1, 0, bias=False)
self.gamma = nn.Conv2d(x_dim, res_dim, 1, 1, 0, bias=False)
self.beta = nn.Conv2d(x_dim, res_dim, 1, 1, 0, bias=False)
self.affine_gamma = nn.Parameter(torch.zeros(1, res_dim, 1, 1))
self.affine_beta = nn.Parameter(torch.zeros(1, res_dim, 1, 1))
self.gamma.weight.data.zero_()
self.beta.weight.data.zero_()
def forward(self, x, res):
res = self.norm(res)
gamma = self.gamma(x) + 1
beta = self.beta(x)
out0 = res * gamma + beta
out1 = res * (self.affine_gamma + 1) + self.affine_beta
alpha = torch.sigmoid(self.alpha(res))
if self.training:
drop = torch.rand((res.shape[0], 1, 1, 1), device=res.device).gt(0.2).float()
alpha = alpha * drop
out = (1 - alpha) * out0 + alpha * out1
return out
class DerainNet(nn.Module):
    """Encoder / memory / decoder image de-raining network.

    The encoder widens channels by `delta` per stage (halving resolution
    for the first `num_scales` stages); a memory block reconstructs the
    bottleneck features; the decoder mirrors the encoder, merging skip
    connections via _Fuse_Block. The decoder output is subtracted from
    the head features (presumably removing the predicted rain component)
    before the tail conv and tanh.
    """

    def __init__(self, num_blocks=5, ngf=16, num_layers=4, delta=16, num_scales=3, kdim=512, moving_average_rate=0.999):
        super().__init__()
        self.num_blocks = num_blocks  # 5
        self.head = nn.Conv2d(3, ngf, 3, 1, 1, bias=False)
        cc = ngf  # running channel count, starts at ngf (16)
        self.enc = nn.ModuleDict()
        for i in range(num_blocks):
            # Each encoder stage: num_layers residual blocks, cc -> cc+delta;
            # only the first num_scales stages downsample 2x.
            self.enc['enc{}'.format(i)] = make_layer(_Residual_Block, num_layers, cc, cc+delta, downsample=i<num_scales)
            cc += delta
        # Memory bank over the bottleneck width (cc = ngf + num_blocks*delta).
        self.memory = _Memory_Block(cc, kdim, moving_average_rate)
        self.dec = nn.ModuleDict()
        self.fuse = nn.ModuleDict()
        for i in range(num_blocks):
            # Mirror of the encoder: the last num_scales stages upsample 2x.
            self.dec['dec{}'.format(i)] = make_layer(_Residual_Block, num_layers, cc, cc-delta, upsample=i>num_blocks-num_scales-1)
            cc -= delta
            if i < num_blocks-1:
                # Fuse decoder output with the matching encoder skip
                # (both have cc channels at this point).
                self.fuse['fuse{}'.format(i)] = _Fuse_Block(cc, cc)
        self.tail = nn.Conv2d(ngf, 3, 3, 1, 1)

    def forward(self, x):
        x = self.head(x)
        xi = x  # keep head features for the residual subtraction below
        res = []
        for i in range(self.num_blocks):
            x = self.enc['enc{}'.format(i)](x)
            res.append(x)
        x, _ = self.memory(x)
        res = res[::-1]  # deepest encoder feature first
        x = self.dec['dec0'](x)
        for i in range(self.num_blocks-1):
            # res[i+1] is the encoder skip matching dec{i}'s output size.
            x = self.fuse['fuse{}'.format(i)](x, res[i+1])
            x = self.dec['dec{}'.format(i+1)](x)
        x = xi - x  # subtract the decoder's prediction from the head features
        x = self.tail(x)
        x = torch.tanh(x)  # output in [-1, 1]
        return x

    def feature_extract(self, x):
        """Run the same pipeline as forward() but collect and return the
        head + encoder feature maps; the tail conv and tanh are skipped and
        the decoded result is discarded."""
        feature = []
        x = self.head(x)
        feature.append(x)
        xi = x
        res = []
        for i in range(self.num_blocks):
            x = self.enc['enc{}'.format(i)](x)
            feature.append(x)
            res.append(x)
        x, _ = self.memory(x)
        res = res[::-1]
        x = self.dec['dec0'](x)
        for i in range(self.num_blocks - 1):
            x = self.fuse['fuse{}'.format(i)](x, res[i + 1])
            x = self.dec['dec{}'.format(i + 1)](x)
        x = xi - x
        return feature
class Discriminator(nn.Module):
def __init__(self, num_blocks=5, in_chs=16, num_layers=4, delta_chs=16, num_scales=4):
super(Discriminator, self).__init__()
self.num_blocks = num_blocks # 5
self.num_scales = num_scales
self.head
没有合适的资源?快使用搜索试试~ 我知道了~
温馨提示
基于MOSS网络+CR损失实现图像去雨python源码(带训练好的模型).zip基于MOSS网络+CR损失实现图像去雨python源码(带训练好的模型).zip基于MOSS网络+CR损失实现图像去雨python源码(带训练好的模型).zip基于MOSS网络+CR损失实现图像去雨python源码(带训练好的模型).zip基于MOSS网络+CR损失实现图像去雨python源码(带训练好的模型).zip
资源推荐
资源详情
资源评论
收起资源包目录
基于MOSS网络+CR损失实现图像去雨python源码(带训练好的模型).zip (21个子文件)
utils
data_RGB.py 472B
SSIM.py 3KB
dataset_RGB.py 7KB
PSNR.py 462B
__pycache__
data_RGB.cpython-38.pyc 776B
PSNR.cpython-38.pyc 641B
data_RGB.cpython-36.pyc 766B
SSIM.cpython-38.pyc 3KB
dataset_RGB.cpython-36.pyc 5KB
dataset_RGB.cpython-38.pyc 6KB
model
networks.py 10KB
CR.py 2KB
__pycache__
CR.cpython-36.pyc 2KB
CR.cpython-38.pyc 2KB
networks.cpython-36.pyc 6KB
networks.cpython-38.pyc 6KB
test
featuremap.py 9KB
train.py 8KB
test.py 2KB
result
model_best.pth 32.42MB
model_latest.pth 32.42MB
共 21 条
- 1
资源评论
z同学的编程之路
- 粉丝: 1808
- 资源: 2129
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功