""" utils.py
"""
import os
import torch
import numpy as np
import torchvision
import torchvision.transforms as transforms
import torchvision.utils as vutils
import time
import warnings
name_dataparallel = torch.nn.DataParallel.__name__
log10 = np.log(10)
def compute_psnr(x, label, max_diff):
    """Compute the peak signal-to-noise ratio (PSNR, in dB) between two images.

    Args:
        x: predicted image tensor; clamped into the nominal dynamic range
            before comparison so out-of-range values do not inflate the error.
        label: ground-truth tensor of the same shape.
        max_diff: span of the pixel range; must be 255 (range [0, 255]),
            1 (range [0, 1]) or 2 (range [-1, 1]).

    Returns:
        A 0-dim tensor holding the PSNR in decibels (inf when x == label).
    """
    assert max_diff in [255, 1, 2]
    if max_diff == 255:
        x = x.clamp(0, 255)
    elif max_diff == 1:
        x = x.clamp(0, 1)
    else:  # max_diff == 2, signal in [-1, 1]
        x = x.clamp(-1, 1)
    mse = ((x - label) ** 2).mean()
    # PSNR = 10 * log10(peak^2 / MSE); torch.log10 replaces the original's
    # manual natural-log divided by a precomputed log(10) constant.
    return 10 * torch.log10(max_diff ** 2 / mse)
def lr_warmup(epoch, warmup_length):
    """Learning-rate warm-up multiplier.

    Ramps smoothly from exp(-5) toward 1.0 over the first ``warmup_length``
    epochs (Gaussian-shaped ramp), then stays at 1.0.
    """
    if epoch >= warmup_length:
        return 1.0
    progress = max(float(epoch), 0.0) / float(warmup_length)
    remaining = 1.0 - progress
    return np.exp(-5.0 * remaining * remaining)
def load_optimizer(optimizer, model, path, epoch=None):
    """Restore an optimizer state saved by ``save_optimizer``.

    Checkpoints are looked up under ``path`` with the naming scheme
    ``<OptimizerClass>_<ModelClass>_epoch<N>.pth``.

    Args:
        optimizer: optimizer whose ``state_dict`` is loaded in place.
        model: model the optimizer belongs to; only its class name is used
            to build the filename. A DataParallel wrapper is unwrapped first.
        path: directory containing the checkpoints.
        epoch: specific epoch to restore; when None, the highest epoch in
            [0, 9999] present on disk is used.

    Returns:
        The epoch that was restored, or -1 when no checkpoint was found.
    """
    if type(model).__name__ == torch.nn.DataParallel.__name__:
        model = model.module
    prefix = type(optimizer).__name__ + '_' + type(model).__name__
    if epoch is None:
        # Scan backwards so the newest available checkpoint wins.
        for i in reversed(range(10000)):
            p = "{}/{}_epoch{}.pth".format(path, prefix, i)
            if os.path.exists(p):
                optimizer.load_state_dict(torch.load(p))
                return i
    else:
        p = "{}/{}_epoch{}.pth".format(path, prefix, epoch)
        if os.path.exists(p):
            optimizer.load_state_dict(torch.load(p))
            return epoch
        # Bug fix: the original warned "resume model not found" here even
        # though this function resumes an optimizer.
        warnings.warn("resume optimizer not found at {}".format(p))
    warnings.warn("resume optimizer not found")
    return -1
def load_model(model, path, epoch=None, strict=True):
    """Restore a model state saved by ``save_model``.

    Checkpoints are looked up under ``path`` with the naming scheme
    ``<ModelClass>_epoch<N>.pth``.

    Args:
        model: model whose ``state_dict`` is loaded in place; a DataParallel
            wrapper is unwrapped first.
        path: directory containing the checkpoints.
        epoch: specific epoch to restore; when None, the highest epoch in
            [0, 9999] present on disk is used.
        strict: forwarded to ``load_state_dict``.

    Returns:
        The epoch that was restored, or -1 when no checkpoint was found.
    """
    if type(model).__name__ == torch.nn.DataParallel.__name__:
        model = model.module
    name = type(model).__name__
    if epoch is None:
        # Scan backwards so the newest available checkpoint wins.
        for i in reversed(range(10000)):
            p = "{}/{}_epoch{}.pth".format(path, name, i)
            if os.path.exists(p):
                model.load_state_dict(torch.load(p), strict=strict)
                return i
        warnings.warn("resume model not found ")
    else:
        p = "{}/{}_epoch{}.pth".format(path, name, epoch)
        if os.path.exists(p):
            model.load_state_dict(torch.load(p), strict=strict)
            return epoch
        # Fix: the original emitted both this warning and the generic
        # "resume model not found" one for a single missing checkpoint.
        warnings.warn("resume model not found at {}".format(p))
    return -1
def set_requires_grad(module, b):
    """Enable (b=True) or disable (b=False) gradient tracking for every
    parameter of ``module`` — e.g. to freeze a discriminator during the
    generator step."""
    for param in module.parameters():
        # In-place setter; parameters are leaf tensors so this is valid.
        param.requires_grad_(b)
def adjust_dyn_range(x, drange_in, drange_out):
    """Linearly remap tensor ``x`` from dynamic range ``drange_in=(lo, hi)``
    to ``drange_out=(lo, hi)``.

    Returns ``x`` unchanged (same object) when the two ranges compare equal;
    otherwise returns a new tensor ``x * scale + bias``.
    """
    if drange_in == drange_out:
        return x
    in_lo, in_hi = drange_in
    out_lo, out_hi = drange_out
    scale = float(out_hi - out_lo) / float(in_hi - in_lo)
    bias = out_lo - in_lo * scale
    return x.mul(scale).add(bias)
def resize(x, size):
    """Resize a single image tensor via PIL.

    Args:
        x: image tensor (C x H x W) accepted by ``transforms.ToPILImage``.
        size: target size as accepted by torchvision ``Resize`` — an int
            scales the shorter side keeping aspect ratio; (h, w) is exact.

    Returns:
        The resized image as a float tensor in [0, 1].
    """
    # Fix: transforms.Scale was deprecated and later removed from
    # torchvision; Resize is the drop-in replacement with the same
    # semantics.
    transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize(size),
        transforms.ToTensor(),
    ])
    return transform(x)
def make_image_grid(x, ngrid):
    """Arrange a batch of images into an ``ngrid`` x ``ngrid`` grid tensor.

    Images beyond ``ngrid**2`` are dropped; when the batch is smaller, the
    remaining cells are filled with constant-1 (white) images. The result
    is normalized to [0, 1] by the local ``make_grid``.
    """
    x = x.clone().cpu()
    n_cells = ngrid * ngrid
    if x.size(0) > n_cells:
        # More images than cells: keep only the first ngrid^2.
        return make_grid(x[:n_cells], nrow=ngrid, padding=0,
                         normalize=True, scale_each=False)
    # Fewer (or equal) images: pad the batch with white placeholders.
    padded = torch.FloatTensor(n_cells, x.size(1), x.size(2), x.size(3)).fill_(1)
    padded[:x.size(0)].copy_(x)
    return make_grid(padded, nrow=ngrid, padding=0,
                     normalize=True, scale_each=False)
def save_image_single(x, path, imsize=512):
    """Save the first image of batch ``x`` to ``path`` as an
    ``imsize`` x ``imsize`` picture (nearest-neighbour upscaling)."""
    from PIL import Image
    grid = make_image_grid(x, 1)
    # HWC uint8 layout expected by PIL.
    arr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()
    img = Image.fromarray(arr)
    img.resize((imsize, imsize), Image.NEAREST).save(path)
def save_image_grid(x, path, imsize=512, ngrid=4):
    """Save batch ``x`` to ``path`` as an ``ngrid`` x ``ngrid`` image grid,
    rescaled to ``imsize`` x ``imsize`` with nearest-neighbour sampling."""
    from PIL import Image
    grid = make_image_grid(x, ngrid)
    # HWC uint8 layout expected by PIL.
    arr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()
    img = Image.fromarray(arr)
    img.resize((imsize, imsize), Image.NEAREST).save(path)
def save_model(model, dirname, epoch):
    """Save ``model``'s state_dict to ``<dirname>/<ClassName>_epoch<epoch>.pth``.

    A DataParallel wrapper is unwrapped first so the checkpoint can be
    reloaded without the wrapper (see ``load_model``).
    """
    if type(model).__name__ == name_dataparallel:
        model = model.module
    target = '{}/{}_epoch{}.pth'.format(dirname, type(model).__name__, epoch)
    torch.save(model.state_dict(), target)
def save_optimizer(optimizer, model, dirname, epoch):
    """Save ``optimizer``'s state_dict to
    ``<dirname>/<OptimizerClass>_<ModelClass>_epoch<epoch>.pth``.

    The model (unwrapped from DataParallel if needed) only contributes its
    class name to the filename (see ``load_optimizer``).
    """
    if type(model).__name__ == name_dataparallel:
        model = model.module
    prefix = type(optimizer).__name__ + '_' + type(model).__name__
    target = '{}/{}_epoch{}.pth'.format(dirname, prefix, epoch)
    torch.save(optimizer.state_dict(), target)
def make_summary(writer, key, value, step):
    """Write a TensorBoard summary for ``value`` under tag ``key`` at ``step``.

    A sized object is treated as a batch of images (one image summary per
    element, tagged "<key>/<idx>"); anything else is logged as a scalar.

    NOTE(review): ``tf``, ``BytesIO`` and ``scipy`` are never imported in
    this module, so calling this raises NameError as-is. It also targets
    the TF1 ``tf.Summary`` API and ``scipy.misc.toimage`` (removed in
    scipy 1.2) — confirm whether this helper is still used anywhere.
    """
    if hasattr(value, '__len__'):
        for idx, img in enumerate(value):
            summary = tf.Summary()
            sio = BytesIO()
            # Encode each image as PNG bytes for the image summary proto.
            scipy.misc.toimage(img).save(sio, format='png')
            image_summary = tf.Summary.Image(encoded_image_string=sio.getvalue())
            summary.value.add(tag="{}/{}".format(key, idx), image=image_summary)
            writer.add_summary(summary, global_step=step)
    else:
        summary = tf.Summary(value=[tf.Summary.Value(tag=key, simple_value=value)])
        writer.add_summary(summary, global_step=step)
import torch
import math
irange = range
def make_grid(tensor, nrow=8, padding=2,
              normalize=False, range=None, scale_each=False, pad_value=0):
    """Make a grid of images.

    NOTE(review): this appears to be a vendored copy of
    ``torchvision.utils.make_grid``, and the function body is cut off
    below — the file's tail was replaced by unrelated text. Restore the
    missing remainder from torchvision before using this module.

    Args:
        tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
            or a list of images all of the same size.
        nrow (int, optional): Number of images displayed in each row of the grid.
            The Final grid size is (B / nrow, nrow). Default is 8.
        padding (int, optional): amount of padding. Default is 2.
        normalize (bool, optional): If True, shift the image to the range (0, 1),
            by subtracting the minimum and dividing by the maximum pixel value.
        range (tuple, optional): tuple (min, max) where min and max are numbers,
            then these numbers are used to normalize the image. By default, min and max
            are computed from the tensor.
        scale_each (bool, optional): If True, scale each image in the batch of
            images separately rather than the (min, max) over all images.
        pad_value (float, optional): Value for the padded pixels.
    Example:
        See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_
    """
    # NOTE: the `range` parameter shadows the builtin of the same name
    # inside this function (kept for torchvision API compatibility).
    if not (torch.is_tensor(tensor) or
            (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
        raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))
    # if list of tensors, convert to a 4D mini-batch Tensor
    if isinstance(tensor, list):
        tensor = torch.stack(tensor, dim=0)
    if tensor.dim() == 2:  # single image H x W
        tensor = tensor.view(1, tensor.size(0), tensor.size(1))
    if tensor.dim() == 3:  # single image
        if tensor.size(0) == 1:  # if single-channel, convert to 3-channel
            tensor = torch.cat((tensor, tensor, tensor), 0)
        return tensor
    if tensor.dim() == 4 and tensor.size(1) == 1:  # single-channel images
        tensor = torch.cat((tensor, tensor, tensor), 1)
    if normalize is True:
        tensor = tensor.clone()  # avoid modifying tensor in-place
        if range is not None:
            assert isinstance(range, tuple), \
                "range has to be a tuple (min, max) if specified. min and max are numbers"

        def norm_ip(img, min, max):
            # Clamp to [min, max], then shift/scale into [0, 1] in place.
            img.clamp_(min=min, max=max)
            img.add_(-min).div_(max - min)

        def norm_range(t, range):
            if range is not None:
                norm_ip(t, range[0], range[1])
            else:
                norm_ip(t, t.min(), t.max())
        if scale_each is True:
            for t in tensor:  # loop over mini-batch dimension
                norm_range(t, range)
        else:
            norm_range(tensor, range)
    # make the mini-batch of images into a grid
    nmaps = tensor.size(0)
    # NOTE(review): the rest of this function is missing from the file.
没有合适的资源?快使用搜索试试~ 我知道了~
温馨提示
基于图像去模糊模型实现像素点注意力机制的Python仿真源码+数据(课程设计).zip 已获导师指导并通过的97分的高分课程设计项目,可作为课程设计和期末大作业,下载即用无需修改,项目完整确保可以运行。 基于图像去模糊模型实现像素点注意力机制的Python仿真源码+数据(课程设计).zip 已获导师指导并通过的97分的高分课程设计项目,可作为课程设计和期末大作业,下载即用无需修改,项目完整确保可以运行。基于图像去模糊模型实现像素点注意力机制的Python仿真源码+数据(课程设计).zip 已获导师指导并通过的97分的高分课程设计项目,可作为课程设计和期末大作业,下载即用无需修改,项目完整确保可以运行。基于图像去模糊模型实现像素点注意力机制的Python仿真源码+数据(课程设计).zip 已获导师指导并通过的97分的高分课程设计项目,可作为课程设计和期末大作业,下载即用无需修改,项目完整确保可以运行。基于图像去模糊模型实现像素点注意力机制的Python仿真源码+数据(课程设计).zip 已获导师指导并通过的97分的高分课程设计项目,可作为课程设计和期末大作业,下载即用无需修
资源推荐
资源详情
资源评论
收起资源包目录
基于图像去模糊模型实现像素点注意力机制的Python仿真.zip (37个子文件)
基于图像去模糊模型实现像素点注意力机制的Python仿真
m_test.py 2KB
utils.py 9KB
VGG19.py 692B
pytorch_ssim
__init__.py 3KB
__pycache__
__init__.cpython-37.pyc 3KB
network.py 6KB
layers.py 8KB
val_gopro_gamma.list 1008B
train_gopro_gamma.list 129KB
try.py 685B
test_save.py 2KB
compute_ssim.py 4KB
data.py 4KB
log.py 2KB
test
000234.png 827KB
3.png 950KB
1.png 888KB
6.png 732KB
003011.png 814KB
5.png 923KB
4.png 824KB
003092.png 780KB
2.png 1019KB
.gitignore 350B
test.list 36B
train_config.py 666B
train.py 8KB
test.py 122B
test_label
000234.png 951KB
3.png 1.16MB
1.png 1.07MB
6.png 790KB
003011.png 964KB
5.png 1.08MB
4.png 988KB
003092.png 812KB
2.png 1.21MB
共 37 条
- 1
资源评论
不安分的小女孩
- 粉丝: 9439
- 资源: 2139
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功