import tensorflow as tf
from configs import *
from os.path import join
def batch_queue_for_training(data_path):
    """Build shuffled batches of (low_res, high_res) patch pairs for training.

    Reads every ``*.png`` under ``data_path`` (assumed to be
    PATCH_SIZE x PATCH_SIZE patches — confirm against make_noisefree_data),
    applies photometric and geometric augmentation, random-crops the
    LABEL_SIZE ground-truth patch, then produces the low-resolution input by
    downscaling with a randomly chosen interpolator and (optionally)
    degrading it with JPEG and Gaussian noise.

    Args:
        data_path: directory containing the training patch PNG files.

    Returns:
        Tuple ``(low_res_batch, high_res_batch)`` of float32 tensors with
        shapes ``[BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, NUM_CHENNELS]`` and
        ``[BATCH_SIZE, LABEL_SIZE, LABEL_SIZE, NUM_CHENNELS]``, values
        clipped to [0, 1].
    """
    filename_queue = tf.train.string_input_producer(
        tf.train.match_filenames_once(join(data_path, '*.png')))
    file_reader = tf.WholeFileReader()
    _, image_file = file_reader.read(filename_queue)
    patch = tf.image.decode_png(image_file, NUM_CHENNELS)
    # we must set the shape of the image before making batches
    patch.set_shape([PATCH_SIZE, PATCH_SIZE, NUM_CHENNELS])
    patch = tf.image.convert_image_dtype(patch, dtype=tf.float32)

    # Photometric augmentation, gated by configs.
    if MAX_RANDOM_BRIGHTNESS > 0:
        patch = tf.image.random_brightness(patch, MAX_RANDOM_BRIGHTNESS)
    if len(RANDOM_CONTRAST_RANGE) == 2:
        patch = tf.image.random_contrast(patch, *RANDOM_CONTRAST_RANGE)

    # Geometric augmentation.
    patch = tf.image.random_flip_left_right(patch)
    high_res_patch = tf.image.random_flip_up_down(patch)

    crop_margin = PATCH_SIZE - LABEL_SIZE
    assert crop_margin >= 0
    if crop_margin > 0:
        # BUG FIX: crop the fully augmented tensor (the original cropped
        # `patch`, silently discarding the up/down flip above), and crop
        # whenever there is any margin (the original tested `> 1`, which at
        # margin == 1 skipped the crop and yielded a PATCH_SIZE label
        # instead of LABEL_SIZE).
        high_res_patch = tf.random_crop(
            high_res_patch, [LABEL_SIZE, LABEL_SIZE, NUM_CHENNELS])

    # Downscale with one of three interpolators chosen uniformly at random,
    # so the model sees inputs produced by different scalers.
    downscale_size = [INPUT_SIZE, INPUT_SIZE]
    resize_nn = lambda: tf.image.resize_nearest_neighbor([high_res_patch], downscale_size, True)
    resize_area = lambda: tf.image.resize_area([high_res_patch], downscale_size, True)
    resize_cubic = lambda: tf.image.resize_bicubic([high_res_patch], downscale_size, True)
    r = tf.random_uniform([], 0, 3, dtype=tf.int32)
    low_res_patch = tf.case({tf.equal(r, 0): resize_nn, tf.equal(r, 1): resize_area},
                            default=resize_cubic)[0]

    # Simulate JPEG compression artifacts via a uint8 encode/decode round trip.
    if JPEG_NOISE_LEVEL > 0:
        low_res_patch = tf.image.convert_image_dtype(low_res_patch, dtype=tf.uint8, saturate=True)
        jpeg_quality = 100 - 5 * JPEG_NOISE_LEVEL
        jpeg_code = tf.image.encode_jpeg(low_res_patch, quality=jpeg_quality)
        low_res_patch = tf.image.decode_jpeg(jpeg_code)
        low_res_patch = tf.image.convert_image_dtype(low_res_patch, dtype=tf.float32)

    # we must set tensor's shape before doing following processes
    low_res_patch.set_shape([INPUT_SIZE, INPUT_SIZE, NUM_CHENNELS])

    # Additive Gaussian noise on the degraded input.
    if GAUSSIAN_NOISE_STD > 0:
        low_res_patch += tf.random_normal(low_res_patch.get_shape(), stddev=GAUSSIAN_NOISE_STD)

    low_res_patch = tf.clip_by_value(low_res_patch, 0, 1.0)
    high_res_patch = tf.clip_by_value(high_res_patch, 0, 1.0)

    # Generate shuffled batches via a multi-threaded example queue.
    low_res_batch, high_res_batch = tf.train.shuffle_batch(
        [low_res_patch, high_res_patch],
        batch_size=BATCH_SIZE,
        num_threads=NUM_PROCESS_THREADS,
        capacity=MIN_QUEUE_EXAMPLES + 3 * BATCH_SIZE,
        min_after_dequeue=MIN_QUEUE_EXAMPLES)
    return low_res_batch, high_res_batch
def batch_queue_for_testing(data_path):
    """Build (non-shuffled) batches of (low_res, high_res) patch pairs.

    Same degradation pipeline as the training queue — random scaler choice
    plus optional JPEG/Gaussian noise — but the ground-truth patch is taken
    with a deterministic centre crop and batching is single-threaded FIFO.

    Args:
        data_path: directory containing the evaluation patch PNG files.

    Returns:
        Tuple ``(low_res_batch, high_res_batch)`` of float32 tensors with
        shapes ``[BATCH_SIZE, INPUT_SIZE, INPUT_SIZE, NUM_CHENNELS]`` and
        ``[BATCH_SIZE, LABEL_SIZE, LABEL_SIZE, NUM_CHENNELS]``, values
        clipped to [0, 1].
    """
    name_queue = tf.train.string_input_producer(
        tf.train.match_filenames_once(join(data_path, '*.png')))
    reader = tf.WholeFileReader()
    _, png_bytes = reader.read(name_queue)
    full_patch = tf.image.decode_png(png_bytes, NUM_CHENNELS)
    # Static shape must be fully defined before batching.
    full_patch.set_shape([PATCH_SIZE, PATCH_SIZE, NUM_CHENNELS])
    full_patch = tf.image.convert_image_dtype(full_patch, dtype=tf.float32)

    # Deterministic centre crop of the LABEL_SIZE ground-truth region.
    margin = PATCH_SIZE - LABEL_SIZE
    begin = tf.convert_to_tensor([margin // 2, margin // 2, 0])
    extent = tf.convert_to_tensor([LABEL_SIZE, LABEL_SIZE, NUM_CHENNELS])
    high_res_patch = tf.slice(full_patch, begin, extent)

    # Downscale with a uniformly random choice among three interpolators.
    target_size = [INPUT_SIZE, INPUT_SIZE]
    nn_fn = lambda: tf.image.resize_nearest_neighbor([high_res_patch], target_size, True)
    area_fn = lambda: tf.image.resize_area([high_res_patch], target_size, True)
    cubic_fn = lambda: tf.image.resize_bicubic([high_res_patch], target_size, True)
    choice = tf.random_uniform([], 0, 3, dtype=tf.int32)
    low_res_patch = tf.case(
        {tf.equal(choice, 0): nn_fn, tf.equal(choice, 1): area_fn},
        default=cubic_fn)[0]

    # Optionally round-trip through JPEG to add compression artifacts.
    if JPEG_NOISE_LEVEL > 0:
        as_uint8 = tf.image.convert_image_dtype(low_res_patch, dtype=tf.uint8, saturate=True)
        encoded = tf.image.encode_jpeg(as_uint8, quality=100 - 5 * JPEG_NOISE_LEVEL)
        decoded = tf.image.decode_jpeg(encoded)
        low_res_patch = tf.image.convert_image_dtype(decoded, dtype=tf.float32)

    # Restore the static shape lost through the encode/decode round trip.
    low_res_patch.set_shape([INPUT_SIZE, INPUT_SIZE, NUM_CHENNELS])

    # Optionally add zero-mean Gaussian noise to the degraded input.
    if GAUSSIAN_NOISE_STD > 0:
        noise = tf.random_normal(low_res_patch.get_shape(), stddev=GAUSSIAN_NOISE_STD)
        low_res_patch = low_res_patch + noise

    low_res_patch = tf.clip_by_value(low_res_patch, 0, 1.0)
    high_res_patch = tf.clip_by_value(high_res_patch, 0, 1.0)

    # Plain FIFO batching — no shuffling for evaluation.
    low_res_batch, high_res_batch = tf.train.batch(
        [low_res_patch, high_res_patch],
        batch_size=BATCH_SIZE,
        num_threads=1,
        capacity=MIN_QUEUE_EXAMPLES + 3 * BATCH_SIZE)
    return low_res_batch, high_res_batch
# def batch_queue_for_pair_training(data_path):
# filename_queue = tf.train.string_input_producer(tf.train.match_filenames_once(join(data_path, '*.png')))
# file_reader = tf.WholeFileReader()
# _, image_file = file_reader.read(filename_queue)
# patch_pair = tf.image.decode_png(image_file, NUM_CHENNELS)
# # we must set the shape of the image before making batches
# patch_pair.set_shape([PATCH_SIZE, PATCH_SIZE * 2, NUM_CHENNELS])
#
# # random brightness and contrast synchronously
# # patch_pair = tf.cast(patch_pair, tf.float32) / 127.5 - 1.0
# patch_pair = tf.image.convert_image_dtype(patch_pair, dtype=tf.float32)
# argumented_image = tf.image.random_brightness(patch_pair, MAX_RANDOM_BRIGHTNESS)
# argumented_image = tf.image.random_contrast(argumented_image, *RANDOM_CONTRAST_RANGE)
#
# # patch_pair is formated like: low_res_patch | high_res_patch
# low_res_patch = argumented_image[:, :PATCH_SIZE, ...]
# high_res_patch = argumented_image[:, PATCH_SIZE:, ...]
#
# # add jpeg noise to low_res_patch
# if JPEG_NOISE_LEVEL > 0:
# low_res_patch = tf.image.convert_image_dtype(low_res_patch, dtype=tf.uint8, saturate=True)
# jpeg_quality = 100 - 5 * JPEG_NOISE_LEVEL
# jpeg_code = tf.image.encode_jpeg(low_res_patch, quality=jpeg_quality)
# low_res_patch = tf.image.decode_jpeg(jpeg_code)
# low_res_patch = tf.image.convert_image_dtype(low_res_patch, dtype=tf.float32)
#
# crop_margin = PATCH_SIZE - INPUT_SIZE
# if crop_margin > 1:
# # random crop synchronously
# crop_pos = tf.random_uniform([2], 0, crop_margin, dtype=tf.int32)
#
没有合适的资源?快使用搜索试试~ 我知道了~
Tensorflow实现二次元图像的超分辨率
共30个文件
png:9个
py:8个
xml:5个
5星 · 超过95%的资源 需积分: 50 387 下载量 37 浏览量
2016-12-17
15:20:01
上传
评论 22
收藏 14.49MB ZIP 举报
温馨提示
使用tensorflow实现了一个vgg-style网络,用于对动漫风格的图像进行超分辨率处理。 解压后源码在src目录下,data目录下是用于训练的数据,data/originals目录用于存放你收集的原始无损图片,运行make_noisefree_data后,程序会自动从data/originals中读取文件并裁剪,然后保存到data/train,data/valid和data/test目录下,分别用于训练、验证和测试。训练中产生的模型会保存在checkpoints目录下。
资源推荐
资源详情
资源评论
收起资源包目录
anima2x.zip (30个子文件)
anima2x
checkpoints
models
vgg_deconv_7_0-1
model.ckpt-1000000.data-00000-of-00001 8.43MB
model.ckpt-1000000.index 2KB
model.ckpt-1000000.meta 224KB
training_summary
src
make_noisefree_data.py 10KB
train.py 3KB
anima2x.py 3KB
test.py 3KB
images
small_nn.png 395KB
small_cubic.png 321KB
big_nn.png 1.17MB
ground_truth.png 936KB
big_cubic.png 1.04MB
small_area.png 308KB
anima2x.png 1.15MB
big_linear.png 1.03MB
data_inputs.py 11KB
.idea
misc.xml 687B
workspace.xml 43KB
Anima2x.iml 593B
inspectionProfiles
Project_Default.xml 743B
profiles_settings.xml 241B
modules.xml 266B
models.py 4KB
__pycache__
models.cpython-35.pyc 4KB
layers.cpython-35.pyc 4KB
data_inputs.cpython-35.pyc 4KB
configs.cpython-35.pyc 994B
layers.py 5KB
configs.py 1KB
data
train
originals
Konachan.com - 231783 boots building city forest key long_hair scenic seifuku skirt tagme_(artist) tagme_(character) tree water white_hair.png 2.05MB
valid
test
inferences
共 30 条
- 1
aipiano
- 粉丝: 1271
- 资源: 6
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功
- 1
- 2
- 3
- 4
- 5
- 6
前往页