# -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 13:29:43 2020
@author: 46953
"""
import os
import tensorflow as tf
from datetime import datetime
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import skimage.io as io
from skimage.transform import resize
from skimage import transform,data
from PIL import Image
import numpy as np
def read_data(data_dir):
    """Load every image in *data_dir* as a normalized 128x128x1 array.

    Each file is opened with PIL, scaled to [0, 1], and resized to
    (128, 128, 1) so it can be fed straight into the network.

    Args:
        data_dir: directory containing the image files.

    Returns:
        np.ndarray of shape (num_images, 128, 128, 1), dtype float.
    """
    datas = []
    labels = []
    fpaths = []
    # Sort the listing: os.listdir order is unspecified, and the noisy /
    # ground-truth directories are loaded separately but must correspond
    # index-by-index, so both sides need a deterministic order.
    for fname in sorted(os.listdir(data_dir)):
        fpath = os.path.join(data_dir, fname)
        fpaths.append(fpath)
        image = Image.open(fpath)
        data = np.array(image) / 255.0
        data = resize(data, [128, 128, 1], mode='constant')
        datas.append(data)
        # Label is encoded as the filename prefix before "_"; a file that
        # does not follow the convention gets a sentinel -1 instead of
        # aborting the whole load. (Labels are collected for the progress
        # printout but are not returned -- kept for parity with callers.)
        try:
            labels.append(int(fname.split("_")[0]))
        except ValueError:
            labels.append(-1)
    datas = np.array(datas)
    labels = np.array(labels)
    print("shape of datas: {}\tshape of labels: {}".format(datas.shape, labels.shape))
    return datas
# ----- network inputs ---------------------------------------------------
# Noisy image in, clean image out; both 128x128 single-channel.
x_train = tf.placeholder(dtype=tf.float32, shape=[None, 128, 128, 1], name="x_train")
y_train = tf.placeholder(dtype=tf.float32, shape=[None, 128, 128, 1], name="y_train")
lr = tf.placeholder(dtype=tf.float32)


def _double_conv(net, n_filters, drop_rate):
    """conv -> dropout -> conv; 3x3 kernels, 'same' padding, ReLU, Glorot init."""
    net = tf.layers.conv2d(inputs=net, filters=n_filters, kernel_size=3, strides=1,
                           padding="same", activation=tf.nn.relu,
                           kernel_initializer=tf.initializers.glorot_uniform())
    net = tf.layers.dropout(inputs=net, rate=drop_rate)
    return tf.layers.conv2d(inputs=net, filters=n_filters, kernel_size=3, strides=1,
                            padding="same", activation=tf.nn.relu,
                            kernel_initializer=tf.initializers.glorot_uniform())


def _upsample_merge(net, skip, n_filters):
    """2x transposed-conv upsampling followed by a channel-wise skip concat."""
    net = tf.layers.conv2d_transpose(inputs=net, filters=n_filters, kernel_size=2,
                                     strides=2, padding="same",
                                     kernel_initializer=tf.initializers.glorot_uniform())
    return tf.concat([net, skip], axis=3)


# ----- encoder: four double-conv blocks, each followed by 2x2 max-pooling -----
skips = []
net = x_train
for n_filters, drop_rate in [(16, 0.1), (32, 0.1), (64, 0.1), (128, 0.1)]:
    net = _double_conv(net, n_filters, drop_rate)
    skips.append(net)  # saved pre-pooling activations feed the decoder
    net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2, padding="valid")

# ----- bottleneck -------------------------------------------------------
net = _double_conv(net, 256, 0.2)

# ----- decoder: mirror the encoder, concatenating the stored skips ------
for n_filters, drop_rate, skip in [(128, 0.2, skips[3]), (64, 0.2, skips[2]),
                                   (32, 0.2, skips[1]), (16, 0.1, skips[0])]:
    net = _upsample_merge(net, skip, n_filters)
    net = _double_conv(net, n_filters, drop_rate)

# Final 1x1 projection to a single channel, squashed into (0, 1).
conv10 = tf.layers.conv2d(inputs=net, filters=1, kernel_size=1, strides=1,
                          kernel_initializer=tf.initializers.glorot_uniform())
logits = tf.nn.sigmoid(conv10)
learning_rate = 0.0051  # fixed Adam step size (the `lr` placeholder above is unused here)
y_out = logits
# Training objective: summed squared error between prediction and target.
cost = tf.reduce_sum(tf.square(y_out - y_train))
# Run any pending collection update ops before each optimizer step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Evaluation metric is the same summed squared error as the loss; reuse the
# existing tensor instead of building a duplicate op in the graph.
evaluate = cost
# Create a saver for writing training checkpoints.
saver = tf.train.Saver(max_to_keep=30)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # claim GPU memory on demand, not all upfront
sess = tf.Session(config=config)
# tf.initialize_all_variables() was deprecated in TF 0.12 and later removed;
# tf.global_variables_initializer() is the supported equivalent.
sess.run(tf.global_variables_initializer())
# Dataset locations. The noisy and ground-truth directories are loaded
# separately, so their files must correspond pair-wise -- presumably by
# identical filenames; TODO confirm the pairing convention.
noise_path = "D:/train"
truth_path = "D:/truth"
test_t_path = "D:/test_truth"
test_n_path = "D:/test"
# Each call returns a (num_images, 128, 128, 1) float array in [0, 1].
Train= read_data(noise_path)
Truth= read_data(truth_path)
Train_vali=read_data(test_n_path)
Truth_vali=read_data(test_t_path)
# NOTE(review): the constants below describe the network (4 pooling levels,
# 3x3 kernels, 2x2 pools, 16 root features, 128x128 single-channel in/out)
# but are not referenced anywhere in this chunk -- likely leftovers from an
# earlier parameterized version; verify before relying on them.
channels=1
layers=4
filter_size=3
pool_size=2
features_root=16
keep_prob=1.0
n_class=1
s1=128
s2=128
# (removed: pagination artifacts left over from the web page this script was copied from)