import os
import random
import matplotlib.pyplot as plt
import numpy as np
import skimage.io
import skimage.transform
import tensorflow as tf
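
# AlexNet-style convolutional network for classifying the BelgiumTS traffic-sign
# dataset. Written against the TensorFlow 1.x API (tf.placeholder, tf.layers,
# tf.contrib), so it will not run unmodified under TensorFlow 2.x.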
def load_data(data_dir):
    """Load all .ppm images under data_dir; each subdirectory name is the integer label."""
    directories = [d for d in os.listdir(data_dir)
                   if os.path.isdir(os.path.join(data_dir, d))]
    labels = []
    images = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f)
                      for f in os.listdir(label_dir) if f.endswith(".ppm")]
        for f in file_names:
            images.append(skimage.io.imread(f))
            labels.append(int(d))
    return images, labels
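
# BelgiumTS ships one folder per class (00000 ... 00061), each holding .ppm
# images, so the folder name parses directly as the integer label above.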
train_data_dir = "datasets/BelgiumTS/Training/"
images, labels = load_data(train_data_dir)
# Resize every image to 115x115 so the whole set shares one input shape.
images115 = [skimage.transform.resize(image, (115, 115)) for image in images]
labels_a = np.array(labels)
images_a = np.array(images115)  # shape: (num_images, 115, 115, 3)
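
# Optional sanity check (this is what the random and matplotlib imports are
# for); uncomment to eyeball a few resized training images with their labels:
# for idx in random.sample(range(len(images115)), 4):
#     plt.imshow(images115[idx])
#     plt.title("label: {}".format(labels[idx]))
#     plt.show()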
graph = tf.Graph()
with graph.as_default():
    # Placeholders for a batch of RGB images and their integer class labels.
    images_ph = tf.placeholder(tf.float32, [None, 115, 115, 3])
    labels_ph = tf.placeholder(tf.int32, [None])
    # Conv layer 1: 96 5x5 filters, stride 2, ReLU (AlexNet-style).
    conv1 = tf.layers.conv2d(inputs=images_ph,
                             filters=96,
                             kernel_size=[5, 5],
                             strides=(2, 2),
                             padding='same',
                             activation=tf.nn.relu,
                             use_bias=True,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                             bias_initializer=tf.zeros_initializer())
    # Local response normalization (beta = 0.75, AlexNet's standard value),
    # then 3x3 max pooling with stride 2.
    lrn1 = tf.nn.lrn(conv1, bias=1.0, alpha=0.001 / 9, beta=0.75)
    pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
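    # Spatial sizes so far: 115 -> 58 (stride-2 'same' conv) -> 28 (3x3/2
    # 'valid' pool), so pool1 is (batch, 28, 28, 96).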
    # Conv layer 2: 256 5x5 filters, stride 1, ReLU.
    conv2 = tf.layers.conv2d(inputs=pool1,
                             filters=256,
                             kernel_size=[5, 5],
                             strides=(1, 1),
                             padding='same',
                             activation=tf.nn.relu,
                             use_bias=True,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                             bias_initializer=tf.zeros_initializer())
    lrn2 = tf.nn.lrn(conv2, bias=1.0, alpha=0.001 / 9, beta=0.75)
    pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
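    # pool2 is (batch, 13, 13, 256): 28 -> 28 ('same' conv) -> 13 (3x3/2 pool).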
    # Conv layer 3: 384 3x3 filters, stride 1, ReLU (no LRN or pooling here).
    conv3 = tf.layers.conv2d(inputs=pool2,
                             filters=384,
                             kernel_size=[3, 3],
                             strides=(1, 1),
                             padding='same',
                             activation=tf.nn.relu,
                             use_bias=True,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                             bias_initializer=tf.zeros_initializer())
    # Conv layer 4: 384 3x3 filters, stride 1, ReLU.
    conv4 = tf.layers.conv2d(inputs=conv3,
                             filters=384,
                             kernel_size=[3, 3],
                             strides=(1, 1),
                             padding='same',
                             activation=tf.nn.relu,
                             use_bias=True,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                             bias_initializer=tf.zeros_initializer())
    # Conv layer 5: 256 3x3 filters, stride 1, ReLU, followed by a final max pool.
    conv5 = tf.layers.conv2d(inputs=conv4,
                             filters=256,
                             kernel_size=[3, 3],
                             strides=(1, 1),
                             padding='same',
                             activation=tf.nn.relu,
                             use_bias=True,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                             bias_initializer=tf.zeros_initializer())
    pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
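    # pool5 is (batch, 6, 6, 256): 13 -> 6 (3x3/2 pool); flattening below
    # yields 6 * 6 * 256 = 9216 features per image.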
    # Fully connected head: 9216 -> 4096 -> 4096 -> 62 (BelgiumTS classes).
    pool5_flat = tf.contrib.layers.flatten(pool5)
    fc1 = tf.contrib.layers.fully_connected(pool5_flat, 4096, tf.nn.relu)
    fc2 = tf.contrib.layers.fully_connected(fc1, 4096, tf.nn.relu)
    # The final layer must emit raw logits (no ReLU), because
    # sparse_softmax_cross_entropy_with_logits applies the softmax itself.
    logits = tf.contrib.layers.fully_connected(fc2, 62, activation_fn=None)
    predicted_labels = tf.argmax(logits, 1)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels_ph))
    correct_prediction = tf.equal(tf.cast(predicted_labels, tf.int32), labels_ph)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    train = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
    init = tf.global_variables_initializer()
session = tf.Session(graph=graph)
session.run(init)
batch_size = 64
# Yield successive (inputs, targets) batches; the last incomplete batch is dropped.
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]
# Train for 101 passes over the data, reporting mean loss/accuracy per pass.
for i in range(101):
    train_loss, train_acc, n_batch = 0, 0, 0
    for x_train, y_train in minibatches(images_a, labels_a, batch_size, shuffle=True):
        _, err, ac = session.run([train, loss, accuracy],
                                 feed_dict={images_ph: x_train, labels_ph: y_train})
        train_loss += err
        train_acc += ac
        n_batch += 1
    train_loss = train_loss / n_batch
    train_acc = train_acc / n_batch
    print("{0} step, train loss: {1}, train acc: {2}".format(i, train_loss, train_acc))
    print("======================================")
print("----------------Testing-----------------")
test_dir = "datasets/BelgiumTS/Testing/"
test_images,test_labels = load_data(test_dir)
test_images224 = [skimage.transform.resize(image,(224,224)) for image in test_images]
predicted = session.run([predicted_labels],feed_dict={images_ph:test_images224})[0]
match_count = sum([int(y == y_) for y,y_ in zip(test_labels,predicted)])
accuracy = match_count / len(test_labels)
print("Accuracy:{:.3f}".format(accuracy))
session.close()