# -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 09:43:06 2017
@author: WeiZhe
Train data existing at files
"""
import os
import random

import numpy as np
import tensorflow as tf
# Load Data
p2pimgs = np.load(r'E:\Program\PythonProgram\p2p\data\imgsave.npy')
p2plabels = np.load(r'E:\Program\PythonProgram\p2p\data\labelsave.npy')
#%%
# weight init
def weight_variable(shape, strName):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=strName)

# bias init
def bias_variable(shape, strName):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=strName)

# convolution
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

# max pooling
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
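
# Shape bookkeeping: with SAME padding, conv2d preserves the spatial size and
# max_pool_2x2 halves each dimension (rounding up on odd sizes), so the 90x140
# input becomes 45x70 after the first pool and 23x35 after the second.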
#%% Model setup
# placeholders: flattened 90x140 single-channel image, 6-way label, dropout keep rate
x = tf.placeholder(tf.float32, [None, 90*140])
y_ = tf.placeholder(tf.float32, [None, 6])
keep_prob = tf.placeholder(tf.float32)
def CNN():
    # first layer: conv & pool
    W_conv1 = weight_variable([3, 4, 1, 32], 'W_conv1')
    b_conv1 = bias_variable([32], 'b_conv1')
    x_image = tf.reshape(x, [-1, 90, 140, 1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    out1 = tf.nn.dropout(h_pool1, keep_prob)
    # shape: [batch, 45, 70, 32]
    # second layer: conv & pool
    W_conv2 = weight_variable([3, 4, 32, 64], 'W_conv2')
    b_conv2 = bias_variable([64], 'b_conv2')
    h_conv2 = tf.nn.relu(conv2d(out1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    out2 = tf.nn.dropout(h_pool2, keep_prob)
    # shape: [batch, 23, 35, 64]
    # third layer (disabled; enabling it changes the flatten size below)
    # W_conv3 = weight_variable([3, 4, 64, 64], 'W_conv3')
    # b_conv3 = bias_variable([64], 'b_conv3')
    # h_conv3 = tf.nn.relu(conv2d(out2, W_conv3) + b_conv3)
    # h_pool3 = max_pool_2x2(h_conv3)
    # out3 = tf.nn.dropout(h_pool3, keep_prob)
    # fourth layer (disabled)
    # W_conv4 = weight_variable([3, 3, 64, 64], 'W_conv4')
    # b_conv4 = bias_variable([64], 'b_conv4')
    # h_conv4 = tf.nn.relu(conv2d(out3, W_conv4) + b_conv4)
    # h_pool4 = max_pool_2x2(h_conv4)
    # out4 = tf.nn.dropout(h_pool4, keep_prob)
    # fully connected layer on the flattened 23*35*64 feature map
    W_fc1 = weight_variable([23*35*64, 1024], 'W_fc1')
    b_fc1 = bias_variable([1024], 'b_fc1')
    out2_flat = tf.reshape(out2, [-1, 23*35*64])
    h_fc1 = tf.nn.relu(tf.matmul(out2_flat, W_fc1) + b_fc1)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # shape: [batch, 1024]
    # output layer: raw logits (the loss in Train_CNN applies the sigmoid)
    W_fc2 = weight_variable([1024, 6], 'W_fc2')
    b_fc2 = bias_variable([6], 'b_fc2')
    # out = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    out = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    return out
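
# Quick sanity check for the flatten size: with SAME-padded 2x2 pooling the
# spatial size halves and rounds up, so two pools take 90x140 to 23x35. The
# helper below (a convenience sketch, not used by the training code)
# reproduces that arithmetic.
def pooled_size(h, w, n_pools):
    # SAME padding rounds odd sizes up at each halving
    for _ in range(n_pools):
        h = (h + 1) // 2
        w = (w + 1) // 2
    return h, w
# pooled_size(90, 140, 2) == (23, 35) -> out2 flattens to 23*35*64 features.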
def Train_CNN():
    # build the model plus loss, optimizer, and accuracy ops
    y_conv = CNN()
    # cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
    # sigmoid cross-entropy treats each of the 6 outputs as an independent
    # binary target; for mutually exclusive one-hot labels,
    # tf.nn.softmax_cross_entropy_with_logits would be the more usual choice
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_step = tf.train.AdamOptimizer(1.0e-4).minimize(loss)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    saver = tf.train.Saver()
#%% Run
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for i in range(10000):
        # train on a random mini-batch of 64 examples
        NUM = random.sample(range(p2pimgs.shape[0]), 64)
        batch_xs = np.array([p2pimgs[Ind] for Ind in NUM])
        batch_ys = np.array([p2plabels[Ind] for Ind in NUM])
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
        if i % 100 == 0:
            # report accuracy on 100 random examples; these come from the same
            # pool as the training batches, so this is not held-out accuracy
            NUM = random.sample(range(p2pimgs.shape[0]), 100)
            batch_xs_test = np.array([p2pimgs[Ind] for Ind in NUM])
            batch_ys_test = np.array([p2plabels[Ind] for Ind in NUM])
            print("step %d, training accuracy %f" % (i, sess.run(
                accuracy, feed_dict={x: batch_xs_test, y_: batch_ys_test, keep_prob: 1.0})))
    # save the trained model; tf.train.Saver needs the directory to exist
    if not os.path.isdir('./model'):
        os.makedirs('./model')
    savePath = saver.save(sess, './model/p2p.model')
    print('Save Path: ', savePath)
    sess.close()
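
# Inference sketch: one possible way to restore the checkpoint saved above and
# run predictions. It assumes a fresh graph (CNN() not yet called in this
# process, so variable names match the checkpoint) and that `imgs` is an array
# of flattened 90*140 images; it is not necessarily how the model was used.
def Predict_CNN(imgs):
    y_conv = CNN()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, './model/p2p.model')
        logits = sess.run(y_conv, feed_dict={x: imgs, keep_prob: 1.0})
    # per-output probabilities under the sigmoid loss used for training;
    # argmax picks the single most likely class
    probs = 1.0 / (1.0 + np.exp(-logits))
    return np.argmax(probs, axis=1)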
if __name__ == '__main__':
    Train_CNN()