import os
import base64

import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image

# Hyperparameters
batchSize = 30
num_epochs = 200
# Alternative input path: load a pre-saved .npz archive and keep the second
# half as a held-out test split.
# dataset = np.load('trainData.npz')
# x_test = dataset['arr_0']
# # x = np.array(x)
# # x = x.reshape([-1, 400, 400, 1])
# y_test = dataset['arr_1']
# division = int(len(x_test) * 0.5)
# x_test = x_test[division:]
# y_test = y_test[division:]
def tfRecordRead(fileNameQue, height, width, channels, n_class):
    reader = tf.TFRecordReader()
    # Read one serialized Example from the filename queue.
    _, serialized_example = reader.read(fileNameQue)
    # Parse the Example into tensors with FixedLenFeature.
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64)
        })
    # Decode the raw byte string into the image's pixel array.
    image = tf.decode_raw(features['image'], tf.float32)
    # image = tf.decode_raw(features["image"], tf.uint8)
    image = tf.reshape(image, [height, width, channels])
    # image = tf.cast(image, tf.float32) * (1 / 255.0)
    labels = tf.cast(features['label'], tf.int64)
    labels = tf.one_hot(labels, n_class, 1, 0)
    return image, labels
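# --------------------- Writer counterpart (sketch) ----------------------
# Not part of the original listing: a minimal sketch of how a record that the
# reader above can parse would be written. decode_raw(..., tf.float32) only
# works if the image bytes were serialized from a float32 array, as done
# here; the helper name writeExample is illustrative, not from the source.
def writeExample(writer, image_float32, label):
    example = tf.train.Example(features=tf.train.Features(feature={
        'image': tf.train.Feature(bytes_list=tf.train.BytesList(
            value=[image_float32.astype(np.float32).tobytes()])),
        'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))
    }))
    writer.write(example.SerializeToString())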
def tfRecordBatchRead(filename, height, width, channels, n_class, batchSize):
    # Queue that maintains the list of input files.
    fileNameQue = tf.train.string_input_producer([filename], shuffle=True, num_epochs=num_epochs)
    image, labels = tfRecordRead(fileNameQue, height, width, channels, n_class)  # fetch image and label
    min_after_dequeue = 100
    capacity = min_after_dequeue + 3 * batchSize
    # Prefetch examples and shuffle them into batches; this adds a leading
    # batch dimension to each tensor.
    imageBatch, labelBatch = tf.train.shuffle_batch([image, labels], batch_size=batchSize,
                                                    capacity=capacity,
                                                    min_after_dequeue=min_after_dequeue,
                                                    num_threads=2)
    return imageBatch, labelBatch
filename = r'D:\ZWJProject\tensorflow\CNN\record\Imageoutput.tfrecords'
# filename = 'Imageoutput.tfrecords'
height, width, channels, n_class = 100, 100, 3, 3
print(height, width, channels, n_class)
imageBatch, labelBatch = tfRecordBatchRead(filename, height, width, channels, n_class, batchSize)
# string_input_producer with num_epochs creates local variables, so the local
# initializer is required alongside the global one.
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
# ----------------- Build the network ----------------------
'''
CNN model with seven weighted layers: four convolutional layers (each
followed by ReLU activation and max pooling) and three fully connected
layers.
'''
# Placeholders defining the shape and dtype of the network inputs.
x = tf.placeholder(tf.float32, [None, height, width, channels])
y = tf.placeholder(tf.float32, [None, n_class])
def inference(input_tensor, train, regularizer, channels=3, n_class=3):
    # ----------------------- Layer 1 ----------------------------
    with tf.variable_scope('layer1-conv1'):
        # 32 trainable filters of size 5x5 over the input channels (RGB).
        conv1_weights = tf.get_variable("weight", [5, 5, channels, 32],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        # One bias per filter, 32 in total.
        conv1_biases = tf.get_variable("bias", [32], initializer=tf.constant_initializer(0.0))
        # 2-D convolution; strides=[1, 1, 1, 1] slides the filter one pixel at
        # a time, padding='SAME' zero-pads so the output keeps the input size.
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        # ReLU activation.
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
    with tf.name_scope("layer2-pool1"):
        # 2x2 max pooling with stride 2 halves each spatial dimension;
        # padding="VALID" means no padding.
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
    # ----------------------- Layer 2 ----------------------------
    with tf.variable_scope("layer3-conv2"):
        # Same pattern as layer 1, with the filter count raised to 64.
        conv2_weights = tf.get_variable("weight", [5, 5, 32, 64],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [64], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # ----------------------- Layer 3 ----------------------------
    with tf.variable_scope("layer5-conv3"):
        # 3x3 filters from here on, with the channel count raised to 128.
        conv3_weights = tf.get_variable("weight", [3, 3, 64, 128],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv3_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))
    with tf.name_scope("layer6-pool3"):
        pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # ----------------------- Layer 4 ----------------------------
    with tf.variable_scope("layer7-conv4"):
        conv4_weights = tf.get_variable("weight", [3, 3, 128, 128],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv4_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv4 = tf.nn.conv2d(pool3, conv4_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu4 = tf.nn.relu(tf.nn.bias_add(conv4, conv4_biases))
    with tf.name_scope("layer8-pool4"):
        pool4 = tf.nn.max_pool(relu4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Four 2x2/stride-2 poolings shrink 100x100 to 50 -> 25 -> 12 -> 6, so the
    # flattened feature vector has 6 * 6 * 128 elements.
    nodes = 6 * 6 * 128
    reshaped = tf.reshape(pool4, [-1, nodes])
    # ----------------------- Layer 5 ----------------------------
    with tf.variable_scope('layer9-fc1'):
        # Fully connected layer with 1024 hidden units.
        fc1_weights = tf.get_variable("weight", [nodes, 1024],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only fully connected weights are L2-regularized.
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1))
        # ReLU activation.
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        # Dropout during training to reduce overfitting.
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)
    # ----------------------- Layer 6 ----------------------------
    with tf.variable_scope('layer10-fc2'):
        # Second fully connected layer: 1024 -> 512 hidden units.
        fc2_weights = tf.get_variable("weight", [1024, 512],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
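        # The original listing breaks off here. The remainder of fc2 below is
        # a sketch reconstructed by analogy with fc1 (regularization, bias,
        # ReLU, dropout); it is not from the source.
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1))
        fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases)
        if train:
            fc2 = tf.nn.dropout(fc2, 0.5)
    # ----------------------- Layer 7 (assumed) ----------------------------
    # A final fully connected layer mapping the 512 units to n_class logits,
    # implied by the "three fully connected layers" description above.
    with tf.variable_scope('layer11-fc3'):
        fc3_weights = tf.get_variable("weight", [512, n_class],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc3_weights))
        fc3_biases = tf.get_variable("bias", [n_class], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc2, fc3_weights) + fc3_biases
    return logit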
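# --------------------- Training loop (sketch) ----------------------
# Nothing below appears in the original listing; it is a minimal sketch of
# how the pieces above are typically wired together in TF 1.x: build the
# logits with inference(), add softmax cross-entropy plus the regularization
# terms collected in 'losses', and drive the input queues with a Coordinator.
# The regularization weight and learning rate are assumptions.
regularizer = tf.contrib.layers.l2_regularizer(0.0001)
logit = inference(x, True, regularizer, channels, n_class)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=y)
loss = tf.reduce_mean(cross_entropy) + tf.add_n(tf.get_collection('losses'))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)

with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        step = 0
        while not coord.should_stop():
            # Pull one shuffled batch from the queue and run a training step.
            images, labels = sess.run([imageBatch, labelBatch])
            _, loss_val = sess.run([train_op, loss], feed_dict={x: images, y: labels})
            if step % 100 == 0:
                print('step %d, loss %f' % (step, loss_val))
            step += 1
    except tf.errors.OutOfRangeError:
        # Raised once the queue has served num_epochs passes over the data.
        print('training finished')
    finally:
        coord.request_stop()
    coord.join(threads)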