# encoding: utf-8
'''
@author: 真梦行路
@file: cnn.py
@time: 18-8-30 下午10:02
'''
######################### Import third-party libraries ###################################
import time
import math
import random
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import dataset
import cv2
from sklearn.metrics import confusion_matrix
from datetime import timedelta
################################### Data paths ########################################
train_path = '/home/wcy/图片/python/data/train'
test_path = '/home/wcy/图片/python/data0/test'
checkpoint_dir = '/home/wcy/图片/00/models'
################################### Data paths ########################################
############################ Model hyper-parameters ###########################################
# Convolutional Layer 1.
filter_size1 = 3
num_filters1 = 36
# Convolutional Layer 2.
filter_size2 = 3
num_filters2 = 36
# Convolutional Layer 3.
filter_size3 = 3
num_filters3 = 64
# Fully-connected layer.
fc_size = 256 # Number of neurons in fully-connected layer.
# Number of color channels for the images: 3 channels for RGB.
num_channels = 3
# image dimensions (only squares for now)
img_size = 70
# Size of image when flattened to a single dimension
img_size_flat = img_size * img_size * num_channels
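# With img_size = 70 and num_channels = 3 this is 70 * 70 * 3 = 14700 values per image.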
# Tuple with height and width of images used to reshape arrays.
img_shape = (img_size, img_size)
# class info
# classes = ['dog', 'cat']
classes = os.listdir(train_path)
num_classes = len(classes)
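# Each class is assumed to be a sub-directory of train_path holding the images
# of that class, e.g. train/dog/*.jpg and train/cat/*.jpg.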
# batch size
batch_size = 1
# validation split
validation_size = .16
# how long to wait after validation loss stops improving before terminating training
early_stopping = None # use None if you don't want to implement early stopping
############################ Model hyper-parameters ###########################################
############################ Load the data ##############################################
data = dataset.read_train_sets(train_path, img_size, classes, validation_size=validation_size)
test_images, test_ids = dataset.read_test_set(test_path, img_size)
print(data.train.labels.shape)
print("Size of:")
print("- Training-set:\t\t{}".format(len(data.train.labels)))
print("- Test-set:\t\t{}".format(len(test_images)))
print("- Validation-set:\t{}".format(len(data.valid.labels)))
############################ Load the data ##############################################
###### Function ################## Display 9 randomly chosen images ######################################
def plot_images(images, cls_true, cls_pred=None):
if len(images) == 0:
print("no images to show")
return
else:
random_indices = random.sample(range(len(images)), min(len(images), 9))
images, cls_true = zip(*[(images[i], cls_true[i]) for i in random_indices])
# print('1111111111111111111',len(images))
# Create figure with 3x3 sub-plots.
fig, axes = plt.subplots(3, 3)
fig.subplots_adjust(hspace=0.3, wspace=0.3)
for i, ax in enumerate(axes.flat):
# Plot image.
ax.imshow(images[i].reshape(img_size, img_size, num_channels))
# Show true and predicted classes.
if cls_pred is None:
xlabel = "True: {0}".format(cls_true[i])
else:
xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i])
# Show the classes as the label on the x-axis.
ax.set_xlabel(xlabel)
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
###### Function ################### Display 9 randomly chosen images ######################################
############################ Display 9 randomly chosen images ######################################
# # Get some random images and their labels from the train set.
# images, cls_true = data.train.images, data.train.cls
# # Plot the images and labels using our helper-function above.
# plot_images(images=images, cls_true=cls_true)
############################ Display 9 randomly chosen images ######################################
############################ Enhancement-layer weights #########################################
def new_weights_1(shape):
    # Fixed 5x5 edge-enhancement (Laplacian-like) kernel whose coefficients sum to zero.
    # The `shape` argument is ignored; the filter is always [5, 5, 3, 3], and each
    # output channel applies the kernel to the corresponding input channel only.
    kernel = np.array([[1,  0,  1,  0, 1],
                       [0, -4, -4, -4, 0],
                       [1, -4, 24, -4, 1],
                       [0, -4, -4, -4, 0],
                       [1,  0,  1,  0, 1]], dtype=np.float32)
    filt = np.zeros((5, 5, 3, 3), dtype=np.float32)
    for c in range(3):
        filt[:, :, c, c] = kernel
    return tf.Variable(tf.constant(filt), name='v1')
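# A minimal sketch (assumption, not wired into the graph below) of applying the
# fixed kernel directly to a batch of images, where `images` is a float32
# tensor of shape [None, img_size, img_size, 3]:
#
#   enhance_filter = new_weights_1(shape=[5, 5, 3, 3])
#   enhanced = tf.nn.conv2d(input=images, filter=enhance_filter,
#                           strides=[1, 1, 1, 1], padding='SAME')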
############################ Enhancement-layer weights #########################################
############################ Define the network structure ##########################################
def new_weights(shape):
return tf.Variable(tf.truncated_normal(shape, stddev=0.05))
def new_biases(length):
return tf.Variable(tf.constant(0.05, shape=[length]))
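# For example, new_weights(shape=[3, 3, 3, 36]) creates 36 randomly initialised
# 3x3 filters over 3 input channels, and new_biases(length=36) one bias per filter.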
def new_conv_layer(input, # The previous layer.
num_input_channels, # Num. channels in prev. layer.
filter_size, # Width and height of each filter.
num_filters, # Number of filters.
use_pooling=True): # Use 2x2 max-pooling.
# Shape of the filter-weights for the convolution.
# This format is determined by the TensorFlow API.
shape = [filter_size, filter_size, num_input_channels, num_filters]
# Create new weights aka. filters with the given shape.
weights = new_weights(shape=shape)
# print('##########',weights)
# Create new biases, one for each filter.
biases = new_biases(length=num_filters)
layer = tf.nn.conv2d(input=input,
filter=weights,
strides=[1, 1, 1, 1],
padding='SAME')
    ############################ Convolution with the fixed enhancement kernel (optional) #######################################
    # weights_1 = new_weights_1(shape=shape)
    # layer = tf.nn.conv2d(input=layer,
    #                      filter=weights_1,
    #                      strides=[1, 1, 1, 1],
    #                      padding='SAME')
    ############################ Convolution with the fixed enhancement kernel (optional) #########################################
# Add the biases to the results of the convolution.
# A bias-value is added to each filter-channel.
layer += biases
# Use pooling to down-sample the image resolution?
if use_pooling:
# This is 2x2 max-pooling, which means that we
# consider 2x2 windows and select the largest value
# in each window. Then we move 2 pixels to the next window.
layer = tf.nn.max_pool(value=layer,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
layer = tf.nn.relu(layer)
return layer, weights
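# A minimal sketch (assumption, not part of this file as written) of stacking the
# three convolutional layers with the hyper-parameters defined above, assuming
# x_image is a float32 tensor of shape [None, img_size, img_size, num_channels]:
#
#   layer_conv1, weights_conv1 = new_conv_layer(x_image, num_channels,
#                                               filter_size1, num_filters1)
#   layer_conv2, weights_conv2 = new_conv_layer(layer_conv1, num_filters1,
#                                               filter_size2, num_filters2)
#   layer_conv3, weights_conv3 = new_conv_layer(layer_conv2, num_filters2,
#                                               filter_size3, num_filters3)
#
# With 2x2 max-pooling after each layer, the 70x70 input is reduced to 35x35,
# then 18x18, then 9x9 feature maps ('SAME' padding rounds up).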
def flatten_layer(layer):
# Get the shape of the input layer.
layer_shape = layer.get_shape()
# The shape of the input layer is assumed to be:
# layer_shape == [num_images, img_height, img_width, num_channels]
# The number of features is: img_height * img_width * num_channels
# We can use a function from TensorFlow to calculate this.
num_features = layer_shape[1:4].num_elements()
# Reshape the layer to [num_images, num_features].
# Note that we just set the size of the second dimension
# to num_features and the size of the first dimension to -1
# which means the size in that dimension is calculated
# so the total size of the tensor is unchanged from the reshaping.
layer_flat = tf.reshape(layer, [-1, num_features])
# The shape of the flattened layer is now:
# [num_images, img_height * img_width * num_channels]
# Return both the flattened layer and the number of features.
return layer_flat, num_features
def new_fc_layer(input,          # The previous layer.
                 num_inputs,     # Num. inputs from prev. layer.
                 num_outputs,    # Num. outputs.
                 use_relu=True): # Apply ReLU to the output?
    # Create new weights and biases for this layer.
    weights = new_weights(shape=[num_inputs, num_outputs])
    biases = new_biases(length=num_outputs)
    # Multiply the input by the weights and add the biases.
    layer = tf.matmul(input, weights) + biases
    if use_relu:
        layer = tf.nn.relu(layer)
    return layer
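# Continuing the sketch above (assumption): the flatten and fully-connected
# helpers would typically be combined as follows to finish the classifier:
#
#   layer_flat, num_features = flatten_layer(layer_conv3)
#   layer_fc1 = new_fc_layer(layer_flat, num_features, fc_size, use_relu=True)
#   layer_fc2 = new_fc_layer(layer_fc1, fc_size, num_classes, use_relu=False)
#   y_pred = tf.nn.softmax(layer_fc2)
#   y_pred_cls = tf.argmax(y_pred, axis=1)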