# -*- coding: utf-8 -*-
#Author: weir
#E-mail: weirmicle@163.com
#Date: 2017/12/23
#the kernel model is from http://blog.csdn.net/wang1127248268/article/details/77258055
import numpy as np
import matplotlib.pyplot as plt
import cv2
import struct
import tensorflow as tf
import keras.backend.tensorflow_backend as ktf
import keras as ks
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, Activation
from keras.layers.convolutional import Conv2D,MaxPooling2D
from keras.utils.np_utils import to_categorical
######################
# Training hyper-parameters.
input_imgs_num = 20000  # number of training images actually used (MNIST max: 60_000)
test_imgs_num = 10000  # number of test images scored in the manual loop (MNIST max: 10_000)
batch_size = 50  # minibatch size for model.fit
epochs=20  # number of training passes over the selected subset
######################
def get_session(gpu_fraction=0.333):
    """Create a TensorFlow session capped at *gpu_fraction* of GPU memory.

    allow_growth lets the process start small and grow up to the cap
    instead of reserving the whole fraction up front.
    """
    memory_opts = tf.GPUOptions(
        per_process_gpu_memory_fraction=gpu_fraction,
        allow_growth=True,
    )
    session_config = tf.ConfigProto(gpu_options=memory_opts)
    return tf.Session(config=session_config)
# Paths to the raw MNIST idx-ubyte files (expected under ./data/).
# train path:
train_images_idx3_ubyte_file = './data/train-images-idx3-ubyte'
train_labels_idx1_ubyte_file = './data/train-labels-idx1-ubyte'
# test path:
test_images_idx3_ubyte_file = './data/t10k-images-idx3-ubyte'
test_labels_idx1_ubyte_file = './data/t10k-labels-idx1-ubyte'
def decode_idx3_ubyte_img(idx3_ubyte_file):
    """Decode an MNIST idx3-ubyte image file into a uint8 ndarray.

    :param idx3_ubyte_file: the path of file
    :return: ndarray of shape (img_batches, img_rows, img_cols), dtype uint8
    byte[0 :3 ]: magic number, the file formatter flag
    byte[4 :7 ]: id0,img_batches
    byte[8 :11]: id1,img_rows
    byte[12:15]: id2,img_cols
    byte[16: ]: pixels of ubyte
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(idx3_ubyte_file, "rb") as binfile:
        bin_head = binfile.read(16)
        bin_data = binfile.read()
    # Header fields are big-endian 32-bit ints per the IDX format.
    magic_num, img_batches, img_rows, img_cols = struct.unpack(">4i", bin_head)
    # np.frombuffer reads the pixel bytes directly as uint8, avoiding the
    # huge intermediate Python tuple struct.unpack would build.
    imgs_2d = np.frombuffer(
        bin_data, dtype=np.uint8, count=img_batches * img_rows * img_cols
    ).reshape(img_batches, img_rows, img_cols)
    # Copy so the result owns writable memory instead of aliasing bin_data.
    return imgs_2d.copy()
def decode_idx1_ubyte_img(idx1_ubyte_file):
    """Decode an MNIST idx1-ubyte label file into a 1-D uint8 ndarray.

    :param idx1_ubyte_file: the path of file
    :return: 1-D ndarray of length num_items, dtype uint8
    byte[0 :3 ]: magic number, the file formatter flag
    byte[4 :7 ]: id0,number of items
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(idx1_ubyte_file, "rb") as binfile:
        bin_head = binfile.read(8)
        bin_data = binfile.read()
    # Header fields are big-endian 32-bit ints per the IDX format.
    magic_num, num_items = struct.unpack(">2i", bin_head)
    # Direct byte view instead of struct.unpack's per-byte Python tuple;
    # copy so the result owns its memory.
    return np.frombuffer(bin_data, dtype=np.uint8, count=num_items).copy()
def img_3dto4d(img):
    """Append a trailing channel axis: (N, H, W) -> (N, H, W, 1).

    Generalized from the original hard-coded 28x28 so any image stack
    works; the result is byte-identical for MNIST input.

    :param img: 3-D ndarray of images
    :return: the same data reshaped to 4-D with a single channel
    """
    print("in 3dto4d(),img.shape", img.shape, "img.dtype: ", img.dtype)
    return img.reshape(img.shape[0], img.shape[1], img.shape[2], 1)
def main():
    """Train a LeNet-style CNN on MNIST and report test accuracy."""
    # Cap TF GPU memory before Keras creates its default session.
    ktf.set_session(get_session())

    # Load train / test sets from the raw idx files.
    train_imgs = decode_idx3_ubyte_img(train_images_idx3_ubyte_file)
    train_label = decode_idx1_ubyte_img(train_labels_idx1_ubyte_file)
    test_imgs = decode_idx3_ubyte_img(test_images_idx3_ubyte_file)
    test_label = decode_idx1_ubyte_img(test_labels_idx1_ubyte_file)

    train_img4d = img_3dto4d(train_imgs)
    test_img4d = img_3dto4d(test_imgs)
    labels_onehot = np_utils.to_categorical(train_label, num_classes=10)  # to onehot
    test_labels_onehot = np_utils.to_categorical(test_label, num_classes=10)  # to onehot

    x_imgs = train_img4d[0:input_imgs_num]
    y_labels = labels_onehot[0:input_imgs_num]
    # BUG FIX: astype() returns a new array; the original discarded its
    # result, so the cast was a no-op.
    x_imgs = x_imgs.astype('float32') / 255
    # BUG FIX: the original never scaled the test images, so the model was
    # evaluated on a different input range than it was trained on.
    test_x = test_img4d.astype('float32') / 255

    # LeNet-style CNN: two conv+pool stages, then two dense layers.
    model = Sequential()
    model.add(Conv2D(32, (5, 5), strides=(1, 1), input_shape=(28, 28, 1),
                     padding='valid', activation='relu',
                     kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (5, 5), strides=(1, 1), padding='valid',
                     activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    model.compile(optimizer='sgd', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    model.fit(x=x_imgs, y=y_labels, batch_size=batch_size, epochs=epochs,
              verbose=2)

    # Manual accuracy check on the first test_imgs_num samples.
    y = model.predict(test_x[0:test_imgs_num])
    res = np.array([np.argmax(i) for i in y])
    # Vectorized count replaces the original element-by-element loop.
    match = int(np.sum(test_label[0:test_imgs_num] == res))
    match_ratio = 1.0 * match / test_imgs_num
    print(match, "/", test_imgs_num, "match_ratio = ", match_ratio)

    # Keras' own (loss, accuracy) over the full test set; renamed from
    # 'eval', which shadowed the builtin.
    scores = model.evaluate(test_x, test_labels_onehot, 128)
    print(scores)
# Run training only when executed as a script, not on import.
if __name__ == "__main__":
    main()
# NOTE(review): the lines below are residue from a file-sharing web page
# (download metadata, uploader info, comment widgets) that was accidentally
# appended to the script and made it unparseable. Commented out to restore
# a valid Python file; safe to delete entirely.
# lenet_test.zip_ai_keras_lenet_lenet test_tensorflow
# 版权申诉
# 59 浏览量
# 2022-09-24
# 22:24:36
# 上传
# 评论
# 收藏 11.06MB ZIP 举报
# weixin_42651887
# - 粉丝: 76
# - 资源: 1万+
# 最新资源
# - 5uonly.apk
# - 2023-04-06-项目笔记 - 第一百十九阶段 - 4.4.2.117全局变量的作用域-117 -2024.04.30
# - 2023-04-06-项目笔记 - 第一百十九阶段 - 4.4.2.117全局变量的作用域-117 -2024.04.30
# - 前端开发技术实验报告:内含4四实验&实验报告
# - Highlight Plus v20.0.1
# - 林周瑜-论文.docx
# - 基于MIC+NE555光敏电阻的声光控电路Multisim仿真原理图
# - 基于JSP毕业设计-基于WEB操作系统课程教学网站的设计与实现(源代码+论文).zip
# - 基于LM324和LM386的音响放大器Multisim仿真+PCB电路原理图
# - Python机器学习与数据挖掘环境配置与库验证
# - 资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
# - 点击此处反馈
# - 评论0