# encoding: utf-8
'''
@author: weiyang_tang
@contact: weiyang_tang@126.com
@file: model_train.py
@time: 2019-02-22 10:18
@desc: Train the face-recognition model
'''
from __future__ import print_function
import random
import numpy as np
from keras import backend as K
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.models import Sequential
from keras.models import load_model
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
# from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
from data_input import extract_data, resize_with_pad, IMAGE_SIZE, read_file
faceData_file_path = './faceData/'
class Dataset(object):
    """Holds the train/validation/test splits of the face-image data set.

    All attributes are None until read() is called.
    """

    def __init__(self):
        self.X_train = None  # image tensors, float32, scaled to [0, 1]
        self.X_valid = None
        self.X_test = None
        self.Y_train = None  # one-hot label matrices
        self.Y_valid = None
        self.Y_test = None

    def read(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE, img_channels=3, nb_classes=3):
        """Load images from disk, split them 70/15/15, and store the splits.

        img_rows/img_cols: target image size (defaults to IMAGE_SIZE).
        img_channels: kept for interface compatibility; images are RGB (3).
        nb_classes: fallback only — overridden by the class count that
                    read_file() discovers on disk.
        """
        images, labels, counter = read_file(faceData_file_path)
        # The true class count comes from the directory structure on disk.
        nb_classes = counter
        print(nb_classes)
        labels = np.reshape(labels, [-1])  # flatten to a 1-D label vector
        # First split: 70% train, 30% holdout.
        X_train, X_test, y_train, y_test = train_test_split(
            images, labels, test_size=0.3,
            random_state=random.randint(0, 100))
        # BUG FIX: the original code re-split the FULL data set here, so the
        # validation and test sets overlapped the training set (data
        # leakage). Split the 30% holdout in half instead: 15% validation,
        # 15% test, with all three sets disjoint.
        X_valid, X_test, y_valid, y_test = train_test_split(
            X_test, y_test, test_size=0.5,
            random_state=random.randint(0, 100))
        if K.image_dim_ordering() == 'th':
            # Theano backend: channels-first layout.
            X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)
            X_valid = X_valid.reshape(X_valid.shape[0], 3, img_rows, img_cols)
            X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols)
            input_shape = (3, img_rows, img_cols)
        else:
            # TensorFlow backend: channels-last layout.
            X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
            X_valid = X_valid.reshape(X_valid.shape[0], img_rows, img_cols, 3)
            X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
            input_shape = (img_rows, img_cols, 3)
        print('X_train shape:', X_train.shape)
        print(X_train.shape[0], 'train samples')
        print(X_valid.shape[0], 'valid samples')
        print(X_test.shape[0], 'test samples')
        # Convert integer class labels to one-hot matrices.
        Y_train = np_utils.to_categorical(y_train, nb_classes)
        Y_valid = np_utils.to_categorical(y_valid, nb_classes)
        Y_test = np_utils.to_categorical(y_test, nb_classes)
        # Scale pixel values from [0, 255] to [0, 1].
        X_train = X_train.astype('float32')
        X_valid = X_valid.astype('float32')
        X_test = X_test.astype('float32')
        X_train /= 255
        X_valid /= 255
        X_test /= 255
        self.X_train = X_train
        self.X_valid = X_valid
        self.X_test = X_test
        self.Y_train = Y_train
        self.Y_valid = Y_valid
        self.Y_test = Y_test
class Model(object):
FILE_PATH = './model/model.h5'
def __init__(self):
    """Create an untrained wrapper; the Keras model is attached later."""
    # build_model() or load() is responsible for populating this attribute.
    self.model = None
def build_model(self, dataset, nb_classes=3):
    """Build the CNN: two conv/pool blocks followed by a dense classifier.

    dataset: a Dataset whose read() has populated X_train/Y_train; the
             input shape and class count are derived from it.
    nb_classes: fallback class count, used only when it cannot be derived
                from dataset.Y_train.
    """
    # BUG FIX: the original re-read every image from disk here just to
    # count the classes, ignoring the nb_classes argument entirely. The
    # one-hot label matrix already encodes the class count as its width.
    if dataset.Y_train is not None:
        nb_classes = dataset.Y_train.shape[1]
    self.model = Sequential()
    # Block 1: two 3x3 convolutions, 32 filters each, then 2x2 max-pool.
    self.model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=dataset.X_train.shape[1:]))
    self.model.add(Activation('relu'))
    self.model.add(Convolution2D(32, 3, 3))
    self.model.add(Activation('relu'))
    self.model.add(MaxPooling2D(pool_size=(2, 2)))
    self.model.add(Dropout(0.25))
    # Block 2: two 3x3 convolutions, 64 filters each, then 2x2 max-pool.
    self.model.add(Convolution2D(64, 3, 3, border_mode='same'))
    self.model.add(Activation('relu'))
    self.model.add(Convolution2D(64, 3, 3))
    self.model.add(Activation('relu'))
    self.model.add(MaxPooling2D(pool_size=(2, 2)))
    self.model.add(Dropout(0.25))
    # Classifier head: flatten -> 512-unit dense -> softmax over classes.
    self.model.add(Flatten())
    self.model.add(Dense(512))
    self.model.add(Activation('relu'))
    self.model.add(Dropout(0.5))
    self.model.add(Dense(nb_classes))
    self.model.add(Activation('softmax'))
    self.model.summary()
def train(self, dataset, batch_size=32, nb_epoch=40, data_augmentation=True):
    """Compile the model (SGD + momentum) and fit it on the training split.

    dataset: a Dataset with populated train/valid splits.
    data_augmentation: when True, fit via an ImageDataGenerator that
                       applies random rotations, shifts, and flips.
    """
    optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    self.model.compile(loss='categorical_crossentropy',
                       optimizer=optimizer,
                       metrics=['accuracy'])
    if data_augmentation:
        print('Using real-time data augmentation.')
        # Augment on the fly: geometric jitter only, no normalization.
        augmenter = ImageDataGenerator(
            featurewise_center=False,             # no dataset-mean centering
            samplewise_center=False,              # no per-sample centering
            featurewise_std_normalization=False,  # no dataset-std scaling
            samplewise_std_normalization=False,   # no per-sample std scaling
            zca_whitening=False,                  # no ZCA whitening
            rotation_range=20,                    # random rotation, degrees
            width_shift_range=0.2,                # horizontal shift fraction
            height_shift_range=0.2,               # vertical shift fraction
            horizontal_flip=True,                 # random mirror
            vertical_flip=False)                  # never flip upside down
        # Needed only when featurewise stats / ZCA are on; harmless here.
        augmenter.fit(dataset.X_train)
        batches = augmenter.flow(dataset.X_train, dataset.Y_train,
                                 batch_size=batch_size)
        self.model.fit_generator(batches,
                                 samples_per_epoch=dataset.X_train.shape[0],
                                 nb_epoch=nb_epoch,
                                 validation_data=(dataset.X_valid, dataset.Y_valid))
    else:
        print('Not using data augmentation.')
        self.model.fit(dataset.X_train, dataset.Y_train,
                       batch_size=batch_size,
                       nb_epoch=nb_epoch,
                       validation_data=(dataset.X_valid, dataset.Y_valid),
                       shuffle=True)
def save(self, file_path=FILE_PATH):
    """Persist the trained Keras model to *file_path* (HDF5 format)."""
    self.model.save(file_path)
    # BUG FIX: announce success only AFTER the save completes; the original
    # printed first, so a failed save still claimed the model was saved.
    print('Model Saved.')
def load(self, file_path=FILE_PATH):
    """Load a previously saved Keras model from *file_path*."""
    self.model = load_model(file_path)
    # BUG FIX: announce success only AFTER load_model returns; the original
    # printed first, so a failed load still claimed the model was loaded.
    print('Model Loaded.')
def predict(self, image):
    """Classify a single image and return the predicted class index.

    image: an image array; resized/padded and reshaped to the backend's
           expected (1, ...) batch layout if it is not already.
    """
    if K.image_dim_ordering() == 'th' and image.shape != (1, 3, IMAGE_SIZE, IMAGE_SIZE):
        image = resize_with_pad(image)
        image = image.reshape((1, 3, IMAGE_SIZE, IMAGE_SIZE))
    elif K.image_dim_ordering() == 'tf' and image.shape != (1, IMAGE_SIZE, IMAGE_SIZE, 3):
        image = resize_with_pad(image)
        image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))
    image = image.astype('float32')
    image /= 255  # scale pixel values into [0, 1], matching training data
    # BUG FIX: one forward pass is enough. The original ran the network
    # twice (predict_proba, then predict_classes) to get the same answer —
    # the predicted class IS the argmax of the probability vector.
    result = self.model.predict_proba(image)
    max_index = np.argmax(result)  # index of the most probable class
    print('准确度为', result[0][max_index])
    # if result[0][max_index] < 0.8:  # optional confidence threshold
    #     return -1
    return max_index
def evaluate(self, dataset):
score = self.model.evaluate(dataset.X_test, dataset
没有合适的资源?快使用搜索试试~ 我知道了~
基于opencv ,Keras, Tensorflow的人脸识别.zip
共7个文件
py:5个
xml:1个
h5:1个
0 下载量 79 浏览量
2024-03-27
17:10:21
上传
评论
收藏 46.48MB ZIP 举报
温馨提示
人工智能-深度学习-tensorflow
资源推荐
资源详情
资源评论
收起资源包目录
基于opencv ,Keras, Tensorflow的人脸识别.zip (7个子文件)
FaceRecognition_opencv-Keras-Tensorflow-master
__init__.py 130B
face_capture.py 3KB
data_input.py 3KB
camera_reader.py 2KB
model
model.h5 49.56MB
model_train.py 8KB
cv2_data
haarcascade_frontalface_alt.xml 661KB
共 7 条
- 1
资源评论
博士僧小星
- 粉丝: 1934
- 资源: 5894
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
最新资源
- 基于HTML的旅游网页制作源码设计.zip
- 基于HTML的旅游网页制作源码设计.zip
- 大数据实战Demo系统-MaxCompute数据仓库数据转换实践(PDF格式).rar
- 六一儿童节代码祝福六一儿童节代码祝福六一儿童节代码祝福.txt
- sql语句sql语句sql语句sql语句.txt
- ubuntu20.04安装教程ubuntu20.04安装教程.txt
- imgcache.0
- 高分项目基于faster-rcnn知识蒸馏的目标检测模型增量深度学习方法源码.zip
- 基于python和图数据库neo4j构建电影应用(高分毕设项目)
- 基于yolov5+SAHI模块完成超分辨率以及小目标检测演示源码+运行说明.zip
资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
点击此处反馈
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功