from keras.applications.vgg16 import VGG16
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, load_model
from keras.layers import Dropout, Flatten, Dense, Convolution2D, MaxPooling2D
import os
import cv2
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
# Root folder of the face dataset: one sub-directory per identity, each
# holding that person's face images.
root_path = "D:/C Moving Destination/Documents/Tencent Files/2451360621/FileRecv/vggface/"
dirs = os.listdir(root_path)

img_paths = []  # flat list of every image file path, grouped by identity
lens = []       # image count per identity, aligned with `dirs`
for dir_ in dirs:
    img_filenames = os.listdir(root_path + dir_)
    # Reuse the listing we already have instead of hitting the filesystem a
    # second time (the original called os.listdir twice per directory, which
    # is slower and could disagree if the directory changed in between).
    lens.append(len(img_filenames))
    for name in img_filenames:
        img_paths.append(root_path + dir_ + "/" + name)
# Integer class label for every image: all images of identity i get label i,
# in the same order the paths were collected above.
labels = np.repeat(np.arange(len(lens)), lens)
# Load every image as a (1, 150, 150, 3) RGB array and stack into one batch.
imgs = []
for path in img_paths:
    img = cv2.imread(path)
    if img is None:
        # cv2.imread signals failure by returning None instead of raising;
        # fail fast with the offending path rather than crashing later in
        # cv2.resize with an unhelpful error.
        raise IOError("Failed to read image: " + path)
    img = cv2.resize(img, (150, 150))
    img = img[:, :, ::-1]  # OpenCV loads BGR; flip channels to RGB
    imgs.append(img[np.newaxis, ...])  # add a leading batch axis
imgs = np.concatenate(imgs, axis=0)

# One-hot encode the integer labels for categorical_crossentropy.
labels = to_categorical(labels, len(np.unique(labels)))
# Hold out 10% of the data for validation.
x_train, x_test, y_train, y_test = train_test_split(imgs, labels, test_size=0.1)
# VGG-style CNN: three conv stages (two 3x3 conv layers + 2x2 max-pool each)
# with 32/64/128 filters, then a dense classifier head with dropout.
model = Sequential()
need_input_shape = True
for n_filters in (32, 64, 128):
    for _ in range(2):
        conv_kwargs = dict(filters=n_filters, kernel_size=3, strides=1,
                           padding='same', activation='relu')
        if need_input_shape:
            # Only the very first layer declares the input shape.
            conv_kwargs['input_shape'] = (150, 150, 3)
            need_input_shape = False
        model.add(Convolution2D(**conv_kwargs))
    model.add(MaxPooling2D(pool_size=2, strides=2, padding='valid'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
# Output width follows the number of one-hot classes.
model.add(Dense(y_train.shape[1], activation='softmax'))

model.compile(optimizer='adam', loss="categorical_crossentropy", metrics=['accuracy'])
model.fit(x_train, y_train, epochs=20, validation_data=(x_test, y_test))
model.save("cnn.h5")
# Sanity check: classify one known image and print the predicted identity
# (the directory name at the argmax of the softmax output).
sample = cv2.imread(root_path + "n000009/0001_01.jpg")
sample = cv2.resize(sample, (150, 150))[:, :, ::-1]  # match training preprocessing (BGR -> RGB)
batch = sample[np.newaxis, ...]  # model expects a batch dimension
print(dirs[np.argmax(model.predict(batch))])
没有合适的资源?快使用搜索试试~ 我知道了~
温馨提示
卷积神经网络 Python tensorflow keras CNN VGG16 imagenet 预训练权重 人脸识别分类 训练集测试集评估准确率 maxpolling dropout jupyter notebook numpy pandas 数据分析 数据挖掘 深度学习 机器学习 人工智能
资源推荐
资源详情
资源评论
收起资源包目录
65.rar (9个子文件)
vgg16.h5 24.04MB
h165.py 2KB
bottleneck_features_train.npy 31.69MB
cnn.py 3KB
fcn.h5 24.04MB
cv.ipynb 74KB
h165.ipynb 12KB
test.py 959B
cv.py 2KB
共 9 条
- 1
资源评论
HinomotoOniko
- 粉丝: 1831
- 资源: 139
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功