#coding=UTF8
# Author: XieXu
# Date: 2022/11/21 14:54
# [Set up the GPU]
import tensorflow as tf
gpus = tf.config.list_physical_devices("GPU")
if gpus:
    tf.config.experimental.set_memory_growth(gpus[0], True)  # allocate GPU memory on demand
    tf.config.set_visible_devices([gpus[0]], "GPU")
# Print the GPU list to confirm a GPU is available
print(gpus)
# [Import the dataset and inspect the data]
import matplotlib.pyplot as plt
# Chinese font support for plots
plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False    # display minus signs correctly
import os, PIL
# Set random seeds so the results are as reproducible as possible
import numpy as np
np.random.seed(1)
tf.random.set_seed(1)
import pathlib
data_dir = "./data"
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*')))
print("Total number of images:", image_count)
batch_size = 4
img_height = 299
img_width = 299
"""
For a detailed introduction to image_dataset_from_directory(), see: https://mtyjkh.blog.csdn.net/article/details/117018789
"""
# Split into training and validation sets
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size)
# Output of the call above:
# Found 4000 files belonging to 4 classes.
# Using 3200 files for training.
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size)
# Output of the call above:
# Found 4000 files belonging to 4 classes.
# Using 800 files for validation.
class_names = train_ds.class_names
print('class_names:', class_names)  # class_names: ['cat', 'chook', 'dog', 'horse']
for image_batch, labels_batch in train_ds:
    print('image_batch.shape:', image_batch.shape)    # image_batch.shape: (4, 299, 299, 3)
    print('labels_batch.shape:', labels_batch.shape)  # labels_batch.shape: (4,)
    break
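# Optional sanity check -- a minimal sketch not present in the (truncated) source:
# use the matplotlib setup configured above to display one batch of images with
# their class labels before building the input pipeline.
plt.figure(figsize=(10, 5))
for images, labels in train_ds.take(1):
    for i in range(images.shape[0]):          # batch_size images per batch
        plt.subplot(1, batch_size, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])     # integer label -> class name
        plt.axis("off")
plt.show()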
AUTOTUNE = tf.data.AUTOTUNE
train_ds = (
    train_ds.cache()
    .shuffle(1000)
    # .map(train_preprocessing)  # a preprocessing function can be mapped here (see the sketch below)
    # .batch(batch_size)         # batch_size was already set in image_dataset_from_directory
    .prefetch(buffer_size=AUTOTUNE)
)
val_ds = (
    val_ds.cache()
    .shuffle(1000)
    # .map(val_preprocessing)    # a preprocessing function can be mapped here (see the sketch below)
    # .batch(batch_size)         # batch_size was already set in image_dataset_from_directory
    .prefetch(buffer_size=AUTOTUNE)
)
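# The commented-out .map(...) calls above reference train_preprocessing /
# val_preprocessing, which the source never defines. This is a hypothetical
# sketch of the shape such a function must have: it receives a batch of
# (images, labels) and returns them in the same structure. If actually used,
# it must be defined before the pipelines above are built.
def train_preprocessing(image, label):
    image = tf.cast(image, tf.float32) / 255.0  # scale pixel values to [0, 1]
    return image, label

val_preprocessing = train_preprocessing  # validation typically reuses the same scaling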
# ====================================#
#       The Xception network
# ====================================#
# Import everything from tensorflow.keras. The original mixed `keras` and
# `tensorflow.python.keras` imports, which yields incompatible layer/model
# classes (and the AttributeError noted in the source); one import root avoids that.
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Model, load_model
from tensorflow.keras import layers
from tensorflow.keras.layers import (Dense, Input, BatchNormalization, Activation,
                                     Conv2D, SeparableConv2D, MaxPooling2D,
                                     GlobalAveragePooling2D, GlobalMaxPooling2D)
from tensorflow.keras import backend as K
from tensorflow.keras.applications.imagenet_utils import decode_predictions
def Xception(input_shape=[299, 299, 3], classes=1000):
    img_input = Input(shape=input_shape)

    # =================#
    #    Entry flow
    # =================#
    # block1
    # 299,299,3 -> 149,149,64
    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    # block2
    # 149,149,64 -> 75,75,128
    residual = Conv2D(128, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block2_pool')(x)
    x = layers.add([x, residual])

    # block3
    # 75,75,128 -> 38,38,256
    residual = Conv2D(256, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block3_pool')(x)
    x = layers.add([x, residual])

    # block4
    # 38,38,256 -> 19,19,728
    residual = Conv2D(728, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block4_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block4_pool')(x)
    x = layers.add([x, residual])

    # =================#
    #    Middle flow
    # =================#
    # block5 -- block12
    # 19,19,728 -> 19,19,728
    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv1')(x)
        x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv2')(x)
        x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv3')(x)
        x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)
        x = layers.add([x, residual])

    # =================#
    #    Exit flow
    # =================#
    # block13
    # 19,19,728 -> 10,10,1024
    residual = Conv2D(1024, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block13_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
    x = BatchNormalization(name='block13_sepconv1_bn')(x)
    x = Activation('relu', name='block13_sepconv2_act')(x)
    # The source file is truncated mid-line here; the remainder below is
    # reconstructed from the standard Keras Xception exit flow, which the
    # preceding code follows line for line.
    x = SeparableConv2D(1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
    x = BatchNormalization(name='block13_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block13_pool')(x)
    x = layers.add([x, residual])

    # block14
    # 10,10,1024 -> 10,10,2048
    x = SeparableConv2D(1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
    x = BatchNormalization(name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)
    x = SeparableConv2D(2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
    x = BatchNormalization(name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    x = GlobalAveragePooling2D(name='avg_pool')(x)
    x = Dense(classes, activation='softmax', name='predictions')(x)

    return Model(img_input, x, name='xception')
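# A minimal sketch of how the script would typically continue; the original is
# cut off before this point, so the optimizer, learning rate, and epoch count
# here are illustrative assumptions, not the author's settings.
model = Xception(input_shape=[img_height, img_width, 3], classes=len(class_names))
model.summary()

model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),      # assumed learning rate
    loss='sparse_categorical_crossentropy',                      # labels are integer class indices
    metrics=['accuracy'])

history = model.fit(train_ds, validation_data=val_ds, epochs=10)  # assumed epoch count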