# =========================================================================
import tensorflow as tf
# =========================================================================
#
# inference: build the CNN forward pass.
#   images: 4-D image batch, tf.float32, shape [batch_size, width, height, channels]
#   returns logits: float tensor of shape [batch_size, n_classes]
def inference(images, batch_size, n_classes):
    """Build the forward pass of a small CNN (TF1 graph-mode code).

    Architecture: conv1(3x3x64) -> maxpool+LRN -> conv2(3x3x16) -> LRN+maxpool
    -> fc(128) -> fc(128) -> linear(n_classes).

    Args:
        images: 4-D float32 tensor [batch_size, width, height, channels].
            NOTE(review): conv1's kernel is [3, 3, 3, 64], so 3 input
            channels (RGB) are assumed — confirm at the call site.
        batch_size: static batch size, used to flatten pool2 for the FC layer.
        n_classes: number of output classes.

    Returns:
        softmax_linear: unscaled logits, shape [batch_size, n_classes].
    """
    # conv1: 3x3 convolution, 64 filters, stride 1, SAME padding, ReLU.
    with tf.variable_scope('conv1') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[3, 3, 3, 64], stddev=1.0, dtype=tf.float32),
                              name='weights', dtype=tf.float32)
        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[64]),
                             name='biases', dtype=tf.float32)
        conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    # pooling1 + local response normalization (pool first, then LRN).
    with tf.variable_scope('pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='pooling1')
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm1')

    # conv2: 3x3 convolution, 16 filters, stride 1, SAME padding, ReLU.
    with tf.variable_scope('conv2') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 16], stddev=0.1, dtype=tf.float32),
                              name='weights', dtype=tf.float32)
        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[16]),
                             name='biases', dtype=tf.float32)
        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name='conv2')

    # pooling2 (here LRN runs *before* the pool, unlike block 1; stride is 1
    # so this pool does not downsample — preserved as-is from the original).
    with tf.variable_scope('pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1],
                               padding='SAME', name='pooling2')

    # local3: first fully connected layer (flatten -> 128 units, ReLU).
    with tf.variable_scope('local3') as scope:
        reshape = tf.reshape(pool2, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value  # static flattened size (TF1 API)
        weights = tf.Variable(tf.truncated_normal(shape=[dim, 128], stddev=0.005, dtype=tf.float32),
                              name='weights', dtype=tf.float32)
        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[128]),
                             name='biases', dtype=tf.float32)
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    # local4: second fully connected layer (128 -> 128, ReLU).
    with tf.variable_scope('local4') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[128, 128], stddev=0.005, dtype=tf.float32),
                              name='weights', dtype=tf.float32)
        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[128]),
                             name='biases', dtype=tf.float32)
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='local4')

    # dropout (disabled in the original; kept for reference)
    # with tf.variable_scope('dropout') as scope:
    #     drop_out = tf.nn.dropout(local4, 0.8)

    # softmax_linear: final linear layer producing raw logits.
    # NOTE(review): the weight variable is named 'softmax_linear' rather than
    # 'weights' — inconsistent with the other layers, but kept byte-identical
    # so existing checkpoints still load.
    with tf.variable_scope('softmax_linear') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[128, n_classes], stddev=0.005, dtype=tf.float32),
                              name='softmax_linear', dtype=tf.float32)
        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[n_classes]),
                             name='biases', dtype=tf.float32)
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')
    return softmax_linear
# -----------------------------------------------------------------------------
# Loss computation: mean sparse softmax cross-entropy over the batch.
def losses(logits, labels):
    """Compute the mean sparse softmax cross-entropy loss.

    Args:
        logits: float tensor [batch_size, n_classes], unscaled logits.
        labels: integer tensor [batch_size] of class indices (sparse labels).

    Returns:
        loss: scalar float tensor, mean cross-entropy over the batch.
            Also emitted as a TensorBoard scalar summary under 'loss/loss'.
    """
    with tf.variable_scope('loss') as scope:
        # Sparse variant: labels are class indices, not one-hot vectors.
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy_per_example')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss
# --------------------------------------------------------------------------
# Training op construction.
#   Inputs: loss (scalar tensor), learning_rate (float).
#   Returns: train_op that performs one Adam update and increments global_step.
def trainning(loss, learning_rate):
    """Create the Adam training op for the given loss.

    NOTE(review): the name is misspelled ('trainning') but is kept unchanged
    because external callers (train.py) import it by this name.

    Args:
        loss: scalar loss tensor to minimize.
        learning_rate: learning rate passed to the Adam optimizer.

    Returns:
        train_op: op that applies one optimization step and increments
            the (non-trainable) global_step counter.
    """
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op
# -----------------------------------------------------------------------
def evaluation(logits, labels):
    """Compute top-1 accuracy for a batch.

    Args:
        logits: float tensor [batch_size, n_classes], unscaled logits.
        labels: integer tensor [batch_size] of true class indices.

    Returns:
        accuracy: scalar float tensor, fraction of correct top-1 predictions.
            Also emitted as a TensorBoard scalar under 'accuracy/accuracy'.
    """
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        # Cast to float32 (original used float16): half precision gains
        # nothing here and reduce_mean in float16 loses accuracy on the
        # reported metric for larger batches.
        correct = tf.cast(correct, tf.float32)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy
没有合适的资源?快使用搜索试试~ 我知道了~
基于tensorflow实现猫狗识别代码(CNN)
共51个文件
jpg:42个
py:3个
data-00000-of-00001:1个
5星 · 超过95%的资源 需积分: 29 4 下载量 32 浏览量
2022-10-15
12:42:49
上传
评论 1
收藏 20MB RAR 举报
温馨提示
通过TensorFlow搭建卷积神经网络实现猫狗识别代码,训练和测试代码完整,下载之后可以直接运行测试打码,运行环境在Linux下,需要把代码中的路径修改为本机实际路径
资源详情
资源评论
资源推荐
收起资源包目录
CatVsDogRecong.rar (51个子文件)
CatVsDogRecong
log
events.out.tfevents.1534313958.ubuntu 223KB
test.py 2KB
train.py 3KB
003.jpg 13KB
model.py 5KB
model.pyc 4KB
modelsave
checkpoint 173B
model_ckpt-999.data-00000-of-00001 24.32MB
model_ckpt-999.meta 121KB
model_ckpt-999.index 1KB
train_image
0
008.jpg 22KB
017.jpg 17KB
012.jpg 16KB
005.jpg 16KB
019.jpg 15KB
006.jpg 16KB
011.jpg 17KB
004.jpg 14KB
018.jpg 16KB
014.jpg 24KB
000.jpg 20KB
013.jpg 19KB
015.jpg 20KB
001.jpg 20KB
010.jpg 20KB
002.jpg 20KB
009.jpg 16KB
003.jpg 18KB
016.jpg 16KB
007.jpg 13KB
1
008.jpg 11KB
017.jpg 15KB
012.jpg 32KB
005.jpg 18KB
019.jpg 17KB
006.jpg 25KB
011.jpg 43KB
004.jpg 14KB
018.jpg 13KB
014.jpg 15KB
000.jpg 24KB
013.jpg 26KB
015.jpg 34KB
001.jpg 14KB
010.jpg 12KB
002.jpg 32KB
009.jpg 14KB
003.jpg 13KB
016.jpg 15KB
007.jpg 33KB
0.jpg 20KB
共 51 条
- 1
hutian1993
- 粉丝: 1
- 资源: 149
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功
评论5