import os

import tensorflow as tf

import readcifar10

slim = tf.contrib.slim
def model_fn_v1(net, keep_prob=0.5, is_training=True):
    batch_norm_params = {
        'is_training': is_training,
        'decay': 0.997,
        'epsilon': 1e-5,
        'scale': True,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    # Populated for inspection below; note that only the logits are returned.
    endpoints = {}
    with slim.arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(0.0001),
            weights_initializer=slim.variance_scaling_initializer(),
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME'):
                # activation_fn=None and normalizer_fn=None override the
                # arg_scope defaults, so these convolutions stay purely linear.
                net = slim.conv2d(net, 32, [3, 3], activation_fn=None, normalizer_fn=None, scope='conv1')
                net = slim.conv2d(net, 32, [3, 3], activation_fn=None, normalizer_fn=None, scope='conv2')
                endpoints["conv2"] = net
                net = slim.max_pool2d(net, [3, 3], stride=2, scope="pool1")
                net = slim.conv2d(net, 64, [3, 3], activation_fn=None, normalizer_fn=None, scope='conv3')
                net = slim.conv2d(net, 64, [3, 3], activation_fn=None, normalizer_fn=None, scope='conv4')
                endpoints["conv4"] = net
                net = slim.max_pool2d(net, [3, 3], stride=2, scope="pool2")
                net = slim.conv2d(net, 128, [3, 3], activation_fn=None, normalizer_fn=None, scope='conv5')
                net = slim.conv2d(net, 128, [3, 3], activation_fn=None, normalizer_fn=None, scope='conv6')
                # Global average pooling over the spatial dimensions.
                net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
                net = slim.flatten(net)
                net = slim.dropout(net, keep_prob, scope='dropout1')
                net = slim.fully_connected(net, 10, activation_fn=None, scope='fc2')
                endpoints["fc"] = net
    return net
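# Usage sketch (illustrative, not from the original file): model_fn_v1 maps a
# batch of CIFAR-10 images to 10-way logits; the local `endpoints` dict records
# intermediate tensors but is never returned, so callers see only the logits.
#
#   x = tf.placeholder(tf.float32, [None, 32, 32, 3])
#   logits = model_fn_v1(x, keep_prob=0.5, is_training=True)  # shape (None, 10)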
def resnet_blockneck(net, num_outputs, down, stride, is_training):
    # Bottleneck residual block: 1x1 reduce, 3x3, 1x1 restore, plus a shortcut.
    # num_outputs is the block's output channel count; the two inner
    # convolutions use num_outputs // down channels.
    batch_norm_params = {
        'is_training': is_training,
        'decay': 0.997,
        'epsilon': 1e-5,
        'scale': True,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    shortcut = net
    with slim.arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(0.0001),
            weights_initializer=slim.variance_scaling_initializer(),
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d, slim.max_pool2d], padding='SAME'):
                # Project the shortcut if the channel count changes.
                if num_outputs != net.get_shape().as_list()[-1]:
                    shortcut = slim.conv2d(net, num_outputs, [1, 1])
                # Downsample the shortcut to match the main path's stride.
                if stride != 1:
                    shortcut = slim.max_pool2d(shortcut, [3, 3], stride=stride, scope="pool_shortcut")
                net = slim.conv2d(net, num_outputs // down, [1, 1])
                net = slim.conv2d(net, num_outputs // down, [3, 3])
                if stride != 1:
                    net = slim.max_pool2d(net, [3, 3], stride=stride, scope="pool1")
                net = slim.conv2d(net, num_outputs, [1, 1])
    net = net + shortcut
    return net
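# Shape sketch (illustrative, NHWC): for x of shape (N, 32, 32, 64),
# resnet_blockneck(x, 128, 4, 2, True) gives
#   shortcut:  1x1 conv -> (N, 32, 32, 128), 3x3/2 max-pool -> (N, 16, 16, 128)
#   main path: 1x1 -> 32 ch, 3x3 -> 32 ch, 3x3/2 max-pool, 1x1 -> 128 ch
#   output:    main + shortcut = (N, 16, 16, 128)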
def model_fn_resnet(net, keep_prob=0.5, is_training=True):
    with slim.arg_scope([slim.conv2d, slim.max_pool2d], padding='SAME'):
        net = slim.conv2d(net, 64, [3, 3], activation_fn=tf.nn.relu)
        net = slim.conv2d(net, 64, [3, 3], activation_fn=tf.nn.relu)
        net = resnet_blockneck(net, 128, 4, 2, is_training)
        net = resnet_blockneck(net, 128, 4, 1, is_training)
        net = resnet_blockneck(net, 256, 4, 2, is_training)
        net = resnet_blockneck(net, 256, 4, 1, is_training)
        net = resnet_blockneck(net, 512, 4, 2, is_training)
        net = resnet_blockneck(net, 512, 4, 1, is_training)
        # net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
        net = slim.flatten(net)
        net = slim.fully_connected(net, 1024, activation_fn=tf.nn.relu, scope='fc1')
        net = slim.dropout(net, keep_prob, scope='dropout1')
        net = slim.fully_connected(net, 10, activation_fn=None, scope='fc2')
    return net
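# Shape note (illustrative, assuming 32x32 CIFAR-10 input): the three stride-2
# blocks reduce 32x32 -> 16x16 -> 8x8 -> 4x4, so slim.flatten feeds fc1 with
# 4*4*512 = 8192 features. Re-enabling the commented reduce_mean line would
# apply global average pooling instead and shrink fc1's input to 512.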
def model(image, keep_prob=0.5, is_training=True):
    batch_norm_params = {
        'is_training': is_training,
        'epsilon': 1e-5,
        'decay': 0.997,
        'scale': True,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    with slim.arg_scope(
            [slim.conv2d],
            weights_initializer=slim.variance_scaling_initializer(),
            activation_fn=tf.nn.relu,
            weights_regularizer=slim.l2_regularizer(0.0001),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        net = slim.conv2d(image, 32, [3, 3], scope='conv1')
        net = slim.conv2d(net, 32, [3, 3], scope='conv2')
        net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = slim.conv2d(net, 64, [3, 3], scope='conv3')
        net = slim.conv2d(net, 64, [3, 3], scope='conv4')
        net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool2')
        net = slim.conv2d(net, 128, [3, 3], scope='conv5')
        net = slim.conv2d(net, 128, [3, 3], scope='conv6')
        net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool3')
        net = slim.conv2d(net, 256, [3, 3], scope='conv7')
        net = tf.reduce_mean(net, axis=[1, 2])  # global average pooling: NHWC -> NC
        net = slim.flatten(net)
        net = slim.fully_connected(net, 1024)
        net = slim.dropout(net, keep_prob)
        net = slim.fully_connected(net, 10)
    return net  # 10-dim logits vector
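# Evaluation-time sketch (illustrative): batch norm and dropout must be
# switched to inference behaviour by the caller, e.g. with placeholders like
# the ones defined in train_net below:
#
#   logits = model(input_data, keep_prob=keep_prob, is_training=is_training)
#   sess.run(logits, feed_dict={input_data: batch,
#                               is_training: False, keep_prob: 1.0})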
def func_optimal(loss_val):
    with tf.variable_scope("optimizer"):
        global_step = tf.Variable(0, trainable=False)
        lr = tf.train.exponential_decay(0.0001, global_step,
                                        decay_steps=10000,
                                        decay_rate=0.99,
                                        staircase=True)
        # Run the batch-norm moving-average updates (UPDATE_OPS) before each
        # optimizer step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            optimizer = tf.train.AdamOptimizer(lr).minimize(loss_val, global_step)
    return optimizer, global_step, lr
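# An equivalent, more explicit formulation of the UPDATE_OPS handling above
# (a sketch, not the author's code):
#
#   train_op = tf.train.AdamOptimizer(lr).minimize(loss_val, global_step)
#   train_op = tf.group(train_op, *tf.get_collection(tf.GraphKeys.UPDATE_OPS))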
def loss(logits, label):
    one_hot_label = slim.one_hot_encoding(label, 10)
    slim.losses.softmax_cross_entropy(logits, one_hot_label)
    # Collect the weight-decay terms registered by slim.l2_regularizer.
    reg_set = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    l2_loss = tf.add_n(reg_set)
    slim.losses.add_loss(l2_loss)
    # get_total_loss() adds REGULARIZATION_LOSSES again by default; disable
    # that here because l2_loss was already added explicitly above.
    total_loss = slim.losses.get_total_loss(add_regularization_losses=False)
    return total_loss, l2_loss
def train_net():
    batchsize = 128
    folder_name = "logdirs"
    no_data = 1
    if not os.path.exists(folder_name):
        os.mkdir(folder_name)
    images_train, labels_train = readcifar10.read_from_tfrecord_v1(batchsize, 0, no_data)
    images_test, labels_test = readcifar10.read_from_tfrecord_v1(batchsize, 1)
    input_data = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name="input_224")
    input_label = tf.placeholder(tf.int64, shape=[None], name="input_label")
    is_training = tf.placeholder(tf.bool, shape=None, name="is_training")
    keep_prob = tf.placeholder(tf.float32, shape=None, name="keep_prob")
    # Feed is_training through the placeholder so batch norm can be switched
    # to inference mode at test time.
    logits = model(input_data, keep_prob=keep_prob, is_training=is_training)
    softmax = tf.nn.softmax(logits)
    pred_max = tf.argmax(softmax, 1)
    correct_pred = tf.equal(input_label, pred_max)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    total_loss, l2_loss = loss(logits, input_label)
    # one_hot_labels = slim.one_hot_encoding(input_label, 10)
    # slim.losses.softmax_cross_entropy(logits, one_hot_labels)
    #
    # reg_set = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    # l2_loss = tf.add_n(reg_set)
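    # NOTE: the original listing is truncated at this point. What follows is a
    # minimal, hypothetical sketch of the remaining training loop, written
    # against the graph built above; it assumes readcifar10's v1 readers are
    # queue-based and is an illustration, not the author's code.
    op, global_step, lr = func_optimal(total_loss)
    with tf.Session() as sess:
        sess.run(tf.group(tf.global_variables_initializer(),
                          tf.local_variables_initializer()))
        # Queue-based input pipelines need running queue runners.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        saver = tf.train.Saver(max_to_keep=5)
        for step in range(10000):
            batch_images, batch_labels = sess.run([images_train, labels_train])
            _, loss_val, acc_val = sess.run(
                [op, total_loss, accuracy],
                feed_dict={input_data: batch_images,
                           input_label: batch_labels,
                           is_training: True,
                           keep_prob: 0.5})
            if step % 100 == 0:
                print("step {}: loss={:.4f} acc={:.4f}".format(step, loss_val, acc_val))
                saver.save(sess, os.path.join(folder_name, "model"), global_step=global_step)
        coord.request_stop()
        coord.join(threads)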