from __future__ import print_function
import tensorflow as tf
import numpy as np
import scipy.io as sio
import time
import random
import h5py
# Functions for deep neural network weights initialization
def ini_weights(n_input, n_hidden_1, n_hidden_2, n_hidden_3, n_output):
    """Create trainable weight and bias variables for a 3-hidden-layer MLP.

    Weights are drawn from a truncated normal and scaled by 1/sqrt(fan_in);
    biases start at 0.1 to keep ReLU units initially active.

    Args:
        n_input: input feature dimension.
        n_hidden_1, n_hidden_2, n_hidden_3: hidden layer widths.
        n_output: number of output classes.

    Returns:
        (weights, biases): dicts keyed 'h1'/'h2'/'h3'/'out' and
        'b1'/'b2'/'b3'/'out' holding tf.Variable objects.
    """
    weights = {
        'h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1]) / np.sqrt(n_input)),
        'h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2]) / np.sqrt(n_hidden_1)),
        'h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3]) / np.sqrt(n_hidden_2)),
        # BUG FIX: the division was previously applied OUTSIDE tf.Variable(...),
        # so 'out' held a derived tensor instead of the variable itself, and the
        # scale was 1/n_hidden_3 rather than the 1/sqrt(fan_in) used elsewhere.
        'out': tf.Variable(tf.truncated_normal([n_hidden_3, n_output]) / np.sqrt(n_hidden_3)),
    }
    biases = {
        'b1': tf.Variable(tf.ones([n_hidden_1]) * 0.1),
        'b2': tf.Variable(tf.ones([n_hidden_2]) * 0.1),
        'b3': tf.Variable(tf.ones([n_hidden_3]) * 0.1),
        'out': tf.Variable(tf.ones([n_output]) * 0.1),
    }
    return weights, biases
# Functions for deep neural network structure construction
def multilayer_perceptron(x, weights, biases, input_keep_prob, hidden_keep_prob):
    """Build the forward pass of the MLP graph.

    Applies dropout to the input, then three ReLU hidden layers (each followed
    by dropout), and finally a ReLU output layer.

    Args:
        x: input placeholder/tensor of shape [batch, n_input].
        weights, biases: variable dicts from ini_weights.
        input_keep_prob: dropout keep probability for the input layer.
        hidden_keep_prob: dropout keep probability for each hidden layer.

    Returns:
        Output-layer activations (ReLU applied), shape [batch, n_output].
    """
    net = tf.nn.dropout(x, input_keep_prob)  # input-layer dropout
    # Three identical hidden stages: affine -> ReLU -> dropout.
    for w_key, b_key in (('h1', 'b1'), ('h2', 'b2'), ('h3', 'b3')):
        net = tf.nn.relu(tf.matmul(net, weights[w_key]) + biases[b_key])
        net = tf.nn.dropout(net, hidden_keep_prob)
    # Output stage: affine -> ReLU (no dropout).
    return tf.nn.relu(tf.matmul(net, weights['out']) + biases['out'])
# Functions for deep neural network training
def train(X, Y, location, training_epochs=10, batch_size=100, LR=0.0001, n_hidden_1=1000, n_hidden_2=800, n_hidden_3=1500,
          traintestsplit=0.2, LRdecay=1):
    """Train the 3-hidden-layer MLP classifier and checkpoint it.

    Args:
        X: 2-D array, one sample per row.
        Y: 2-D array of one-hot labels aligned with X.
        location: checkpoint path passed to tf.train.Saver.save.
        training_epochs: number of training epochs.
        batch_size: samples per optimization step.
        LR: base learning rate for RMSProp.
        n_hidden_1, n_hidden_2, n_hidden_3: hidden layer widths.
        traintestsplit: fraction of samples held out for validation.
        LRdecay: 1 -> decay learning rate as LR/(epoch+1); otherwise constant LR.

    Returns:
        0 on completion. Side effects: saves per-epoch metrics to a .mat file
        and the trained model to `location`.
    """
    num_total = X.shape[0]                       # total number of samples
    num_val = int(num_total * traintestsplit)    # validation samples
    num_train = num_total - num_val              # training samples
    n_input = X.shape[1]                         # input feature size
    n_output = Y.shape[1]                        # number of classes
    X_train = X[0:num_train, :]                  # training data
    Y_train = Y[0:num_train, :]                  # training labels
    X_val = X[num_train:num_total, :]            # validation data
    Y_val = Y[num_train:num_total, :]            # validation labels
    x = tf.placeholder("float", [None, n_input])
    y = tf.placeholder("float", [None, n_output])
    is_train = tf.placeholder("bool")
    learning_rate = tf.placeholder(tf.float32, shape=[])
    # BUG FIX: batches per epoch should cover the TRAINING set; previously this
    # used num_total, overstating the batch count per epoch.
    total_batch = int(num_train / batch_size)
    # BUG FIX: second label said 'test_accuracy' but the value is the
    # validation-set size.
    print('train: %d ' % num_train, 'validation: %d ' % num_val)
    input_keep_prob = tf.placeholder(tf.float32)
    hidden_keep_prob = tf.placeholder(tf.float32)
    weights, biases = ini_weights(n_input, n_hidden_1, n_hidden_2, n_hidden_3, n_output)
    pred = multilayer_perceptron(x, weights, biases, input_keep_prob, hidden_keep_prob)
    # One-hot labels are converted to class indices for the sparse loss op.
    cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=tf.argmax(y, 1)))
    optimizer = tf.train.RMSPropOptimizer(learning_rate, 0.9).minimize(cost)  # RMSProp, decay=0.9
    correct_predection = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_predection, tf.float32))
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()  # used to checkpoint the trained model
    MSETime = np.zeros((training_epochs, 3))  # per-epoch [train_acc, val_acc, elapsed_s]
    with tf.Session() as sess:
        sess.run(init)
        start_time = time.time()
        for epoch in range(training_epochs):
            for i in range(total_batch):
                # Draw a random mini-batch from the training set.
                idx = np.random.randint(num_train, size=batch_size)
                # Collapsed the duplicated if/elif feed-dict blocks: only the
                # learning rate differed between the two branches.
                lr = LR / (epoch + 1) if LRdecay == 1 else LR
                _, c = sess.run([optimizer, accuracy],
                                feed_dict={x: X_train[idx, :], y: Y_train[idx, :],
                                           input_keep_prob: 1, hidden_keep_prob: 1,
                                           learning_rate: lr, is_train: True})
            MSETime[epoch, 0] = c  # accuracy of the last training batch
            MSETime[epoch, 1] = sess.run(accuracy,
                                         feed_dict={x: X_val, y: Y_val, input_keep_prob: 1,
                                                    hidden_keep_prob: 1, is_train: False})
            MSETime[epoch, 2] = time.time() - start_time
            # BUG FIX: int(training_epochs / 10) is 0 when training_epochs < 10,
            # making the modulo raise ZeroDivisionError; clamp the period to >= 1.
            # Also relabelled the printed value: `c` is TRAINING batch accuracy.
            if epoch % max(1, training_epochs // 10) == 0:
                print('epoch:%d, ' % epoch,
                      'train_accuracy:%0.2f%%.' % (c * 100))
        print("training time: %0.2f s" % (time.time() - start_time))
        sio.savemat('MSETime_%d_%d_%d' % (n_output, batch_size, LR * 10000),
                    {'train': MSETime[:, 0], 'validation': MSETime[:, 1], 'time': MSETime[:, 2]})
        saver.save(sess, location)  # persist the trained model checkpoint
    return 0
# Functions for deep neural network testing
def test(X, Y, model_location, save_name, n_input, n_output, n_hidden_1=1000, n_hidden_2=800, n_hidden_3=1500):
    """Evaluate a saved MLP checkpoint on (X, Y) and store predictions.

    Rebuilds the graph with the same architecture used at training time,
    restores variables from `model_location`, runs a single forward pass, and
    writes predictions and labels to a .mat file.

    Args:
        X: 2-D test data, one sample per row.
        Y: 2-D one-hot test labels aligned with X.
        model_location: checkpoint path previously passed to saver.save.
        save_name: output .mat filename for {'pred', 'Y'}.
        n_input, n_output: feature and class dimensions (must match training).
        n_hidden_1, n_hidden_2, n_hidden_3: hidden widths (must match training).

    Returns:
        Inference wall-clock time in seconds.
    """
    tf.reset_default_graph()  # start from a clean graph before restoring
    x = tf.placeholder("float", [None, n_input])
    y = tf.placeholder("float", [None, n_output])
    is_train = tf.placeholder("bool")
    input_keep_prob = tf.placeholder(tf.float32)
    hidden_keep_prob = tf.placeholder(tf.float32)
    weights, biases = ini_weights(n_input, n_hidden_1, n_hidden_2, n_hidden_3, n_output)
    pred = multilayer_perceptron(x, weights, biases, input_keep_prob, hidden_keep_prob)
    correct_predection = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_predection, tf.float32))
    saver = tf.train.Saver()  # declared so restore() can map checkpoint variables
    with tf.Session() as sess:  # no variable init: values come from the checkpoint
        saver.restore(sess, model_location)
        start_time = time.time()
        y_pred, test_accuracy = sess.run([pred, accuracy],
                                         feed_dict={x: X, y: Y, input_keep_prob: 1, hidden_keep_prob: 1,
                                                    is_train: False})
        testtime = time.time() - start_time
        print("test_accuracy is %g" % test_accuracy)
        sio.savemat(save_name, {'pred': y_pred, 'Y': Y})
    # BUG FIX: previously returned the undefined name `dnntime`, which raised
    # NameError on every call; the measured inference time is `testtime`.
    return testtime
没有合适的资源?快使用搜索试试~ 我知道了~
资源推荐
资源详情
资源评论
收起资源包目录
DNN.py.zip (1个子文件)
DNN.py 7KB
共 1 条
- 1
资源评论
- weixin_586323872022-04-07用户下载后在一定时间内未进行评价,系统默认好评。
- 永不褪色的蓝2023-03-16这个资源对我启发很大,受益匪浅,学到了很多,谢谢分享~
- Lei-ger2022-04-25用户下载后在一定时间内未进行评价,系统默认好评。
余淏
- 粉丝: 52
- 资源: 3975
下载权益
C知道特权
VIP文章
课程特权
开通VIP
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功