import numpy as np
from K_Means import kMeans
from DBN import *
import csv
import tensorflow as tf
from switch_case import switch
# Load the training inputs from pemstrain.csv.
# The CSV holds 4 feature rows, each with (at least) 10 sample columns.
# We transpose rows 0-3 / columns 0-9 so that each of the 10 rows of the
# result holds one sample's 4 feature values, shape (10, 4).
training_set_k = []
with open('pemstrain.csv', 'r') as f:
    for row in csv.reader(f):
        training_set_k.append(row)
# Comprehension replaces the original 40 hand-written cell accesses
# (training_set_1[c][r] == training_set_k[r][c]).
training_set_1 = [[training_set_k[r][c] for r in range(4)] for c in range(10)]
training_set_1n = np.array(training_set_1, dtype="float64")
# Load the training labels from pemstrainlabel.csv, using the same
# transpose layout as the training inputs: rows 0-3 / columns 0-9 of the
# CSV become a (10, 4) float array, one row per sample.
training_label_k = []
with open('pemstrainlabel.csv', 'r') as f:
    for row in csv.reader(f):
        training_label_k.append(row)
# Comprehension replaces the original 40 hand-written cell accesses.
training_label_1 = [[training_label_k[r][c] for r in range(4)] for c in range(10)]
training_label_1n = np.array(training_label_1, dtype="float64")
#print(training_label_1n.dtype)
# Load the testing inputs from pemstest.csv: rows 0-3 / columns 0-1 of
# the CSV, transposed to a (2, 4) float array (one row per test sample).
testing_set_k = []
with open('pemstest.csv', 'r') as f:
    for row in csv.reader(f):
        testing_set_k.append(row)
# Comprehension replaces the original hand-written cell accesses.
testing_set_1 = [[testing_set_k[r][c] for r in range(4)] for c in range(2)]
testing_set_1n = np.array(testing_set_1, dtype="float64")
# Load the testing labels, transposed to a (2, 4) float array.
# BUGFIX: the original opened 'pemstrainlabel.csv' here, i.e. it reused
# the *training* labels for the test split; the matching test-label file
# is 'pemstestlabel.csv' (present alongside the other CSVs).
testing_label_k = []
with open('pemstestlabel.csv', 'r') as f:
    for row in csv.reader(f):
        testing_label_k.append(row)
testing_label_1 = [[testing_label_k[r][c] for r in range(4)] for c in range(2)]
testing_label_1n = np.array(testing_label_1, dtype="float64")
# Train the main DBN on the full training set, then run k-means (k=2) on
# the weights of its last RBM layer to split the output tasks into two
# cluster groups (center = centroids, ClusterAssiment = per-item labels).
dbn = DBN(input=training_set_1n, label=training_label_1n, n_ins=4,
          hidden_layer_sizes=[128, 128, 128], n_outs=4)
dbn.pretrain(lr=0.3, k=1, epochs=40)
dbn.finetune(0.1, 40)
center, ClusterAssiment = kMeans(dbn.rbm_layers[-1].W, k=2)
# Route each sample column into one of two groups according to the
# k-means assignment of the corresponding output weight column:
# cluster 0.0 -> the *_1t1 lists, anything else -> the *_1t2 lists.
# counter1/counter2 remember which column indices went where.
training_set_1t1, training_set_1t2 = [], []
training_label_1t1, training_label_1t2 = [], []
testing_set_1t1, testing_set_1t2 = [], []
counter1, counter2 = [], []
for col in range(len(dbn.log_layer.W.T)):
    in_first_cluster = ClusterAssiment[col, 0] == 0.0
    if in_first_cluster:
        training_set_1t1.append(training_set_1n[:, col])
        training_label_1t1.append(training_label_1n[:, col])
        testing_set_1t1.append(testing_set_1n[:, col])
        counter1.append(col)
    else:
        training_set_1t2.append(training_set_1n[:, col])
        training_label_1t2.append(training_label_1n[:, col])
        testing_set_1t2.append(testing_set_1n[:, col])
        counter2.append(col)
# Materialize the grouped column lists as numpy arrays
# (rows = the samples selected into each cluster).
training_set_1t1_n, training_set_1t2_n = (
    np.array(training_set_1t1), np.array(training_set_1t2))
training_label_1t1_n, training_label_1t2_n = (
    np.array(training_label_1t1), np.array(training_label_1t2))
# Fit one sub-DBN per non-empty cluster.
# BUGFIX: the single-cluster branches originally called
# pretrain(0.3, 2) -- which, given the (lr, k, epochs) signature used at
# the two-cluster branch, silently set k=2 and dropped the epoch count --
# and finetune() with no arguments, unlike finetune(0.1, 2) elsewhere.
# All branches now use the same pretrain(0.3, 1, 2) / finetune(0.1, 2)
# schedule as the two-cluster case.
# NOTE(review): the single-cluster branches also used
# hidden_layer_sizes=[128,128,128] vs [16] in the two-cluster branch;
# kept as in the original since the intended topology cannot be
# confirmed from here.
if len(training_set_1t1_n) != 0 and len(training_set_1t2_n) != 0:
    dbn1 = DBN(input=training_set_1t1_n.T, label=training_label_1t1_n.T,
               n_ins=len(training_set_1t1_n), hidden_layer_sizes=[16],
               n_outs=len(training_set_1t1_n))
    dbn1.pretrain(0.3, 1, 2)
    dbn1.finetune(0.1, 2)
    dbn2 = DBN(input=training_set_1t2_n.T, label=training_label_1t2_n.T,
               n_ins=len(training_set_1t2_n), hidden_layer_sizes=[16],
               n_outs=len(training_set_1t2_n))
    dbn2.pretrain(0.3, 1, 2)
    dbn2.finetune(0.1, 2)
elif len(training_set_1t1_n) == 0:
    # Only cluster 2 is populated.
    dbn2 = DBN(input=training_set_1t2_n.T, label=training_label_1t2_n.T,
               n_ins=len(training_set_1t2_n),
               hidden_layer_sizes=[128, 128, 128],
               n_outs=len(training_set_1t2_n))
    dbn2.pretrain(0.3, 1, 2)
    dbn2.finetune(0.1, 2)
else:
    # Only cluster 1 is populated.
    dbn1 = DBN(input=training_set_1t1_n.T, label=training_label_1t1_n.T,
               n_ins=len(training_set_1t1_n),
               hidden_layer_sizes=[128, 128, 128],
               n_outs=len(training_set_1t1_n))
    dbn1.pretrain(0.3, 1, 2)
    dbn1.finetune(0.1, 2)
print(dbn.log_layer.W)
def share_train(epchos,training_x1,training_x2,initial_shared_weights_1,initial_shared_weights_2,training_label_1,training_label_2,Y1_shared_weights,Y2_shared_weights,Y3_shared_weights,Y4_shared_weights):
d_y_1 = []
d_y_2 = []
y1_given_x1 = softmax(np.dot(training_x1, initial_shared_weights_1))
y2_given_x1 = softmax(np.dot(training_x2, initial_shared_weights_2))
# y_given_x1 = softmax(np.dot(training_x,initial_shared_weights))
# print(initial_shared_weights_1.shape)
# print(Y1_shared_weights.shape)
# print(training_x1.shape)
# print(training_x2.shape)
# print(y1_given_x1.shape)
# print(training_label_1t1_n.T.shape)
# print(y1_given_x1.shape)
for epcho in range(epchos):
# p_y_given_x1 = softmax(np.dot(y_given_x1,Y1_shared_weights))
for i in range(len(counter1)):
if counter1[i] == 0:
p_y1_given_x_1 = softmax(np.dot(y1_given_x1, Y1_shared_weights))
d_y1 = training_label_1[i,:].T.reshape(10,1)-p_y1_given_x_1
# d_y_1.append(d_y1)
# print(p_y1_given_x_1.shape)
Y1_shared_weights += 0.1 * np.dot(y1_given_x1.T,d_y1)
elif counter1[i] == 1:
p_y2_given_x_1 = softmax(np.dot(y1_given_x1, Y2_shared_weights))
d_y2 = training_label_1[i,:].T.reshape(10,1)-p_y2_given_x_1
# d_y_1.append(d_y2)
# print(p_y2_given_x_1.shape)
Y2_shared_weights += 0.1 * np.dot(y1_given_x1.T,d_y2)
elif counter1[i] == 2:
p_y3_given_x_1 = softmax(np.dot(y1_given_x1, Y3_shared_weights))
d_y3 = training_label_1[i,:].T.reshape(10,1)-p_y3_given_x_1
# d_y_1.append(d_y3)
# print(p_y3_given_x_1.shape)
Y3_shared_weights += 0.1 * np.dot(y1_given_x1.T,d_y3)
elif counter1[i] == 3:
p_y4_given_x_1 = softmax(np
没有合适的资源?快使用搜索试试~ 我知道了~
DBN Python预测交通流
共36个文件
py:14个
csv:8个
pyc:8个
4星 · 超过85%的资源 需积分: 49 209 下载量 80 浏览量
2017-11-01
10:30:24
上传
评论 22
收藏 52KB ZIP 举报
温馨提示
python 实现的基于DBN的交通流预测系统,为方便验证已给出实验,绝对可用
资源推荐
资源详情
资源评论
收起资源包目录
code.zip (36个子文件)
code
switch_case.py 393B
DBN.py 5KB
pemstestlabel.csv 22KB
test02.py 80B
RBM.py 5KB
NN.py 4KB
utils.py 489B
Calculate.py 0B
pemstrainlabel.csv 2KB
test.py 10KB
._.DS_Store 4KB
__pycache__
HiddenLayer.cpython-35.pyc 2KB
K_Means.cpython-35.pyc 1KB
DBN.cpython-35.pyc 3KB
switch_case.cpython-35.pyc 759B
NN.cpython-35.pyc 4KB
LogisticRegression.cpython-35.pyc 3KB
RBM.cpython-35.pyc 4KB
utils.cpython-35.pyc 1KB
MTL.py 373B
LogisticRegression.py 2KB
.DS_Store 8KB
test01.py 12KB
._pemstrain.csv 4KB
._pemstestlabel.csv 4KB
Task_Grouping.py 13KB
.idea
code.iml 459B
misc.xml 213B
modules.xml 260B
workspace.xml 59KB
pemstrain.csv 2KB
HiddenLayer.py 2KB
pemstest.csv 486B
K_Means.py 2KB
._pemstest.csv 4KB
._pemstrainlabel.csv 4KB
共 36 条
- 1
m_1574238251
- 粉丝: 1
- 资源: 1
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功
- 1
- 2
前往页