'''
Created on 2020-06-15
@author: 紫薇星君
'''
import numpy
import pandas as pd
from RBM import RBM
from utils import *  # provides the sigmoid activation used below
from sklearn import model_selection
from HiddenLayer import HiddenLayer
from LogisticRegression import LogisticRegression
from sklearn.metrics import r2_score, mean_squared_error
class F(object):
    # min-max normalizer: maps values of x into [0, 1] and back
    def __init__(self, x):
        self.x = x
        self.mi = x.min()  # column minimum
        self.ma = x.max()  # column maximum
    def f(self, a):
        # normalize a into [0, 1]
        return (a - self.mi) / (self.ma - self.mi)
    def f_b(self, b):
        # map a normalized value back to the original scale
        return b * (self.ma - self.mi) + self.mi
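# A minimal usage sketch of F (toy values, for illustration only):
#   scaler = F(numpy.array([2.0, 4.0, 6.0]))
#   scaler.f(4.0)    # -> 0.5, since (4 - 2) / (6 - 2) = 0.5
#   scaler.f_b(0.5)  # -> 4.0, inverting the normalization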
class DBN(object):
    def __init__(self, input=None, label=None, n_ins=2, hidden_layer_sizes=[3, 3], n_outs=2, rng=None):
        # input: training features; label: training labels; n_ins: number of visible units;
        # hidden_layer_sizes: list of hidden-unit counts per layer;
        # n_outs: number of output units; rng: random number generator
        self.x = input                            # training features
        self.y = label                            # training labels
        self.sigmoid_layers = []                  # forward sigmoid layers, used to build the MLP
        self.rbm_layers = []                      # RBMs used to pre-train each MLP layer
        self.n_layers = len(hidden_layer_sizes)   # network depth: number of hidden layers
        if rng is None:
            rng = numpy.random.RandomState(1234)
        assert self.n_layers > 0                  # at least one hidden layer is required
        # construct multi-layer
        for i in range(self.n_layers):
            # layer size
            if i == 0:
                input_size = n_ins                # the first hidden layer reads the visible layer
            else:
                input_size = hidden_layer_sizes[i - 1]  # later layers read the previous hidden layer's output
            # layer input
            if i == 0:
                layer_input = self.x              # the first hidden layer's input is the training features
            else:
                # from the second hidden layer on, the input is the previous layer's output;
                # inside this loop the previous layer is always the last element of self.sigmoid_layers
                layer_input = self.sigmoid_layers[-1].sample_h_given_v()
            # build a hidden layer with the current parameters and store it in self.sigmoid_layers,
            # which later forms the MLP that is fine-tuned
            sigmoid_layer = HiddenLayer(input=layer_input, n_in=input_size,
                                        n_out=hidden_layer_sizes[i], rng=rng, activation=sigmoid)
            self.sigmoid_layers.append(sigmoid_layer)
            # build an RBM with the current parameters and store it in self.rbm_layers,
            # used for fast layer-wise pre-training
            rbm_layer = RBM(input=layer_input, n_visible=input_size, n_hidden=hidden_layer_sizes[i],
                            W=sigmoid_layer.W, hbias=sigmoid_layer.b)  # W, b are shared
            self.rbm_layers.append(rbm_layer)
        # after the hidden stack is built, add a final regression layer (LogisticRegression)
        self.log_layer = LogisticRegression(input=self.sigmoid_layers[-1].sample_h_given_v(),
                                            label=self.y, n_in=hidden_layer_sizes[-1], n_out=n_outs)
        # the fine-tuning cost is the negative log-likelihood of the logistic regression layer
        self.finetune_cost = self.log_layer.negative_log_likelihood()
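    # Construction sketch (toy shapes, for illustration only): a 6-unit visible
    # layer, two 3-unit hidden layers, and 2 output units would be built as
    #   DBN(input=x, label=y, n_ins=6, hidden_layer_sizes=[3, 3], n_outs=2)
    # Each HiddenLayer and its RBM share W and b, so CD updates made during
    # pre-training are immediately visible to the MLP used at fine-tuning time.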
    def pretrain(self, lr=0.1, k=1, epochs=100):
        # greedy layer-wise pre-training of the RBM stack
        for i in range(self.n_layers):
            if i == 0:
                layer_input = self.x
            else:
                # layer_input persists across iterations, so this samples layer i-1's
                # hidden units given the input that layer i-1 itself was trained on
                layer_input = self.sigmoid_layers[i - 1].sample_h_given_v(layer_input)
            rbm = self.rbm_layers[i]
            for epoch in range(epochs):
                rbm.contrastive_divergence(lr=lr, k=k, input=layer_input)
                cost = rbm.get_reconstruction_cross_entropy()
                print('Pre-training layer %d, epoch %d, cost ' % (i, epoch), cost)
        return
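    # Pre-training note: each contrastive_divergence(lr, k, input) call above is
    # expected to run k alternating Gibbs steps v0 -> h0 -> v1 -> ... -> vk, hk
    # and move W roughly along lr * (<v0 h0> - <vk hk>); the exact update lives
    # in RBM.contrastive_divergence (defined in RBM.py, not shown here).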
    def finetune(self, lr=0.1, epochs=100):
        # supervised fine-tuning; note only the final logistic regression layer is trained here
        layer_input = self.sigmoid_layers[-1].sample_h_given_v()
        # train log_layer
        epoch = 0
        while epoch < epochs:
            self.log_layer.train(lr=lr, input=layer_input)
            # if epoch % 50 == 0:
            #     self.finetune_cost = self.log_layer.negative_log_likelihood()
            #     print('Fine-tuning epoch %d, cost is ' % epoch, self.finetune_cost)
            lr *= 0.95  # geometric learning-rate decay
            epoch += 1
        return
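    # With lr *= 0.95 per epoch, the step size decays geometrically: after t
    # epochs the effective rate is lr0 * 0.95**t, e.g. 0.1 * 0.95**100 ≈ 5.9e-4.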
    def predict(self, x):
        # forward pass through the hidden layers, then the logistic output layer
        layer_input = x
        for i in range(self.n_layers):
            sigmoid_layer = self.sigmoid_layers[i]
            layer_input = sigmoid_layer.output(input=layer_input)
        out = self.log_layer.predict(layer_input)
        return out
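    # Usage sketch (hedged; shapes follow from the constructor above): after
    # pretrain() and finetune(), predict() maps a (n_samples, n_ins) array to a
    # (n_samples, n_outs) array, e.g.
    #   probs = dbn.predict(numpy.array([[1, 1, 1, 0, 0, 0]]))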
def test_dbn(pretrain_lr=0.2, pretraining_epochs=5000, k=1, finetune_lr=0.5, finetune_epochs=1000):
    # pretrain_lr: RBM pre-training learning rate; pretraining_epochs: pre-training epochs;
    # k: number of Gibbs sampling steps per CD update; finetune_lr: fine-tuning learning rate;
    # finetune_epochs: number of fine-tuning epochs
    x = numpy.array([[1,1,1,0,0,0],
                     [1,0,1,0,0,0],
                     [1,1,1,0,0,0],
                     [0,0,1,1,1,0],
                     [0,0,1,1,0,0],
                     [0,0,1,1,1,0]])  # training features
    y = numpy.array([[1, 0],[1, 0],[1, 0],[0, 1],[0, 1],[0, 1]])  # training labels
    rng = numpy.random.RandomState(123)  # fix the random seed
    # construct DBN
    dbn = DBN(input=x, label=y, n_ins=6, hidden_layer_sizes=[3, 3, 3], n_outs=2, rng=rng)
    # pre-training (TrainUnsupervisedDBN)
    dbn.pretrain(lr=pretrain_lr, k=k, epochs=pretraining_epochs)
    # fine-tuning (DBNSupervisedFineTuning)
    dbn.finetune(lr=finetune_lr, epochs=finetune_epochs)
    # test: here the "test" set simply reuses the training features
    x = numpy.array([[1,1,1,0,0,0],
                     [1,0,1,0,0,0],
                     [1,1,1,0,0,0],
                     [0,0,1,1,1,0],
                     [0,0,1,1,0,0],
                     [0,0,1,1,1,0]])
    print(dbn.predict(x))  # print predictions
    return
def datadealing(road):
    # load the CSV, min-max normalize each column, and split into train/test sets
    file = pd.read_csv(road)
    t = numpy.array(file['T'])
    co = numpy.array(file['CO'])
    pn = numpy.array(file['Pn'])
    pfd = numpy.array(file['PFD'])
    fo = numpy.array(file['Fo'])
    fv = numpy.array(file['Fv'])
    ft = F(t)
    t1 = ft.f(t)
    fco = F(co)
    co1 = fco.f(co)
    fpn = F(pn)
    pn1 = fpn.f(pn)
    fpfd = F(pfd)
    pfd1 = fpfd.f(pfd)
    ffo = F(fo)
    fo1 = ffo.f(fo)
    ffv = F(fv)
    fv1 = ffv.f(fv)
    data_X = numpy.column_stack((pfd1, co1, t1, fv1, fo1))  # features
    data_Y = pn1                                            # target: normalized Pn
    data_Y = data_Y.reshape(len(data_X), 1)
    x_tr, x_te, y_tr, y_te = model_selection.train_test_split(data_X, data_Y, train_size=0.9)
    return x_tr, x_te, y_tr, y_te
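# Note (hedged): data_Y is normalized by fpn inside datadealing, so the model
# predicts in [0, 1]. Mapping predictions back to Pn's original units would
# need the fpn object, e.g. if datadealing also returned fpn:
#   y_pred_orig = fpn.f_b(dbn.predict(x_te))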
def test_pn_flu(pretrain_lr=0.5, pretraining_epochs=10, k=1, finetune_lr=0.5, finetune_epochs=10):
    road = './datafile/eggplantquandealing.csv'
    x_tr, x_te, y_tr, y_te = datadealing(road)
    rng = numpy.random.RandomState(123)
    dbn = DBN(input=x_tr, label=y_tr, n_ins=5, hidden_layer_sizes=[5, 5, 5, 5], n_outs=1, rng=rng)
    dbn.pretrain(lr=pretrain_lr, k=k, epochs=pretraining_epochs)
    dbn.finetune(lr=finetune_lr, epochs=finetune_epochs)
    # test
    y_calcu = dbn.predict(x_te)
    y_ce = numpy.squeeze(y_te)
    y_calcu = numpy.squeeze(y_calcu)
    print(y_ce)
    print(y_calcu)
    R2 = r2_score(y_ce, y_calcu)
    MSE = mean_squared_error(y_ce, y_calcu)
    print('Test set (normalized) coefficient of determination R2={}, mean squared error MSE={}'.format(R2, MSE))
    return
if __name__ == "__main__":
    test_pn_flu()