import random
import numpy as np
import matplotlib.pyplot as plt
random.seed(0)  # fix the RNG seed so every run produces the same random sequence
def rand(a, b):
    return (b - a) * random.random() + a  # uniform random number in [a, b)
def make_matrix(m, n, fill=0.0):
    # build an m-by-n matrix (list of lists) filled with `fill`
    mat = []
    for i in range(m):
        mat.append([fill] * n)
    return mat
def sigmoid(x):
    # return (1.0 - np.exp(-x)) / (1.0 + np.exp(-x))  # bipolar sigmoid activation
    return (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))  # tanh activation
def sigmoid_derivative(x):
    # x is the activation *output*: for y = tanh(v), dy/dv = 1 - y**2
    # return 0.5 * (1 - x * x)  # derivative of the bipolar sigmoid above
    return 1.0 - x * x  # derivative matching the tanh activation used in sigmoid()
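# Sanity-check sketch (not part of the original script): sigmoid_derivative
# expects the *output* of the activation, so for y = sigmoid(v) it should agree
# with a finite-difference slope of sigmoid at v. The helper name, probe point
# and eps below are arbitrary illustrative choices.
def _check_activation_pair(v=0.5, eps=1e-6):
    y = sigmoid(v)
    numeric = (sigmoid(v + eps) - sigmoid(v - eps)) / (2.0 * eps)
    analytic = sigmoid_derivative(y)  # fed the output y, not the input v
    return abs(numeric - analytic)  # should be close to zero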
class BPNeuralNetwork:
    # initialize all fields; the real sizes are assigned in setup()
    def __init__(self):
        self.input_n = 0
        self.hidden_n = 0
        self.hidden_second_n = 0
        self.output_n = 0
        self.input_cells = []
        self.hidden_cells = []
        self.hidden_second_cells = []
        self.output_cells = []
        self.input_weights = []
        self.hidden_weights = []
        self.output_weights = []
        self.bais_h = []
        self.bais_hs = []
        self.bais_o = []
    def setup(self, ni, nh, nhs, no):
        self.input_n = ni + 1  # one extra input cell acts as a bias term
        self.hidden_n = nh
        self.hidden_second_n = nhs
        self.output_n = no
        # initialize the neuron activation vectors
        self.input_cells = [1.0] * self.input_n
        self.hidden_cells = [1.0] * self.hidden_n
        self.hidden_second_cells = [1.0] * self.hidden_second_n
        self.output_cells = [1.0] * self.output_n
        # initialize the weight matrices to zero
        self.input_weights = make_matrix(self.input_n, self.hidden_n)  # input -> first hidden
        self.hidden_weights = make_matrix(self.hidden_n, self.hidden_second_n)  # first hidden -> second hidden
        self.output_weights = make_matrix(self.hidden_second_n, self.output_n)  # second hidden -> output
        # initialize the bias vectors to zero
        self.bais_h = [0.0] * self.hidden_n
        self.bais_hs = [0.0] * self.hidden_second_n
        self.bais_o = [0.0] * self.output_n
        # randomize weights and biases
        for i in range(self.input_n):
            for h in range(self.hidden_n):
                self.input_weights[i][h] = rand(-1.0, 1.0)  # input -> first hidden
        for h in range(self.hidden_n):
            for k in range(self.hidden_second_n):
                self.hidden_weights[h][k] = rand(-1.0, 1.0)  # first hidden -> second hidden
        for k in range(self.hidden_second_n):
            for o in range(self.output_n):
                self.output_weights[k][o] = rand(-1.0, 1.0)  # second hidden -> output
        for h in range(self.hidden_n):
            self.bais_h[h] = rand(-1.0, 1.0)
        for k in range(self.hidden_second_n):
            self.bais_hs[k] = rand(-1.0, 1.0)
        for o in range(self.output_n):
            self.bais_o[o] = rand(-1.0, 1.0)
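        # e.g. setup(2, 5, 5, 1) gives 3 input cells (2 features + the bias
        # cell), a 3x5 input weight matrix, a 5x5 hidden weight matrix, a
        # 5x1 output weight matrix, and bias vectors of lengths 5, 5 and 1.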
    # forward pass
    def predict(self, inputs):
        # load the input layer (the last cell stays 1.0 and acts as a bias)
        for i in range(self.input_n - 1):
            self.input_cells[i] = inputs[i]
        # first hidden layer activations
        for j in range(self.hidden_n):
            total = 0.0
            for i in range(self.input_n):
                total += self.input_cells[i] * self.input_weights[i][j]
            self.hidden_cells[j] = sigmoid(total + self.bais_h[j])
        # second hidden layer activations
        for k in range(self.hidden_second_n):
            total = 0.0
            for j in range(self.hidden_n):
                total += self.hidden_cells[j] * self.hidden_weights[j][k]
            self.hidden_second_cells[k] = sigmoid(total + self.bais_hs[k])
        # output layer activations
        for o in range(self.output_n):
            total = 0.0
            for k in range(self.hidden_second_n):
                total += self.hidden_second_cells[k] * self.output_weights[k][o]
            self.output_cells[o] = sigmoid(total + self.bais_o[o])
        return self.output_cells[:]  # return a copy of the output layer
    # back-propagation with a crude per-layer adaptive learning rate
    def back_propagate(self, case, label, learn):
        # forward pass first, to populate the activations for this sample
        self.predict(case)
        learn_o = learn_h = learn_hs = learn  # separate rate per layer
        # output-layer deltas
        output_deltas = [0.0] * self.output_n
        for o in range(self.output_n):
            error = label[o] - self.output_cells[o]  # target minus actual output
            c = output_deltas[o]  # previous value (always 0.0 within one call)
            output_deltas[o] = sigmoid_derivative(self.output_cells[o]) * error
            # grow the rate when the delta increases, shrink it when it decreases
            if output_deltas[o] < c:
                learn_o *= 0.7
            elif output_deltas[o] > c:
                learn_o *= 1.05
        # second hidden layer deltas
        hidden_second_deltas = [0.0] * self.hidden_second_n
        for h in range(self.hidden_second_n):
            error = 0.0
            for o in range(self.output_n):
                error += output_deltas[o] * self.output_weights[h][o]
            cc = hidden_second_deltas[h]
            hidden_second_deltas[h] = sigmoid_derivative(self.hidden_second_cells[h]) * error
            if hidden_second_deltas[h] < cc:
                learn_hs *= 0.7
            elif hidden_second_deltas[h] > cc:
                learn_hs *= 1.05
        # first hidden layer deltas
        hidden_deltas = [0.0] * self.hidden_n
        for j in range(self.hidden_n):
            error = 0.0
            for h in range(self.hidden_second_n):
                error += hidden_second_deltas[h] * self.hidden_weights[j][h]
            ccc = hidden_deltas[j]
            hidden_deltas[j] = sigmoid_derivative(self.hidden_cells[j]) * error
            if hidden_deltas[j] < ccc:
                learn_h *= 0.7
            elif hidden_deltas[j] > ccc:
                learn_h *= 1.05
        # update second-hidden -> output weights
        for h in range(self.hidden_second_n):
            for o in range(self.output_n):
                change = output_deltas[o] * self.hidden_second_cells[h]
                self.output_weights[h][o] += learn_o * change
        # update first-hidden -> second-hidden weights
        for j in range(self.hidden_n):
            for h in range(self.hidden_second_n):
                change = hidden_second_deltas[h] * self.hidden_cells[j]
                self.hidden_weights[j][h] += learn_hs * change
        # update input -> first-hidden weights
        for i in range(self.input_n):
            for h in range(self.hidden_n):
                change = hidden_deltas[h] * self.input_cells[i]
                self.input_weights[i][h] += learn_h * change
        # smoothed bias updates (exponential moving average toward the gradient step)
        for i in range(self.hidden_n):
            self.bais_h[i] = 0.05 * learn_h * hidden_deltas[i] + 0.95 * self.bais_h[i]
        for k in range(self.hidden_second_n):
            self.bais_hs[k] = 0.05 * learn_hs * hidden_second_deltas[k] + 0.95 * self.bais_hs[k]
        for j in range(self.output_n):
            self.bais_o[j] = 0.05 * learn_o * output_deltas[j] + 0.95 * self.bais_o[j]
        # accumulate the squared error for this sample
        error = 0.0
        for o in range(len(label)):
            error += 0.5 * (label[o] - self.output_cells[o]) ** 2
        return error
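    # Hypothetical helper (not in the original script): verify the analytic
    # gradient of one output weight against a central finite difference of the
    # squared-error loss. The default indices and eps are arbitrary choices;
    # a large discrepancy usually means a mismatched activation/derivative pair.
    def gradient_check(self, case, label, h=0, o=0, eps=1e-5):
        def loss():
            out = self.predict(case)
            return sum(0.5 * (label[k] - out[k]) ** 2 for k in range(len(label)))
        self.predict(case)
        delta = sigmoid_derivative(self.output_cells[o]) * (label[o] - self.output_cells[o])
        analytic = -delta * self.hidden_second_cells[h]  # d(loss)/d(weight)
        w0 = self.output_weights[h][o]
        self.output_weights[h][o] = w0 + eps
        plus = loss()
        self.output_weights[h][o] = w0 - eps
        minus = loss()
        self.output_weights[h][o] = w0  # restore the original weight
        numeric = (plus - minus) / (2.0 * eps)
        return abs(numeric - analytic)  # should be close to zero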
    def train(self, cases, labels, limit, learn):
        for j in range(limit):
            error = 0.0  # total error over this epoch (useful for monitoring)
            for i in range(len(cases)):
                label = labels[i]
                case = cases[i]
                error += self.back_propagate(case, label, learn)
    def test(self):
        # a = np.loadtxt('test_data.txt')
        # test_d = np.matrix(a).T
        # xxx = test_d [0].T
        # yyy = test_d [
        pass
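# Minimal usage sketch (hypothetical, not from the original script): train the
# network on XOR and print its predictions. The layer sizes, epoch count and
# learning rate below are arbitrary illustrative choices.
if __name__ == '__main__':
    nn = BPNeuralNetwork()
    nn.setup(2, 5, 5, 1)  # 2 inputs, two hidden layers of 5 units, 1 output
    cases = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
    labels = [[0.0], [1.0], [1.0], [0.0]]
    nn.train(cases, labels, 10000, 0.05)
    for case in cases:
        print(case, nn.predict(case))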