import numpy as np
from collections import OrderedDict
from net_layer import *
from deeplearning.function import numerical_gradient
class TwoLayerNet:
    def __init__(self, input_size, hidden_size, output_size,
                 weight_init_std=0.01):
        # Initialize the weights.
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)
        # Build the layers. OrderedDict is a dictionary that remembers
        # insertion order.
        self.layers = OrderedDict()
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
        self.lastLayer = SoftmaxWithLoss()
    # The ordered dict pays off in forward propagation: simply call each
    # layer's forward() in insertion order.
    def predict(self, x):
        for layer in self.layers.values():
            x = layer.forward(x)
        return x
    # Loss function. Previously, y was already the softmax output; here
    # predict() stops before softmax, so the last layer applies it.
    def loss(self, x, t):
        y = self.predict(x)
        return self.lastLayer.forward(y, t)
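
    # Because softmax and cross-entropy are fused in the last layer, its
    # backward pass reduces to the simple expression (y - t) / batch_size.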
    def accuracy(self, x, t):
        y = self.predict(x)           # raw scores; argmax needs no softmax
        y = np.argmax(y, axis=1)      # predicted class = highest score
        if t.ndim != 1:               # if t were 1-D labels, axis=1 would fail;
            t = np.argmax(t, axis=1)  # one-hot mini-batch targets are 2-D
        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy
    # Gradient via numerical differentiation.
    def numerical_gradient(self, x, t):
        loss_W = lambda W: self.loss(x, t)
        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        return grads
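
    # Note that loss_W deliberately ignores its argument: the imported
    # numerical_gradient perturbs the entries of self.params[...] in place
    # and re-evaluates self.loss(x, t) with the perturbed weights.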
    # Gradient via backpropagation.
    def gradient(self, x, t):
        # forward: computing the loss value is the forward pass
        self.loss(x, t)
        # backward: walk the layers in reverse order
        dout = 1
        dout = self.lastLayer.backward(dout)
        layers = list(self.layers.values())
        layers.reverse()
        for layer in layers:
            dout = layer.backward(dout)
        # each Affine layer stored its own dW and db during backward()
        grads = {}
        grads['W1'] = self.layers['Affine1'].dW
        grads['b1'] = self.layers['Affine1'].db
        grads['W2'] = self.layers['Affine2'].dW
        grads['b2'] = self.layers['Affine2'].db
        return grads
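
To verify that backpropagation is implemented correctly, the two gradient
methods can be compared on a small batch. The sketch below is not part of
the original class; it uses randomly generated stand-in data (a real check
would use MNIST) and simply prints the per-parameter differences, which
should come out very small, on the order of 1e-10.

# Gradient check: compare backprop gradients against numerical gradients
# on a tiny random batch (stand-in data; a real check would use MNIST).
net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
x_batch = np.random.rand(3, 784)                    # 3 fake images
t_batch = np.eye(10)[np.random.randint(0, 10, 3)]   # 3 one-hot labels
grad_numerical = net.numerical_gradient(x_batch, t_batch)
grad_backprop = net.gradient(x_batch, t_batch)
for key in grad_numerical.keys():
    diff = np.average(np.abs(grad_backprop[key] - grad_numerical[key]))
    print(key + ':' + str(diff))    # should be tiny, around 1e-10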
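Once the gradients check out, the network can be trained with plain SGD.
Below is a minimal loop sketch, again with random stand-in data where real
code would load a dataset such as MNIST. The in-place `-=` update also
updates the weights held by the Affine layers, since they reference the
same arrays as network.params.

# Minimal SGD training loop (stand-in data; substitute real inputs/targets).
x_train = np.random.rand(600, 784)                    # fake training images
t_train = np.eye(10)[np.random.randint(0, 10, 600)]   # fake one-hot labels
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
iters_num, batch_size, learning_rate = 1000, 100, 0.1
for i in range(iters_num):
    batch_mask = np.random.choice(x_train.shape[0], batch_size)
    x_batch, t_batch = x_train[batch_mask], t_train[batch_mask]
    grads = network.gradient(x_batch, t_batch)        # backprop: fast
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grads[key]   # in-place SGD step
    if i % 100 == 0:
        print(i, network.loss(x_batch, t_batch))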