function [nn, L] = nntrain(nn, train_x, train_y, opts)
% NNTRAIN Train a neural network with mini-batch gradient descent.
%   [nn, L] = NNTRAIN(nn, train_x, train_y, opts) runs opts.numepochs
%   epochs over train_x (samples in rows) / train_y, in mini-batches of
%   opts.batchsize rows, updating the network struct nn via
%   nnff -> nnbp -> nngrads on each batch.
%   Returns the trained network and L, the per-mini-batch loss history.
%
%   NOTE: only floor(m/batchsize) full batches are used per epoch; the
%   leftover (m mod batchsize) samples are skipped, but the per-epoch
%   reshuffle means a different subset is skipped each time.
m          = size(train_x, 1);
batchsize  = opts.batchsize;
numepochs  = opts.numepochs;
numbatches = floor(m / batchsize);      % full batches only (see NOTE)
L = zeros(numepochs * numbatches, 1);   % pre-allocated loss history
n = 1;                                  % write index into L
for k = 1 : numepochs
    tic;
    kk = randperm(m);                   % reshuffle sample order each epoch
    for j = 1 : numbatches
        idx = kk((j - 1) * batchsize + 1 : j * batchsize);
        batch_x = train_x(idx, :);
        batch_y = train_y(idx, :);
        nn = nnff(nn, batch_x, batch_y);    % forward pass (sets nn.loss)
        nn = nnbp(nn);                      % backprop (sets nn.dW)
        nn = nngrads(nn);                   % gradient/momentum step
        L(n) = nn.loss;
        n = n + 1;
    end
    t = toc;
    % One full-batch forward pass per epoch, for reporting only.
    nn = nnff(nn, train_x, train_y);
    str_perf = sprintf('; Full-batch train err = %f', nn.loss);
    % Use the local numepochs (was opts.numepochs) for consistency with
    % the rest of the function.
    disp(['NN train: epoch ' num2str(k) '/' num2str(numepochs) ...
          '. Took ' num2str(t) ' seconds' ...
          '. Mini-batch mean squared error on training set is ' ...
          num2str(mean(L((n - numbatches):(n - 1)))) str_perf]);
end
end
function nn = nnff(nn, x, y)
% NNFF Forward-propagate a batch through the network and compute the loss.
%   Stores the per-layer activations in nn.a (each with a leading bias
%   column except the output layer), the output error y - nn.a{n} in
%   nn.error, and the half mean-squared-error in nn.loss.
sigmoid = @(z) 1 ./ (1 + exp(-z));   % logistic activation
n = nn.n;                            % total number of layers
m = size(x, 1);                      % samples in this batch
% Input layer: the raw inputs with a bias column prepended.
nn.a{1} = [ones(m, 1) x];
% Hidden layers: activate, then prepend the bias column.
for layer = 2 : n - 1
    z = nn.a{layer - 1} * nn.W{layer - 1}';
    nn.a{layer} = [ones(m, 1) sigmoid(z)];
end
% Output layer: activated, but no bias column is added.
nn.a{n} = sigmoid(nn.a{n - 1} * nn.W{n - 1}');
% Error and half-MSE loss, averaged over the batch.
nn.error = y - nn.a{n};
nn.loss = 1/2 * sum(sum(nn.error .^ 2)) / m;
end
function nn = nnbp(nn)
% NNBP Back-propagate the output error and accumulate weight gradients.
%   Consumes the activations nn.a and output error nn.error produced by
%   nnff; writes the batch-averaged gradient of each weight matrix to
%   nn.dW. Does not modify the weights themselves.
n = nn.n;
delta = cell(1, n);
% Output-layer delta: d(1/2*err^2)/dz through the sigmoid derivative.
delta{n} = -nn.error .* (nn.a{n} .* (1 - nn.a{n}));
% Propagate deltas backwards through the hidden layers.
for layer = n - 1 : -1 : 2
    sig_deriv = nn.a{layer} .* (1 - nn.a{layer});
    if layer + 1 == n
        % The output delta has no bias column to strip. Bishop (5.56).
        delta{layer} = (delta{layer + 1} * nn.W{layer}) .* sig_deriv;
    else
        % Hidden deltas carry a bias column; drop it before propagating.
        delta{layer} = (delta{layer + 1}(:, 2:end) * nn.W{layer}) .* sig_deriv;
    end
end
% Gradients, averaged over the mini-batch size.
for layer = 1 : n - 1
    if layer + 1 == n
        nn.dW{layer} = (delta{layer + 1}' * nn.a{layer}) / size(delta{layer + 1}, 1);
    else
        nn.dW{layer} = (delta{layer + 1}(:, 2:end)' * nn.a{layer}) / size(delta{layer + 1}, 1);
    end
end
end
function nn = nngrads(nn)
% NNGRADS Apply one gradient-descent update, optionally with momentum.
%   Scales each gradient nn.dW{i} by nn.learning_rate; when
%   nn.momentum > 0 the step is folded into the velocity nn.vW{i}
%   (classical momentum: v <- mu*v + lr*g) and the velocity becomes the
%   step. The weights nn.W are updated in place.
for layer = 1 : nn.n - 1
    step = nn.learning_rate * nn.dW{layer};
    if nn.momentum > 0
        nn.vW{layer} = nn.momentum * nn.vW{layer} + step;
        step = nn.vW{layer};
    end
    nn.W{layer} = nn.W{layer} - step;
end
end
没有合适的资源?快使用搜索试试~ 我知道了~
温馨提示
计算机视觉-BP学习MATLAB源码 BP算法就是目前使用较为广泛的一种参数学习算法. BP(back propagation)神经网络是1986年由Rumelhart和McClelland为首的科学家提出的概念,是一种按照误差逆向传播算法训练的多层前馈神经网络。 BP算法是建立在BP神经网络(多层前馈网络)上的一种算法,它根据神经网络的路径进行一层一层的运算,这个运算包括信号的正向传播和误差的反向传播两部分。
资源详情
资源评论
资源推荐
收起资源包目录
15、BP学习.zip (4个子文件)
15、BP学习
mnist.mat 14.05MB
nntrain.m 2KB
BPLearning.m 1KB
nntest.m 688B
共 4 条
- 1
mozun2020
- 粉丝: 1w+
- 资源: 131
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
最新资源
资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
点击此处反馈
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功
评论0