function [para,obj,hist] = TK_optimize(x,d,para0,mode,eta,max_epochs,varargin)
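%TK_OPTIMIZE Minibatch gradient-descent training of an FNN or TS-ANFIS model.
% Part of a MATLAB comparison of feedforward neural network (FNN) and
% Takagi-Sugeno neuro-fuzzy (TS-ANFIS) time-series prediction.
%
% Inputs:
% x          - N-by-n matrix of training inputs (one example per row)
% d          - N-by-m matrix of desired outputs
% para0      - Initial parameters: {W,b,h_modes} for mode 0 (FNN),
%              {c,sigma,theta} for mode 1 (TS-ANFIS)
% mode       - 0 for FNN, 1 for TS-ANFIS
% eta        - Learning rate
% max_epochs - Maximum number of training epochs
% Optional name-value pairs: 'batch_size', 'shuffle', 'verbose',
% 'early_stop_epoch', 'N_early_stop', 'early_stop_tol', 'tol',
% 'displaysec' (see the inputParser defaults below).
%
% Outputs:
% para - Parameters with the lowest SSE encountered during training
% obj  - The corresponding SSE value
% hist - SSE after each epoch; hist(1) is the SSE of para0
%
% Requires the helper functions SSE, TK_FNN_grad and TK_NF_grad from the
% same source package.
%
% Example call (a sketch only; x, d and para0 are assumed to come from the
% package's data preparation and initialisation code):
%   [para,obj,hist] = TK_optimize(x,d,para0,1,0.01,500,...
%       'batch_size',16,'shuffle',true);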
%% Parse optional arguments
p = inputParser;
addParameter(p,'batch_size',1);
addParameter(p,'shuffle',false);
addParameter(p,'verbose',true);
addParameter(p,'early_stop_epoch',false);
addParameter(p,'N_early_stop',100);
addParameter(p,'early_stop_tol',false);
addParameter(p,'tol',1e-3);
addParameter(p,'displaysec',0.1);
parse(p,varargin{:});
% Extract frequently used optional arguments into local variables
batch_size = p.Results.batch_size;
%% Perform some sanity checks
[N, ~] = size(x); % Number of training examples
if batch_size > N
error('Batch size = %d must not exceed the number of training examples = %d',batch_size,N);
end
if ~ismember(mode,[0,1])
error('mode should be either 0 for FNN or 1 for TS-ANFIS');
end
%% Initialise optimization
hist = nan(1,max_epochs + 1); % Entries remain NaN for epochs skipped by early stopping
obj_i = SSE(x,d,para0,mode); % Initial objective value
hist(1) = obj_i;
para_i = para0; % Parameters that are updated in the loop
obj = obj_i; % Lowest training error so far (updated in the loop)
para = para_i; % Parameters with the lowest training error (updated in the loop)
progress_count = 0; % Counts consecutive epochs without a drop in training error
% Compute how many batches the training data is split into
N_batches = floor(N/batch_size);
% The last batch might have to be bigger
last_batch_size = batch_size + N - batch_size*N_batches;
batch_sizes = [repmat(batch_size,1,N_batches - 1), last_batch_size];
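% Worked example (illustrative numbers): N = 10 and batch_size = 3 give
% N_batches = floor(10/3) = 3 and last_batch_size = 3 + 10 - 9 = 4, so
% batch_sizes = [3 3 4] and every training example is used once per epoch.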
lineLength = 0; % Used to print progress
print_timer = tic; % Timer used to throttle progress printing
%% Run SGD for the specified number of epochs
for i = 1:max_epochs
% Shuffle the training data each epoch if requested - stochastic gradient descent
if p.Results.shuffle
perm = randperm(N);
else
perm = 1:N;
end
x_perm = x(perm,:);
d_perm = d(perm,:);
%% Run epoch
for j = 0:(N_batches - 1) % loop over each minibatch
%% Unpack parameters and initialise gradients
% Initialise gradients with the first (permuted) training example in the minibatch
x1 = x_perm(j*batch_size+1,:);
d1 = d_perm(j*batch_size+1,:);
switch mode
case 0 % FNN
[W,b,h_modes] = para_i{:};
% One backpropagation call yields gradient arrays of the correct size
[g_W, g_b] = TK_FNN_grad(x1,d1,W,b,h_modes);
M = length(g_W); % Number of layers
case 1 % TS-ANFIS
[c,sigma,theta] = para_i{:};
% One backpropagation call yields gradient arrays of the correct size
[g_c, g_sigma,g_theta] = TK_NF_grad(x1,d1,c,sigma,theta);
end
%% Accumulate gradients over the rest of the minibatch
for k = 2:batch_sizes(j+1) % remaining examples (the first one initialised the gradients)
idx = j*batch_size + k; % Current training example
xk = x_perm(idx,:);
dk = d_perm(idx,:);
switch mode % Get sub gradients for training example
case 0 % FNN
[g_W_k,g_b_k] = TK_FNN_grad(xk,dk,W,b,h_modes);
% Add the per-example subgradients to the minibatch total
for l = 1:M
g_W{l} = g_W{l} + g_W_k{l};
g_b{l} = g_b{l} + g_b_k{l};
end
case 1 % TS-ANFIS
[g_c_k,g_sigma_k,g_theta_k] = TK_NF_grad(...
xk,dk,c,sigma,theta);
% Add the per-example subgradients to the minibatch total
g_c = g_c + g_c_k;
g_sigma = g_sigma + g_sigma_k;
g_theta = g_theta + g_theta_k;
end
end
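% Minibatch SGD step: each parameter p is updated as
% p <- p - eta*(1/nb)*sum_k g_k(p),
% i.e. eta times the gradient averaged over the nb examples of the minibatch.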
nb = batch_sizes(j+1); % Actual size of this minibatch (the last one may be bigger)
switch mode
case 0 % FNN
for l = 1:M
W{l} = W{l} - eta*g_W{l}/nb;
b{l} = b{l} - eta*g_b{l}/nb;
end
case 1 % TS-ANFIS
% Update all premise and consequent parameters via SGD
c = c - eta*g_c/nb;
sigma = sigma - eta*g_sigma/nb;
theta = theta - eta*g_theta/nb;
end
% Finally pack parameters again
switch mode
case 0 % FNN
para_i = {W,b,h_modes};
case 1 % TS-ANFIS
para_i = {c,sigma,theta};
end
end
%% Record objective value
obj_i = SSE(x,d,para_i,mode);
hist(i + 1) = obj_i;
% Update the final output parameter if lower SSE is achieved
if obj_i < obj
obj = obj_i;
para = para_i;
progress_count = 0;
elseif p.Results.early_stop_epoch
progress_count = progress_count + 1;
if progress_count == p.Results.N_early_stop
fprintf('Training error has not been lowered for %d epochs. Stopping training!\n',progress_count);
break;
end
end
if p.Results.early_stop_tol && (obj <= p.Results.tol)
fprintf('Desired tolerance met with SSE = %.2e. Stopping training!\n',obj);
break;
end
if p.Results.verbose
% Refresh the display at most every displaysec seconds (and on the last epoch)
if toc(print_timer) > p.Results.displaysec || i == max_epochs
print_timer = tic;
% Set up progressbar
progress_percent = i/max_epochs;
tmp = max(1,round(40*progress_percent)); % At least one segment, so tmp-1 >= 0
if i < max_epochs
arrow = '>';
else
arrow = '=';
end
progressbar = sprintf('|%s%s%s|',repmat('=',1,tmp-1),...
arrow,repmat(' ',1,40-tmp));
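% Example of a printed status line (illustrative values):
% Progress: 25% |=========>                              | Epoch: 125, SSE = 3.14e-02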
fprintf(repmat('\b',1,lineLength)); % Clear line
% Print all
lineLength = fprintf('Progress: %.0f%% %s Epoch: %d, SSE = %.2e',...
progress_percent*100,progressbar,i,obj);
end
end
end
if p.Results.verbose
fprintf('\n');
end
end