% Identification of a third-order plant K/((TT*s+1)^3) with pure time delay
% Tao, using a BP neural network. This section defines the simulation
% constants and the plant state variables.
close all;
clearvars;   % 'clear all' also wipes compiled functions and breakpoints; clearvars is sufficient
clc;
TT = 10;             % time constant of each first-order stage
K = 1.0;             % plant DC gain
Tao = 5;             % pure time delay (seconds)
N = 3;               % model order = tapped-delay-line length
dt = 1;              % sampling period
st = 300;            % simulation horizon (seconds)
lp = st/dt;          % number of samples
aa = exp(-dt/TT);    % discrete pole of one first-order stage under zero-order hold
bb = 1 - aa;         % matching input term so each stage has unit DC gain
u1 = 0; u2 = 0;      % training / test input sequences (grown below)
y1 = 0; y2 = 0;      % training / test plant outputs
x1 = 0; x2 = 0; x3 = 0;  % states of the three cascaded first-order stages
t = 0;               % time axis
TDL = N;             % number of past samples fed to the network
% Excitation signals: u1 is a random sequence for training, u2 a unit step
% for testing. Vectorized instead of assigning one element per loop pass
% (rand(1,lp) draws the same sequence as lp scalar calls).
u1 = 2*rand(1, lp);                 % training input, uniform on [0, 2]
%u2 = sign(sign((1:lp)*0.01));      % alternative test input
u2 = ones(1, lp);                   % test input: unit step
% Simulate the delay-free plant on the training input: the plant is split
% into three cascaded first-order sections, each discretized under a
% zero-order hold.
for k = 1:lp
    x1 = aa*x1 + K*bb*u1(k);   % first stage carries the plant gain K
    x2 = aa*x2 + bb*x1;
    x3 = aa*x3 + bb*x2;
    y1(k) = x3;                % plant output used as training data
end
t = (1:lp)*dt;                 % sample-time axis
% Reset the plant states, then run the same plant on the test input u2.
x1 = 0; x2 = 0; x3 = 0;
for k = 1:lp
    x1 = aa*x1 + K*bb*u2(k);
    x2 = aa*x2 + bb*x1;
    x3 = aa*x3 + bb*x2;
    y2(k) = x3;                % plant output used as test data
end
% Apply the pure time delay: shift both outputs right by Tao/dt samples and
% pad the front with zeros (equivalent to the original reverse-index loop).
d = Tao/dt;                                    % delay expressed in samples
y3 = [zeros(1, min(d, lp)), y1(1:max(lp-d, 0))];
y4 = [zeros(1, min(d, lp)), y2(1:max(lp-d, 0))];
% Build the training and test data sets for the network. Each input column
% is [u(i); u(i-1); ...; u(i-TDL); y(i-1); ...; y(i-TDL)] (2*TDL+1 rows);
% the target is the delayed plant output y(i).
bestW1 = 0;                         % best hidden-layer weights found during training
bestW2 = 0;                         % best output-layer weights found during training
bestE = inf;                        % lowest SSE seen so far
number = lp - TDL;                  % number of usable samples
% Preallocate: growing an array inside a loop is accidental O(n^2) in MATLAB.
datain  = zeros(2*TDL+1, number);
dataout = zeros(1, number);
testin  = zeros(2*TDL+1, number);
testout = zeros(1, number);
for i = TDL+1:lp
    c = i - TDL;                    % column index in the data matrices
    % Current and TDL past inputs, then TDL past outputs (newest first).
    datain(:, c) = [u1(i:-1:i-TDL)'; y3(i-1:-1:i-TDL)'];
    testin(:, c) = [u2(i:-1:i-TDL)'; y4(i-1:-1:i-TDL)'];
    dataout(c) = y3(i);             % desired network output
    testout(c) = y4(i);
end
indim = 2*TDL + 1;        % input-layer size: TDL+1 inputs plus TDL past outputs
outdim = 1;               % single network output
hiddenunitnum = 10;       % hidden-layer size
maxepochs = 30000;        % larger delay/time constants need more iterations
lr = 0.001;               % learning rate (recommended <= 0.001)
alpha = 0.9;              % momentum coefficient
E0 = 1e-11;               % target SSE at which training stops
% Initialize all weights and biases uniformly on [-0.1, 0.1].
% FIX: the original used 0.2*rands(...)-0.1, but rands() is uniform on
% [-1,1], giving the asymmetric range [-0.3, 0.1]. rand() (uniform [0,1])
% produces the symmetric small init the -0.1 offset clearly intends.
w1 = 0.2*rand(hiddenunitnum, indim) - 0.1;   % hidden-layer weights
b1 = 0.2*rand(hiddenunitnum, 1) - 0.1;       % hidden-layer biases
w2 = 0.2*rand(outdim, hiddenunitnum) - 0.1;  % output-layer weights
b2 = 0.2*rand(outdim, 1) - 0.1;              % output-layer biases
w1ex = [w1 b1];           % augmented weights: bias stored as an extra column
w2ex = [w2 b2];
dw1ex = zeros(size(w1ex));  % previous weight updates, kept for momentum
dw2ex = zeros(size(w2ex));
datainex = [datain' ones(number,1)]';  % inputs augmented with 1 for the bias
errorhistory = [];        % SSE per epoch, for plotting
% BP training with momentum: tansig hidden layer, linear output layer.
for k = 1:maxepochs
    hiddenout = tansig(w1ex*datainex);            % hidden activations (nh x number)
    hiddenoutex = [hiddenout' ones(number,1)];    % augment with bias input
    networkout = w2ex*hiddenoutex';               % linear output layer
    error = dataout - networkout;
    SSE = sumsqr(error);
    errorhistory = [errorhistory SSE];
    if bestE > SSE
        bestE = SSE;   % FIX: was never assigned, so the condition was always
                       % true and "best" silently tracked the latest epoch
        bestW1 = w1ex;
        bestW2 = w2ex;
    end
    if SSE < E0
        break;
    end
    % Backpropagation: output delta, then hidden delta.
    delta2 = error;
    % FIX: tansig'(x) = 1 - tansig(x)^2 = (1-h).*(1+h); the original used
    % 0.5*(1-h).^2, which is not the tansig derivative.
    delta1 = (w2'*delta2).*(1 - hiddenout.^2);
    dw1ex0 = lr*dw1ex;    % previous gradients, reused as momentum terms
    dw2ex0 = lr*dw2ex;
    dw2ex = delta2*hiddenoutex;
    dw1ex = delta1*datainex';
    w1ex = w1ex + lr*dw1ex + alpha*dw1ex0;
    w2ex = w2ex + lr*dw2ex + alpha*dw2ex0;
    w2 = w2ex(:, 1:hiddenunitnum);  % keep w2 in sync for the next hidden delta
end
% Split the best augmented weight matrices back into weights and biases,
% then evaluate the trained network on the training and test inputs.
w1 = bestW1(:, 1:end-1);
b1 = bestW1(:, end);
w2 = bestW2(:, 1:end-1);
b2 = bestW2(:, end);
trainhidden = tansig(w1*datain + repmat(b1, 1, number));
testnnout = w2*trainhidden + repmat(b2, 1, number);   % network output, training set
testhidden = tansig(w1*testin + repmat(b1, 1, number));
testnnout1 = w2*testhidden + repmat(b2, 1, number);   % network output, test set
% Trim the plant outputs and time axis to align with the data-set columns.
y3 = y3(TDL+1:TDL+number);
y4 = y4(TDL+1:TDL+number);
t = t(TDL+1:TDL+number);
% Plot results: (1) training-set fit, (2) SSE history, (3) test-set response.
subplot(3,1,1)
plot(t,y3,'k-',t,testnnout,'r-');
legend('实际系统输出','神经网络输出');  % 'actual plant output' / 'network output'
title('训练结束后系统的响应曲线');      % system response after training
subplot(3,1,2)
plot(errorhistory);
title('训练过程的误差变化曲线');        % SSE evolution during training
subplot(3,1,3)
plot(t,y4,'k-',t,testnnout1,'r--');
legend('实际系统输出','神经网络输出');  % 'actual plant output' / 'network output'
title('测试响应曲线');                  % test response curve