%% Learning goal: prediction algorithm based on a grey neural network
clear all
clc
% Full sample set: column 1 is the series to be predicted, columns 2-6 are the related factor series
X =[1.5220 0.5485 0.8680 0.6854 0.9844 0.5773;
1.4310 0.5943 0.7612 0.6567 0.9510 0.7184;
1.6710 0.6346 0.7153 0.6802 0.9494 0.6230;
1.7750 0.7838 0.8895 0.7442 0.9291 0.6924;
1.6300 0.5182 0.8228 0.6335 0.8668 0.5831;
1.6700 0.7207 0.8897 0.6690 0.9516 0.7863;
1.5920 0.6480 0.6915 0.7347 0.8530 0.4497;
2.0410 0.7291 0.9309 0.6788 0.9968 0.7356;
1.6310 0.7753 0.7970 0.7228 0.8702 0.7679;
2.0280 0.7923 0.8961 0.6363 0.9478 0.8039;
1.5860 0.7491 0.8884 0.6658 0.9398 0.8797;
1.7160 0.7550 0.7602 0.6157 0.9134 0.7204;
1.5110 0.5498 0.8127 0.6204 0.9284 0.6145;
1.4550 0.5404 0.7486 0.6328 0.9591 0.6857;
1.5680 0.6182 0.7471 0.6585 0.9802 0.6368;
1.8830 0.7931 0.9681 0.7646 0.8886 0.7411;
1.5620 0.5496 0.8658 0.7181 0.7832 0.5669;
1.6900 0.6644 0.8992 0.6357 0.9087 0.7933;
1.7910 0.5768 0.7130 0.7730 0.8829 0.4907;
2.0190 0.7473 0.9531 0.6768 0.9964 0.8092;
1.8520 0.8236 0.8079 0.6796 0.9272 0.8512;
1.5390 0.8640 0.8862 0.6386 0.9685 0.8567;
1.7280 0.7814 0.9410 0.6944 0.9629 0.8775;
1.6760 0.7285 0.7868 0.6987 0.8805 0.7630;
1.6670 0.5476 0.8223 0.6286 0.9355 0.5898;
1.3510 0.5557 0.7072 0.6811 0.9553 0.7326;
1.6030 0.5519 0.6816 0.7009 0.9736 0.6151;
1.8760 0.8039 0.8852 0.8068 0.9644 0.7477;
1.6310 0.4490 0.7941 0.7138 0.8281 0.5306;
1.7500 0.6729 0.8526 0.6223 0.9452 0.7562;
1.6000 0.6012 0.6640 0.7920 0.8878 0.4979;
1.9460 0.7751 0.9155 0.7032 0.9168 0.7432;
1.6360 0.7931 0.7635 0.6393 0.8757 0.7692;
1.8650 0.7598 0.8426 0.6756 0.9234 0.8065;
1.8140 0.7342 0.7572 0.6134 0.8862 0.7907];
[aa,bb]=size(X);   % aa = number of samples, bb = number of variables
% Accumulate the data (1-AGO) to use as the network input
[n,m]=size(X);     % same dimensions as aa,bb; n drives the accumulation loop below
for i=1:n
y(i,1)=sum(X(1:i,1));
y(i,2)=sum(X(1:i,2));
y(i,3)=sum(X(1:i,3));
y(i,4)=sum(X(1:i,4));
y(i,5)=sum(X(1:i,5));
y(i,6)=sum(X(1:i,6));
end
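% Note: the loop above is the first-order accumulated generating operation (1-AGO)
% of grey theory; an equivalent vectorized form (a sketch, not used by the script)
% would simply be  y = cumsum(X,1);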
% Initialize the grey model parameters a, b1..b5 with random values in [0.3, 0.55)
a=0.3+rand(1)/4;
b1=0.3+rand(1)/4;
b2=0.3+rand(1)/4;
b3=0.3+rand(1)/4;
b4=0.3+rand(1)/4;
b5=0.3+rand(1)/4;
% Initialize the learning rates
u1=0.0015;
u2=0.0015;
u3=0.0015;
u4=0.0015;
u5=0.0015;
% Initialize the weights and the threshold
t=1;
w11=a;
w21=-y(1,1);
w22=2*b1/a;
w23=2*b2/a;
w24=2*b3/a;
w25=2*b4/a;
w26=2*b5/a;
w31=1+exp(-a*t);
w32=1+exp(-a*t);
w33=1+exp(-a*t);
w34=1+exp(-a*t);
w35=1+exp(-a*t);
w36=1+exp(-a*t);
theta=(1+exp(-a*t))*(b1*y(1,2)/a+b2*y(1,3)/a+b3*y(1,4)...
/a+b4*y(1,5)/a+b5*y(1,6)/a-y(1,1));
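% Background note (a sketch of the standard GNNM(1,6) derivation, not part of the
% original comments): the weights above embed the solution of the grey whitening
% equation  dy1/dt + a*y1 = b1*y2 + ... + b5*y6,  whose time response is
%   y1(t) = (y1(1) - d)*exp(-a*t) + d,   with  d = (b1*y2 + ... + b5*y6)/a.
% With w11 = a, w21 = -y1(1), w2j = 2*b(j-1)/a and w3j = 1+exp(-a*t), the quantity
% LD_d - theta computed in the loops below reproduces y1(t), so a and b1..b5 can
% be learned by gradient descent.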
kk=1;
% Number of training samples
cc=30;
% Training loop over epochs
for j=1:20
E(j)=0;      % accumulated absolute error of the current epoch
for i=1:cc
% Forward pass: compute the network output
t=i;
LB_b=1/(1+exp(-w11*t)); % output of layer LB (sigmoid)
LC_c1=LB_b*w21; % outputs of layer LC
LC_c2=y(i,2)*LB_b*w22;
LC_c3=y(i,3)*LB_b*w23;
LC_c4=y(i,4)*LB_b*w24;
LC_c5=y(i,5)*LB_b*w25;
LC_c6=y(i,6)*LB_b*w26;
LD_d=w31*LC_c1+w32*LC_c2+w33*LC_c3+w34*LC_c4+w35*LC_c5+w36*LC_c6; % output of layer LD
theta=(1+exp(-w11*t))*(w22*y(i,2)/2+w23*y(i,3)/2+w24*y(i,4)/2+w25*y(i,5)/2+w26*y(i,6)/2-y(1,1)); % threshold
ym=LD_d-theta; % network output
yc(i)=ym;
% Error back-propagation
error=ym-y(i,1); % prediction error
E(j)=E(j)+abs(error); % accumulate the absolute error
error1=error*(1+exp(-w11*t)); % error signals for the LC-layer weights (all six share the same value)
error2=error*(1+exp(-w11*t));
error3=error*(1+exp(-w11*t));
error4=error*(1+exp(-w11*t));
error5=error*(1+exp(-w11*t));
error6=error*(1+exp(-w11*t));
error7=(1/(1+exp(-w11*t)))*(1-1/(1+exp(-w11*t)))*(w21*error1+w22*error2+w23*error3+w24*error4+w25*error5+w26*error6); % error propagated back to w11 through the sigmoid derivative
% Update the weights
w22=w22-u1*error2*LB_b;
w23=w23-u2*error3*LB_b;
w24=w24-u3*error4*LB_b;
w25=w25-u4*error5*LB_b;
w26=w26-u5*error6*LB_b;
w11=w11+a*t*error7;
end
end
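% Note: in this training scheme only w11 (the grey development coefficient a) and
% w22..w26 (the driving-term weights) are adapted; w21 and w31..w36 keep their
% initial values, as set up in the weight initialization above.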
% Plot the training error against the number of training epochs
figure(1)
plot(E)
title('Training error');
xlabel('Epoch');
ylabel('Error');
% Predict the remaining samples with the trained grey neural network
for i=(cc+1):aa
t=i;
LB_b=1/(1+exp(-w11*t)); % output of layer LB (sigmoid)
LC_c1=LB_b*w21; % outputs of layer LC
LC_c2=y(i,2)*LB_b*w22;
LC_c3=y(i,3)*LB_b*w23;
LC_c4=y(i,4)*LB_b*w24;
LC_c5=y(i,5)*LB_b*w25;
LC_c6=y(i,6)*LB_b*w26;
LD_d=w31*LC_c1+w32*LC_c2+w33*LC_c3+w34*LC_c4+w35*LC_c5+w36*LC_c6; % output of layer LD
theta=(1+exp(-w11*t))*(w22*y(i,2)/2+w23*y(i,3)/2+w24*y(i,4)/2+w25*y(i,5)/2+w26*y(i,6)/2-y(1,1)); % threshold
ym=LD_d-theta; % network output
yc(i)=ym;
end
yc=yc*1000;          % rescale the accumulated predictions
y(:,1)=y(:,1)*100;   % rescale the accumulated target (kept for reference; not used below)
% Restore the predicted series from the accumulated predictions (inverse accumulation)
for j=aa:-1:2
ys(j)=(yc(j)-yc(j-1))/10;
end
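% Equivalent vectorized form of the inverse accumulation above (a sketch, not used
% by the script):  ys(2:aa) = diff(yc(1:aa))/10;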
figure(2)
plot(ys((cc+1):aa),'-*');
hold on
plot(X((cc+1):aa,1)*100,'r:o');
legend('Grey NN prediction','Actual sample value')
title('Grey system prediction')
% Mean relative error of the predictions (ys and X(:,1)*100 are on the same scale)
xderror=0;
for i=(cc+1):aa
xderror=xderror+abs(ys(i)-X(i,1)*100)/(X(i,1)*100);
end
xderror=xderror/(aa-cc)
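% Vectorized equivalent of the error computation above (a sketch, not required by the script):
% idx = (cc+1):aa;
% xderror = mean(abs(ys(idx) - X(idx,1)'*100) ./ (X(idx,1)'*100));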