% Exact radial-basis-function network (newrbe) fit of a 20-point 1-D
% sample set; the design time is measured with clock/etime and the
% trained network is saved to net61.mat.
clear all;
% Training inputs: 20 points in [-1, 0.9], step 0.1.
p=-1:0.1:0.9;
% Training targets, one per input point.
t=[-0.832 -0.423 -0.024 0.344 1.282 3.456 4.020 3.232 2.102 1.504 0.248...
1.242 2.344 3.262 2.052 1.684 1.022 2.224 3.022 1.984];
t1=clock
% Exact RBF design: one neuron per sample, spread = 0.1.
% (No semicolons: t1, net and datat are deliberately echoed to the console.)
net=newrbe(p,t,0.1)
%%net=newrb(p,t,0.1,0.1,20,7)   % alternative: incremental newrb design
datat=etime(clock,t1)
% Persist the trained network object as net61.mat.
save net61 net
%% BP network fitting the sin function (BP网络拟合sin函数)
% BP network fit of sin(0.1*x): sweep hidden-layer sizes 3..12 and
% record the mean absolute test-set error for each candidate size.
P=1:2:100;            % training inputs
T=sin(P*0.1);         % training targets
l=3:12;               % candidate hidden-layer sizes
mae=zeros(1,10);      % mean absolute error per candidate size
for i=1:10
    % Two-layer net: tansig hidden layer, purelin output, Levenberg-Marquardt.
    net = newff(minmax(P),[l(i) 1],{'tansig' 'purelin'},'trainlm');
    net.trainParam.goal=1e-8;
    net.trainParam.epochs=1000;
    net.trainParam.min_grad=1e-20;
    net.trainParam.show=200;
    net.trainParam.time=inf;
    net= train(net,P,T);
    % Test on a wider range than the training data (extrapolation check).
    P_test=1:2:200;
    T_test=sin(P_test*0.1);
    k=length(T_test);
    X=sim(net,P_test);
    % Mean absolute error over the whole test set.
    % BUGFIX: the original inner loop wrote error(i)=abs(X(i)-T_test(j))/k,
    % comparing the wrong sample (X(i) vs T_test(j)) and keeping only the
    % last j instead of accumulating; it also shadowed the builtin error().
    mae(i)=sum(abs(X-T_test))/k;
end
plot(1:length(P_test),T_test,'r+:',1:length(P_test),X,'bo:')
% BP network fit of sin(0.1*x) with a fixed 7-neuron hidden layer.
P=1:2:200;
T=sin(P*0.1);
% Two-layer feed-forward net: 7 tansig hidden neurons, purelin output,
% trained with Levenberg-Marquardt.
net = newff(minmax(P),[7 1],{'tansig' 'purelin'},'trainlm');
net.trainParam.goal=1e-8
net.trainParam.epochs=1000;
net.trainParam.min_grad=1e-20;
net.trainParam.show=200;
net.trainParam.time=inf;
net= train(net,P,T)
% NOTE(review): the test set is identical to the training set here, so
% this measures only the fit quality, not generalization.
P_test=1:2:200;
T_test=sin(P_test*0.1);
X=sim(net,P_test)
plot(P,T,'r+:',P_test,X,'bo:')
%% RBF neural network (RBF神经网络)
% RBF network on spreadsheet data: load train/test sets from Excel, then
% normalize the target values to [-1, 1].
clear
close all
train_data=xlsread('E:\作业及文档\智能控制\程序\七个RBF神经网络的源程序\train');%load training data
train_p=(train_data(:,2))';   % column 2: training inputs (as row vector)
train_t=(train_data(:,3))';   % column 3: training targets (as row vector)
test_data=xlsread('E:\作业及文档\智能控制\程序\七个RBF神经网络的源程序\test');%load test data
test_p=(test_data(:,2))';     % column 2: test inputs
test_t=(test_data(:,3))';     % column 3: test targets
%---------------------------------------------------
% Normalize training/test targets to [-1, 1]
% [PN1,minp,maxp,TN1,mint,maxt] = premnmx(train_p,train_t);%normalize training samples
% PN2 = tramnmx(test_p,minp,maxp);%normalize test samples
[TN1,mint,maxt] = premnmx(train_t);%normalize training targets, remember min/max
TN2 = tramnmx(test_t,mint,maxt);  % scale test targets with the TRAINING min/max
% Expand the scalar input into powers 1..5 ("orthogonal" expansion);
% each power channel is normalized to [-1, 1].
V1=train_p;V2=train_p.^2;V3=train_p.^3;V4=train_p.^4;V5=train_p.^5;
% Normalize each training channel and keep its min/max for later reuse.
[VP1,minV1,maxV1] = premnmx(V1);
[VP2,minV2,maxV2] = premnmx(V2);
[VP3,minV3,maxV3] = premnmx(V3);
[VP4,minV4,maxV4] = premnmx(V4);
[VP5,minV5,maxV5] = premnmx(V5);
PN1=[VP1;VP2;VP3;VP4;VP5];   % 5 x N training input matrix
% Same power expansion for the test inputs.
VC1=test_p;VC2=test_p.^2;VC3=test_p.^3;VC4=test_p.^4;VC5=test_p.^5;
% BUGFIX: the test channels must be scaled with the TRAINING min/max via
% tramnmx, not re-normalized with their own range via premnmx; otherwise
% training and test inputs live on different scales and the network is
% evaluated on inconsistently scaled data.
VT1 = tramnmx(VC1,minV1,maxV1);
VT2 = tramnmx(VC2,minV2,maxV2);
VT3 = tramnmx(VC3,minV3,maxV3);
VT4 = tramnmx(VC4,minV4,maxV4);
VT5 = tramnmx(VC5,minV5,maxV5);
PN2=[VT1;VT2;VT3;VT4;VT5];   % 5 x M test input matrix
%---------------------------------------------------
% Train the RBF network
% Neurons are added incrementally, up to the number of training samples
goal = 1e-1; % sum-squared-error goal (default 0)
spread = 0.01; % larger spread -> fewer neurons needed (default 1)
MN = size(PN1,2); % maximum number of neurons (default: number of samples)
DF = 1; % display interval: neurons added between progress displays (default 25)
net = newrb(PN1,TN1,goal,spread,MN,DF);
% net = newrb(PN1,TN1,goal,spread);
%---------------------------------------------------
% Evaluate
YN1 = sim(net,PN1); % network output on the training samples
YN2 = sim(net,PN2); % network output on the test samples
% MSE1 = mean((TN1-YN1).^2) % training mean squared error
% MSE2 = mean((TN2-YN2).^2) % test mean squared error
% figure
% plot(1:length(test_t),test_t,'r+:')
%---------------------------------------------------
% Undo the target normalization (back to original units)
Y2 = postmnmx(YN2,mint,maxt);
%---------------------------------------------------
% Plot true vs predicted test targets
figure
plot(1:length(test_t),test_t,'r+:',1:length(Y2),Y2,'b--')
title('+为真实值,o为预测值')
legend('真实值','预测值');
% NOTE(review): this fragment appears to be error-plotting code pasted in
% twice from another script.  X, T and P are not defined in this section
% (they come from an earlier pasted example), and `x` on the second plot
% line is undefined anywhere in the file — verify against the original
% source this was copied from.  The division by T is Inf wherever T==0.
error_rbf=abs(X-T)./T
figure(1)
plot(P,error_rbf)
xlabel('样本点')
ylabel('相对误差')
title('训练样本仿真误差图')
figure(2)
% NOTE(review): duplicate of the block above; `x` below is undefined.
error_rbf=abs(X-T)./T
figure(1)
plot(x,error_rbf,'k')
xlabel('样本点')
ylabel('误差的变化')
title('训练样本仿真误差图')
figure(2)
% RBF (newrb) approximation of z = 4x^3 - 4.5x^2 + 1.5x - y on a 20x20
% grid, with the expected and fitted surfaces plotted side by side.
clear all
clc
k=0.05;             % grid step along each axis
m=round(1.0/k);     % grid points along x (round: 1/k may not be exact)
n=round(1.0/k);     % grid points along y
X=[k:k:1];          % x sample values
Y=[k:k:1];          % y sample values
% Each grid node (X(i),Y(j)) becomes one 2-D input column of p.
p=zeros(2,m*n);
for i=1:m
    for j=1:n       % BUGFIX: iterate y with n, not m (worked only because m==n)
        p(1,(i-1)*n+j)=X(i);
        p(2,(i-1)*n+j)=Y(j);
    end
end
% Target surface values, one per input column.
Z1=zeros(1,m*n);
for i=1:m
    for j=1:n
        Z1(1,(i-1)*n+j)=4*X(i).^3-4.5*X(i).^2+1.5*X(i)-Y(j);
    end
end
eg=0.02;            % mean-squared-error goal
sc=1;               % RBF spread
net=newrb(p,Z1,eg,sc);
Z2=sim(net,p);      % network output on the training grid
% Fold the flat vectors back into m x n matrices: ZZ(i,j) = f(X(i),Y(j)).
ZZ1=zeros(m,n);
ZZ2=zeros(m,n);
for i=1:m
    for j=1:n
        ZZ1(i,j)=Z1(1,(i-1)*n+j);
        ZZ2(i,j)=Z2(1,(i-1)*n+j);
    end
end
%%plot(X,ZZ1,'ko-',X,ZZ2,'b+-')
subplot(1,2,1)
% BUGFIX: surf(X,Y,Z) expects Z(j,i) at (X(i),Y(j)), i.e. rows indexed by
% Y; ZZ1(i,j) has rows indexed by X, so the matrices must be transposed
% or the (asymmetric) surface is displayed mirrored across the diagonal.
surf(X,Y,ZZ1')
title('期望输出')
subplot(1,2,2)
surf(X,Y,ZZ2')
title('实际输出')
% Error surface of a single logsig neuron over a grid of weight/bias values.
p=[-6.0 -6.1 -4.1 -4.0 +4.0 +4.1 +6.0 +6.1];     % neuron inputs
t=[+0.0 +0.0 +0.97 +0.99 +0.01 +0.03 +1.0 +1.0]; % desired outputs
weightRange=-1:0.1:1;       % candidate weight values
biasRange=-2.5:0.25:+2.5;   % candidate bias values
errSurface=errsurf(p,t,weightRange,biasRange,'logsig');
% Render the error surface viewed from azimuth 60, elevation 30.
plotes(weightRange,biasRange,errSurface,[60 30])
function[xm,fv]=PSO(fitness,N,c1,c2,w,M,D)
% PSO  Basic particle swarm optimization (minimization).
%   [xm,fv] = PSO(fitness,N,c1,c2,w,M,D)
%   fitness - handle of the objective function to minimize
%   N       - number of particles
%   c1, c2  - cognitive / social acceleration coefficients
%   w       - inertia weight
%   M       - number of iterations
%   D       - problem dimension
%   xm      - best position found (returned as a column vector)
%   fv      - objective value at xm
format long;
% Random initial positions and velocities (standard normal).
for i=1:N
    for j=1:D
        x(i,j)=randn;
        v(i,j)=randn;
    end
end
% Personal bests: value p(i) and position y(i,:).
for i=1:N
    p(i)=fitness(x(i,:));
    y(i,:)=x(i,:);
end
% Global best = best of the initial particles.
pg=x(N,:);
for i=1:(N-1)
    if fitness(x(i,:))<fitness(pg)
        pg=x(i,:);
    end
end
for t=1:M
    for i=1:N
        % Velocity and position update with random cognitive/social pulls.
        v(i,:)=w*v(i,:)+c1*rand*(y(i,:)-x(i,:))+c2*rand*(pg-x(i,:));
        x(i,:)=x(i,:)+v(i,:);
        % Evaluate once per particle per iteration (the original called
        % fitness repeatedly for the same point).
        fx=fitness(x(i,:));
        if fx<p(i)
            % BUGFIX: the original never updated p(i) after initialization,
            % so both this personal-best test and the global-best test
            % below compared against stale initial fitness values.
            p(i)=fx;
            y(i,:)=x(i,:);
        end
        if p(i)<fitness(pg)
            pg=y(i,:);
        end
    end
    Pbest(t)=fitness(pg);   % best value seen up to iteration t
end
disp('*********************************************')
disp('')
xm=pg'
disp('')
fv=fitness(pg)
disp('*************************************************')
% NOTE(review): this fragment relies on P and T defined in an earlier
% pasted section — they are undefined here.  X=sim(net,P) has length(P)
% points, while T is reassigned below from the 51-point P_test grid, so
% the element-wise error and the final plot only work if length(P)==51.
% Verify against the script this was copied from.
net=newrbe(P,T,2)
X=sim(net,P)
P_test=0:0.01:0.5
% Analytic target: cubic polynomial evaluated on the test grid.
T=4*P_test.^3-4.5*P_test.^2+1.5*P_test;
% Relative error; division by T is Inf at P_test==0 where T==0.
error_rbf=abs(X-T)./T
figure(1)
plot(P,error_rbf,'k')
xlabel('样本点')
ylabel('误差的变化')
title('训练样本仿真误差图')
figure(2)
plot(P_test,X,'b+-.',P,T,'ko-')
xlabel('样本点')
ylabel('输出值')
legend('预测值','真实值')
title('仿真结果')
grid on
% Standalone data fragment: a 2x8 input matrix P and a 4x8 target matrix T.
% NOTE(review): nothing in the visible code trains on these values —
% presumably the rest of this example was lost when the snippets were
% pasted together.  (No semicolons: both matrices echo to the console.)
clear all
clc
P=[0 0.025 0.05 0.075 0.1 0.125 0.15 0.175;0 0.12877 0.26512 0.40916 0.56097...
0.72063 0.88821 1.0638]
T=[0.025003 0.049985 0.07499 0.099981 0.124998 0.15 0.17501 0.199996;
0.12883 0.26511 0.40912 0.56104 0.72061 0.8882 1.0638 1.2475;
0.19618 0.1875387 0.18335 0.17938 0.17553 0.17189 0.16837 0.165;
0.98146 0.98225 0.98309 0.98379 0.98446 0.9851 0.98572 0.98633]
% Exact RBF interpolation (newrbe) of 21 samples, evaluated on a denser
% grid and plotted against the original points.
clear all
clc
A=-1:0.1:1;                      % sample inputs (21 points)
% Sample targets, one per input point.
B=[-0.9602 -0.5770 -0.0729 0.3771 0.6405 0.6600 0.4609...
0.1336 -0.2013 -0.4344 -0.5000 -0.3930 -0.1647 0.0988...
0.3072 0.3960 0.3449 0.1816 -0.0312 -0.2189 -0.3201];
net=newrbe(A,B,1);               % exact RBF design, spread = 1
C=-1:0.01:1;                     % dense evaluation grid (201 points)
D=sim(net,C);                    % network output on the dense grid
figure(2)
% BUGFIX: plot() resets the axes (default NextPlot 'replace'), which
% erases any labels set beforehand — the original called xlabel/ylabel
% first and lost them.  Plot first, label afterwards.
plot(A,B,'+',C,D)
xlabel('Input')
ylabel('Output')
legend('原始值','拟合曲线')
% (web-page artifact from the pasted source: "评论0" / "0 comments")