% Read the data
clc
clear
close all
clear global ver
warning('off');
% Training and test data
num = xlsread('a.xlsx');   % numeric block: columns 1-5 are inputs, 6-7 are targets
n = randperm(length(num));
input_train=num(n(1:100),1:5)';
output_train=num(n(1:100),6:7)';
input_test=num(101:130,1:5)';
output_test=num(101:130,6:7)';
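% 100 randomly chosen rows form the training set (columns 1-5 inputs, 6-7
% targets) and rows 101-130 the test set; data are transposed so that each
% column is one sample. Note: the 100 training rows are drawn from all 130
% rows, so they may overlap the fixed test rows.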
%
% global minAllSamOut;
% global maxAllSamOut;
[AllSamInn,minAllSamIn,maxAllSamIn,AllSamOutn,minAllSamOut,maxAllSamOut]=premnmx(input_train,output_train);
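% premnmx maps every input/output row linearly onto [-1,1]:
%   xn = 2*(x - min)/(max - min) - 1
% The returned min/max vectors are reused below by tramnmx (to scale the test
% inputs) and postmnmx (to undo the scaling on the network outputs).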
% Evaluating Sample
EvaSamIn=input_test;
EvaSamInn=tramnmx(EvaSamIn,minAllSamIn,maxAllSamIn); % scale test inputs with the training min/max
% AllSamIn=tramnmx(EvaSamIn,minAllSamIn,maxAllSamIn); %
% AllSamOut=tramnmx(EvaSamOut,minAllSamOut,maxAllSamOut);
% global Ptrain;
Ptrain = AllSamInn;
% global Ttrain;
Ttrain = AllSamOutn;
inputnum=5;            % number of network inputs
% global hiddennum;
hiddennum=[10 8 6];    % sizes of the three hidden layers
% global outdim;
outputnum=2;           % number of network outputs
% Initialize PSO parameters
vmax=0.5; % Maximum velocity
minerr=0.0000000001; % Minimum error
wmax=0.90;   % upper bound of the inertia weight
wmin=0.30;   % lower bound of the inertia weight
% global itmax; %Maximum iteration number
itmax=10;
c1=2;        % cognitive acceleration coefficient
c2=2;        % social acceleration coefficient
W=wmax-((wmax-wmin)/itmax)*(1:itmax);   % inertia weight decreases linearly from wmax towards wmin
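% Standard PSO update used below:
%   v <- w*v + c1*rand*(pbest - x) + c2*rand*(gbest - x),   x <- x + v
% with velocities clipped to [-vmax, vmax] and positions to [a, b].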
% Particle positions are initialized uniformly in [a,b]
a=-1;
b=1;
% Initial velocities are drawn from [m,n] (they could also start at zero)
m=-1;
n=1;
% global N; % number of particles
N=40;
% global D; % length of particle
D=(inputnum+1)*hiddennum(1) + (hiddennum(1)+1)*hiddennum(2) + (hiddennum(2)+1)*hiddennum(3) + (hiddennum(3)+1)*outputnum ;
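% D = total number of weights and biases in the 5-10-8-6-2 network:
%     (5+1)*10 + (10+1)*8 + (8+1)*6 + (6+1)*2 = 216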
% Initialize positions of particles
% rand('state',sum(100*clock));
X=a+(b-a)*rand(N,D,1);        % positions in [-1,1]; rand is uniform on [0,1]
%Initialize velocities of particles
V=0.2*(m+(n-m)*rand(N,D,1));  % small initial velocities
%
% global fvrec;
MinFit=[];
BestFit=[];
%Function to be minimized: the performance (MSE) of the network. Build the network:
% global net;
net=newff(minmax(Ptrain),[hiddennum,outputnum],{'logsig','logsig','tansig','tansig'},'trainlm');
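% newff builds a 5-10-8-6-2 feed-forward network: logsig on the first two
% hidden layers, tansig on the third hidden layer and the output layer,
% trained with Levenberg-Marquardt ('trainlm').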
fitness=fitcal(X,net,inputnum,hiddennum,outputnum,D,Ptrain,Ttrain,minAllSamOut,maxAllSamOut);
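% fitcal lives in a separate file (not shown here). It is assumed to decode
% each row of X into network weights/biases, simulate the network on Ptrain
% and return the resulting error as fitness(:,1,<latest page>); see the
% illustrative sketch at the end of this file.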
fvrec(:,1,1)=fitness(:,1,1);
[C,I]=min(fitness(:,1,1));
MinFit=[MinFit C];
BestFit=C;   % best fitness found so far
L(:,1,1)=fitness(:,1,1); % fitness of every particle at each iteration
B(1,1,1)=C; % minimum fitness of this iteration
gbest(1,:,1)=X(I,:,1); % global best particle in the population
% G holds N copies of the global best; pbest starts as each particle's own position
G(:,:,1)=repmat(gbest(1,:,1),N,1);
pbest(:,:,1)=X(:,:,1);
V(:,:,2)=W(1)*V(:,:,1)+c1*rand*(pbest(:,:,1)-X(:,:,1))+c2*rand*(G(:,:,1)-X(:,:,1));
%V(:,:,2)=cf*(W(1)*V(:,:,1)+c1*rand*(pbest(:,:,1)-X(:,:,1))+c2*rand*(G(:,:,1)-X(:,:,1)));
%V(:,:,2)=cf*(V(:,:,1)+c1*rand*(pbest(:,:,1)-X(:,:,1))+c2*rand*(G(:,:,1)-X(:,:,1)));
% limit particle velocities to [-vmax, vmax]
V(:,:,2)=max(min(V(:,:,2),vmax),-vmax);
X(:,:,2)=X(:,:,1)+V(:,:,2);
% limit particle positions to [a, b] = [-1, 1]
X(:,:,2)=max(min(X(:,:,2),1),-1);
%******************************************************
for j=2:itmax
disp('Iteration and Current Best Fitness')
disp(j-1)
disp(B(1,1,j-1))
% Calculation of new positions
fitness=fitcal(X,net,inputnum,hiddennum,outputnum,D,Ptrain,Ttrain,minAllSamOut,maxAllSamOut);
% fvrec(:,1,j)=fitness(:,1,j);
%[maxC,maxI]=max(fitness(:,1,j));
%MaxFit=[MaxFit maxC];
%MeanFit=[MeanFit mean(fitness(:,1,j))];
[C,I]=min(fitness(:,1,j));
MinFit=[MinFit C];
BestFit=[BestFit min(MinFit)];
L(:,1,j)=fitness(:,1,j);
B(1,1,j)=C;
gbest(1,:,j)=X(I,:,j);
[C,I]=min(B(1,1,:));
% keep gbest equal to the best particle found so far
if B(1,1,j)>C
gbest(1,:,j)=gbest(1,:,I);
end
% if C<=minerr, break, end
%Matrix composed of gbest vector
if j>=itmax, break, end
for p=1:N
G(p,:,j)=gbest(1,:,j);
end
for i=1:N
[C,I]=min(L(i,1,:));
if L(i,1,j)<=C
pbest(i,:,j)=X(i,:,j);
else
pbest(i,:,j)=X(i,:,I);
end
end
V(:,:,j+1)=W(j)*V(:,:,j)+c1*rand*(pbest(:,:,j)-X(:,:,j))+c2*rand*(G(:,:,j)-X(:,:,j));
%V(:,:,j+1)=cf*(W(j)*V(:,:,j)+c1*rand*(pbest(:,:,j)-X(:,:,j))+c2*rand*(G(:,:,j)-X(:,:,j)));
%V(:,:,j+1)=cf*(V(:,:,j)+c1*rand*(pbest(:,:,j)-X(:,:,j))+c2*rand*(G(:,:,j)-X(:,:,j)));
% limit velocities to [-vmax, vmax]
V(:,:,j+1)=max(min(V(:,:,j+1),vmax),-vmax);
X(:,:,j+1)=X(:,:,j)+V(:,:,j+1);
% limit positions to [-1, 1]
X(:,:,j+1)=max(min(X(:,:,j+1),1),-1);
end
disp('Iteration and Current Best Fitness')
disp(j)
disp(B(1,1,j))
disp('Global Best Fitness and Occurred Iteration')
[C,I]=min(B(1,1,:));
disp([C I])
% Simulate the network with the best weights found
x=gbest(1,:,I);   % best particle over all iterations
% Decode the particle into per-layer weight and bias segments
len=[inputnum*hiddennum(1), hiddennum(1), ...
     hiddennum(1)*hiddennum(2), hiddennum(2), ...
     hiddennum(2)*hiddennum(3), hiddennum(3), ...
     hiddennum(3)*outputnum, outputnum];
idx=cumsum([0 len]);
w1=x(idx(1)+1:idx(2));
B1=x(idx(2)+1:idx(3));
w2=x(idx(3)+1:idx(4));
B2=x(idx(4)+1:idx(5));
w3=x(idx(5)+1:idx(6));
B3=x(idx(6)+1:idx(7));
w4=x(idx(7)+1:idx(8));
B4=x(idx(8)+1:idx(9));
% Assign the decoded weights and biases to the network
net.iw{1,1}=reshape(w1,hiddennum(1),inputnum);
net.lw{2,1}=reshape(w2,hiddennum(2),hiddennum(1));
net.lw{3,2}=reshape(w3,hiddennum(3),hiddennum(2));
net.lw{4,3}=reshape(w4,outputnum,hiddennum(3));
net.b{1}=reshape(B1,hiddennum(1),1);
net.b{2}=reshape(B2,hiddennum(2),1);
net.b{3}=reshape(B3,hiddennum(3),1);
net.b{4}=reshape(B4,outputnum,1);
%% BP network fine-tuning
% training parameters
net.trainParam.epochs=2000;
net.trainParam.lr=0.0001;
net.trainParam.goal=0.000001;
net.trainParam.show=100;
net.trainParam.showWindow=0;
% train the network
%[net,per2]=train(net,AllSamInn,AllSamOutn);
net=train(net,AllSamInn,AllSamOutn);
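% PSO only supplies the starting weights; train() then fine-tunes them with
% Levenberg-Marquardt ('trainlm') on the normalized training data.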
% nettesterr=mse(sim(net,Ptest)-Ttest);
% testsamout = postmnmx(sim(net,Ptest),minAllSamOut,maxAllSamOut);
% realtesterr=mse(testsamout-TargetOfTestSam)
EvaSamOutn = sim(net,EvaSamInn);
EvaSamOut = postmnmx(EvaSamOutn,minAllSamOut,maxAllSamOut); % map outputs back to the original scale
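% postmnmx inverts the premnmx mapping:  x = (xn + 1).*(max - min)/2 + min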
% output_test = round((output_test-100)/10);
% EvaSamOut = round((EvaSamOut-100)/10);
% for ii = 1:length(EvaSamOut)
% if EvaSamOut(ii)>0.5
% EvaSamOut(ii) =1;
% else
% EvaSamOut(ii)=0;
% end
% end
% ma = 0;
% for ii = 1:length(EvaSamOut)
% if EvaSamOut(ii)-output_test(ii)==0
% ma = ma+1;
% end
% end
error=EvaSamOut-output_test;                    % absolute prediction error
errormape=(EvaSamOut-output_test)./output_test; % relative (MAPE-style) error
[BPoutput1,error1] = bpp(num);   % external helper bpp.m, presumably a plain BP baseline for comparison
figure(1)
grid
hold on
plot(BestFit,'r');
title(['PSO fitness curve, best iteration = ' num2str(I)]);
xlabel('Generation');ylabel('Fitness');
legend('Best fitness');
disp('Fitness   Variables');
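% ---------------------------------------------------------------------------
% Hypothetical sketch only (not called above). fitcal.m is a separate file
% that is assumed to decode each particle into the 5-10-8-6-2 network,
% simulate it on the training set and return the MSE as its fitness. The
% local function fitcal_sketch below illustrates that assumed contract; the
% real fitcal.m on the MATLAB path is what the script actually uses.
function fitness=fitcal_sketch(X,net,inputnum,hiddennum,outputnum,D,Ptrain,Ttrain,minAllSamOut,maxAllSamOut) %#ok<DEFNU,INUSD>
% D, minAllSamOut and maxAllSamOut mirror the call above; the real fitcal may
% use them, e.g. to score errors on the de-normalized scale via postmnmx.
page=size(X,3);                 % newest page of the particle history
N=size(X,1);
fitness=zeros(N,1,page);
len=[inputnum*hiddennum(1), hiddennum(1), ...
     hiddennum(1)*hiddennum(2), hiddennum(2), ...
     hiddennum(2)*hiddennum(3), hiddennum(3), ...
     hiddennum(3)*outputnum, outputnum];
idx=cumsum([0 len]);
for p=1:N
    x=X(p,:,page);              % one particle = one full weight/bias vector
    net.iw{1,1}=reshape(x(idx(1)+1:idx(2)),hiddennum(1),inputnum);
    net.b{1}   =reshape(x(idx(2)+1:idx(3)),hiddennum(1),1);
    net.lw{2,1}=reshape(x(idx(3)+1:idx(4)),hiddennum(2),hiddennum(1));
    net.b{2}   =reshape(x(idx(4)+1:idx(5)),hiddennum(2),1);
    net.lw{3,2}=reshape(x(idx(5)+1:idx(6)),hiddennum(3),hiddennum(2));
    net.b{3}   =reshape(x(idx(6)+1:idx(7)),hiddennum(3),1);
    net.lw{4,3}=reshape(x(idx(7)+1:idx(8)),outputnum,hiddennum(3));
    net.b{4}   =reshape(x(idx(8)+1:idx(9)),outputnum,1);
    % fitness = mean squared error on the normalized training set
    fitness(p,1,page)=mse(sim(net,Ptrain)-Ttrain);
end
end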