% Read the raw data set.
clc
clear
close all
warning off
clear global ver
% --- Old loading path kept for reference (commented out) ---
% [num1,ax,ay ]= xlsread('1.xlsx',1);
% nn = length(num1);
% num = zeros(nn-1,19);
% for ii = 1:nn-1
% num(ii,:) = [num1(ii,7) num1(ii,[8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25]) num1(ii+1,7)];
% end
% input_test1 = [num1(nn,7) num1(nn,[8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25])];% redefine data so the last row predicts the future value
% pan=19;
% Read numeric data (num1), text cells (ax) and raw cells (ay) from sheet 1.
[num1,ax,ay ]= xlsread('return(y)+指标.xlsx',1);
nn = length(num1);   % NOTE(review): length() on a matrix is max(size); presumably the row count here -- confirm
pan=num1(:,1);       % group/label column (column 1)
panx =2;             % tracker for the previous group id, used when scanning below
pany = [];           % accumulates [row-index, group-id] pairs at group boundaries
% Remove invalid rows, scanning bottom-up so deletions do not shift the
% rows still to be visited.  A row is dropped when any feature cell
% (columns 3..34) is NaN, or when its group label (column 1) is NaN.
% BUG FIX: the original deleted row ii inside an inner loop and then still
% evaluated isnan(num1(ii,1)), which tested -- and could delete -- a
% different row, or indexed past the end when the last row was removed.
for ii = nn:-1:1
    if any(isnan(num1(ii,3:34)))
        num1(ii,:) = [];        % drop row with a missing feature value
    elseif isnan(num1(ii,1))
        num1(ii,:) = [];        % drop row with a missing group label
    end
end
nn = length(num1);              % refresh row count after filtering
pan=num1(:,1);                  % refreshed group/label column
% Build the supervised-learning matrix: each row is
% [target(t), 15 indicator columns (20..34), target(t+1)],
% i.e. 16 inputs followed by the next-step response.
num = zeros(nn-1,17);
for ii = 1:nn-1
num(ii,:) = [num1(ii,3) num1(ii,[20 21 22 23 24 25 26 27 28 29 30 31 32 33 34]) num1(ii+1,3)];
if pan(ii)~=panx
pany = [pany;ii-1 panx];  % record [last-row-of-group, group-id] at each group boundary
panx = pan(ii);
end
end
pan=16;  % from here on, pan = number of input features
num0 = [];
input_test1=[];
% NOTE(review): the inner loop variable jj is never used in the body below,
% so the same row (built from ii, which only runs over 1..length(pany), not
% over data rows) is appended 51 times per boundary.  This looks like a bug
% -- presumably rows around the boundary indices stored in pany(ii,1) were
% intended, via num1(jj,...).  Also jj = ii-50 is negative for small ii.
% TODO confirm the intended window before changing it.
for ii = 1:length(pany)
for jj = ii-50:ii
num0 = [num0; [num1(ii,3) num1(ii,[20 21 22 23 24 25 26 27 28 29 30 31 32 33 34]) num1(ii+1,3)]];
end
input_test1 = [input_test1;num0];
num0=[];
end
% Split into training and test sets and normalise to [-1, 1].
% NOTE(review): assumes the sample matrix has at least 3200 rows -- confirm.
m =2000;                                 % number of training samples
% BUG FIX: the original computed n = randperm(length(num)) but never used it
% (n is reassigned below as a PSO velocity bound), so the dead shuffle is
% removed.  Samples are therefore taken in file order, as before.
input_train =num((1:m),1:pan)';          % training inputs  (features x samples)
output_train = num((1:m),pan+1)';        % training targets (1 x samples)
input_test = num((m+1:3200),1:pan)';     % test inputs, rows m+1..3200
output_test = num((m+1:3200),pan+1)';    % test targets
output_test1 = input_test1(:,1+pan);     % extra evaluation targets
input_test1 = input_test1(:,1:pan);      % extra evaluation inputs
[inputn,inputps]=mapminmax(input_train,-1,1);    % fit input scaling on training data
[outputn,outputps]=mapminmax(output_train,-1,1); % fit output scaling on training data
inputn_test=mapminmax('apply',input_test,inputps);
outputn_test=mapminmax('apply',output_test,outputps);
inputn_test1=mapminmax('apply',input_test1',inputps);
% BUG FIX: output_test1 is a column vector but outputps was fitted on a row
% vector, so it must be transposed before 'apply' (matches the line above).
outputn_test1=mapminmax('apply',output_test1',outputps);
% Baseline: train a plain LSTM (project helper lstmfun) for comparison.
% NOTE(review): lstmfun receives the raw output_test here, while the fitcal
% call below uses the normalised outputn_test -- verify which one it expects.
[lstm_pred,lstmerror1] = lstmfun(num,inputn,outputn,inputn_test,output_test,pan,outputps);
% Legacy min/max normalisation (premnmx/tramnmx are deprecated in favour of
% mapminmax); kept because downstream code uses these variables.
[AllSamInn,minAllSamIn,maxAllSamIn,AllSamOutn,minAllSamOut,maxAllSamOut]=premnmx(input_train,output_train);
EvaSamIn=input_test;
EvaSamInn=tramnmx(EvaSamIn,minAllSamIn,maxAllSamIn); % preprocessing
Ptrain = AllSamInn;
Ttrain = AllSamOutn;
% ---------------- PSO hyper-parameter search set-up ----------------
% A particle is [numHiddenUnits, fcLayerSize, miniBatchSize, dropoutProb].
vmax=0.0151;   % maximum velocity component
minerr=0.001;  % stop early when the best fitness falls below this
wmax=0.90;     % initial inertia weight
wmin=0.30;     % final inertia weight
% global itmax; %Maximum iteration number
itmax=2;       % number of PSO iterations
c1=2;          % cognitive acceleration coefficient
c2=2;          % social acceleration coefficient
% Inertia weight declines linearly from wmax towards wmin over the run.
W = zeros(1,itmax);
for iter=1:itmax
W(iter)=wmax-((wmax-wmin)/itmax)*iter;
end
% Initial velocity components are drawn from the interval (m, n).
m=-1;
n=1;
% global N; % number of particles
N=5;           % number of particles
% global D; % length of particle
D=4;           % dimensions per particle
gbests = [100 100 100 0.2];
% Per-dimension initialisation bounds: positions are drawn between a and b.
a=[200 200 200 0.98];
b=[10 10 10 0.05];
% rand('state',sum(100*clock));
% BUG FIX: the original wrapped this draw in "for ii = N", which iterates
% exactly once for scalar N and only obscured the intent; one call already
% produces the full N-by-D position matrix.
X = a+(b-a).*rand(N,D);
%Initialize velocities of particles in (m, n) scaled by 0.2
V=0.2*(m+(n-m)*rand(N,D,1));
%
% global fvrec;
MinFit=[];   % best fitness found at each iteration
BestFit=[];  % running best-so-far fitness per iteration
% Evaluate the initial swarm.  fitcal is a project helper that trains an
% LSTM per particle and returns its error as the fitness value.
fitness=fitcal(X,num,inputn,outputn,inputn_test,outputn_test,pan,outputps);
fvrec(:,1,1)=fitness(:,1,1);
[C,I]=min(fitness(:,1,1));
MinFit=[MinFit C];
BestFit=[BestFit C];
L(:,1,1)=fitness(:,1,1); %record the fitness of every particle at each iteration
B(1,1,1)=C; %record the minimum fitness over the swarm
gbest(1,:,1)=X(I,:,1); %the global best particle in the population
% G: the global best broadcast to one row per particle.
for p=1:N
G(p,:,1)=gbest(1,:);
end
% pbest: each particle's personal best, initialised to its start position.
for ii=1:N
pbest(ii,:,1)=X(ii,:);
end
% First PSO step: update velocities, clamp them, then move the particles.
V(:,:,2)=W(1)*V(:,:,1)+c1*rand*(pbest(:,:,1)-X(:,:,1))+c2*rand*(G(:,:,1)-X(:,:,1));
for ni=1:N
for di=1:D
% Clamp each velocity component to [-vmax, vmax] (no-op else removed).
if V(ni,di,2)>vmax
V(ni,di,2)=vmax;
elseif V(ni,di,2)<-vmax
V(ni,di,2)=-vmax;
end
end
end
X(:,:,2)=X(:,:,1)+V(:,:,2);
% BUG FIX: the original clamped positions to [-1, 1], but particles are
% initialised between a and b (values up to 200), so after one step every
% layer-size dimension collapsed to 1.  Clamp to the per-dimension search
% bounds instead.
lo = min(a,b);
hi = max(a,b);
for ni=1:N
for di=1:D
if X(ni,di,2)>hi(di)
X(ni,di,2)=hi(di);
elseif X(ni,di,2)<lo(di)
X(ni,di,2)=lo(di);
end
end
end
%******************* PSO main loop *******************
for jj=2:itmax
jj
disp('Iteration and Current Best Fitness')
disp(jj-1)
disp(B(1,1,jj-1))
reset =1; % when reset==1, scatter the swarm if it has over-converged
if reset==1
% Consider the swarm converged when every particle's current position
% spans less than 0.02 across its dimensions.
bit = 1;
for k=1:N
bit = bit&(range(X(k,:,jj))<0.02);
end
if bit==1
% BUG FIX: the original called an undefined function (funx) and then
% overwrote each row with a 2-element vector although D is 4, which
% raised a dimension-mismatch error.  Re-draw each particle uniformly
% inside the per-dimension search bounds and reset its velocity.
for ik = 1:N
X(ik,:,jj) = a+(b-a).*rand(1,D);
V(ik,:,jj) = 0.2*(m+(n-m)*rand(1,D));
end
end
end
% Evaluate the fitness of the current swarm.
% BUG FIX: pass the normalised test targets (outputn_test), matching the
% initial fitcal call above, instead of the raw output_test.
fitness=fitcal(X,num,inputn,outputn,inputn_test,outputn_test,pan,outputps);
[C,I]=min(fitness(:,1,jj));
MinFit=[MinFit C];
BestFit=[BestFit min(MinFit)];
L(:,1,jj)=fitness(:,1,jj);
B(1,1,jj)=C;
gbest(1,:,jj)=X(I,:,jj);
% Keep gbest as the best particle seen over all iterations so far.
[C,I]=min(B(1,1,:));
if B(1,1,jj)<=C
gbest(1,:,jj)=gbest(1,:,jj);
else
gbest(1,:,jj)=gbest(1,:,I);
end
if C<=minerr
break        % good enough -- stop early
end
if jj>=itmax
break        % final iteration: skip computing the next step
end
% Broadcast the global best to one row per particle.
for p=1:N
G(p,:,jj)=gbest(1,:,jj);
end
% Update each particle's personal best over the recorded history.
for ii=1:N
[C,I]=min(L(ii,1,:));
if L(ii,1,jj)<=C
pbest(ii,:,jj)=X(ii,:,jj);
else
pbest(ii,:,jj)=X(ii,:,I);
end
end
% Standard PSO velocity update with linearly-declining inertia weight.
V(:,:,jj+1)=W(jj)*V(:,:,jj)+c1*rand*(pbest(:,:,jj)-X(:,:,jj))+c2*rand*(G(:,:,jj)-X(:,:,jj));
for ni=1:N
for di=1:D
if V(ni,di,jj+1)>vmax
V(ni,di,jj+1)=vmax;
elseif V(ni,di,jj+1)<-vmax
V(ni,di,jj+1)=-vmax;
end
end
end
X(:,:,jj+1)=X(:,:,jj)+V(:,:,jj+1);
% BUG FIX: clamp positions to the per-dimension search bounds rather than
% [-1, 1]; the original collapsed all layer-size dimensions to 1.
lo = min(a,b);
hi = max(a,b);
for ni=1:N
for di=1:D
if X(ni,di,jj+1)>hi(di)
X(ni,di,jj+1)=hi(di);
elseif X(ni,di,jj+1)<lo(di)
X(ni,di,jj+1)=lo(di);
end
end
end
end
disp('Iteration and Current Best Fitness')
disp(jj)
disp(B(1,1,jj))
disp('Global Best Fitness and Occurred Iteration')
[C,I]=min(B(1,1,:));
disp([C I])  % BUG FIX: the label above promised these values but nothing was printed
% ------- Train the final LSTM with the PSO-optimised hyper-parameters -------
numFeatures = size(num(:,1:pan),2);   % input layer dimensionality
numResponses = size(num(:,end),2);    % output dimensionality
% BUG FIX: the original indexed gbest(:,1:3)/gbest(1,1), which silently
% takes iteration 1 of the 3-D gbest history; use the globally best
% iteration I found just above.  (The unused fitval array was removed.)
x = reshape(gbest(1,:,I),1,D);
x(1,1:3) = round(x(1,1:3));           % layer sizes and batch size must be integers
numHiddenUnits = x(1,1);              % LSTM hidden units (first layer)
% Network: LSTM -> fully connected -> dropout -> fully connected -> regression.
layers = [ ...
    sequenceInputLayer(numFeatures)                    % input layer
    lstmLayer(numHiddenUnits,'OutputMode','sequence')  % LSTM layer
    fullyConnectedLayer(x(1,2))                        % fully connected layer
    dropoutLayer(x(1,4))                               % dropout layer
    fullyConnectedLayer(numResponses)                  % output projection
    regressionLayer];                                  % regression output layer
% Training options: 'adam' solver, 60 epochs, optimised mini-batch size.
maxEpochs = 60;                       % maximum number of epochs
miniBatchSize = x(1,3);               % mini-batch size from the best particle
options = trainingOptions('adam', ...          % solver
    'MaxEpochs',maxEpochs, ...                 % maximum epochs
    'MiniBatchSize',miniBatchSize, ...         % mini-batch size
    'InitialLearnRate',0.001, ...              % initial learning rate
    'GradientThreshold',inf, ...               % gradient threshold (disabled)
    'Shuffle','every-epoch', ...               % reshuffle each epoch
    'Plots','none',...                         % no training plot
    'Verbose',0);                              % no progress output
% Train the network on the normalised training data.
net = trainNetwork(inputn,outputn,layers,options);
没有合适的资源?快使用搜索试试~ 我知道了~
温馨提示
基于MATLAB编程,用长短期神经网络LSTM进行涨跌幅预测,涨跌幅是一种时间序列的数据,用LSTM比一般神经网络 更适合,代码完整,包含数据,有注释,方便扩展应用 1,如有疑问,不会运行,可以私信, 2,需要创新,或者修改可以扫描二维码联系博主, 3,本科及本科以上可以下载应用或者扩展, 4,内容不完全匹配要求或需求,可以联系博主扩展。
资源推荐
资源详情
资源评论
收起资源包目录
22.rar (15个子文件)
psolstm.m 9KB
Predictc.m 603B
return(y)+指标.xlsx 18.16MB
Predict.m 604B
MSE_RMSE_MBE_MAE.m 361B
dandulstmd.m 3KB
MMNK.m 1KB
fitcal.m 2KB
lstmfun.m 2KB
R_2.m 317B
psolstm1.m 8KB
dandulstm.m 3KB
psolstmd.m 8KB
fitcal2.m 2KB
lstm.rar 16.71MB
共 15 条
- 1
资源评论
神经网络机器学习智能算法画图绘图
- 粉丝: 2801
- 资源: 659
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
最新资源
- W3CSchool全套Web开发手册中文CHM版15MB最新版本
- Light Table 的 Python 语言插件.zip
- UIkit中文帮助文档pdf格式最新版本
- kubernetes 的官方 Python 客户端库.zip
- 公开整理-2024年全国产业园区数据集.csv
- Justin Seitz 所著《Black Hat Python》一书的源代码 代码已完全转换为 Python 3,重新格式化以符合 PEP8 标准,并重构以消除涉及弃用库实现的依赖性问题 .zip
- java炸弹人游戏.zip学习资料程序资源
- Jay 分享的一些 Python 代码.zip
- 彩色形状的爱心代码.zip学习资料程序资源
- SQLAlchemy库:Python数据库操作的全方位指南
资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
点击此处反馈
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功