import pandas as pd
import matplotlib.pyplot as plt
import torch.nn as nn
import torch
import time
import numpy as np
import random
path = "AirPassengers.csv"
data = pd.read_csv(path)
# Keep only the passenger-count column (second column of the CSV).
data = data.iloc[:, 1]
# data.plot()
# plt.show()
# Min-max normalization: scale the first 120 points (training split) to [0, 1].
train_data = data[:120].values
min_data = np.min(train_data)
max_data = np.max(train_data)
train_data_scaler = (train_data - min_data) / (max_data - min_data)
# Remaining points are held out for evaluation (kept in original units).
test_data = data[120:].values
def get_x_y(data, step=12):
    """Build supervised (input, target) pairs from a 1-D series.

    Each sample pairs a window of ``step`` consecutive values with the
    single value immediately following that window.

    Args:
        data: 1-D sequence of observations (e.g. the scaled series).
        step: window length — number of past time steps per sample.

    Returns:
        List of ``[x, y]`` pairs where ``x`` is a list of ``step`` values
        and ``y`` is a one-element list holding the next value. Empty when
        ``len(data) <= step``.
    """
    return [
        [list(data[i: i + step]), [data[i + step]]]
        for i in range(len(data) - step)
    ]
def get_mini_batch(data, batch_size):
    """Yield mini-batches of (x, y) arrays from ``get_x_y``-style samples.

    Only full batches are yielded; a trailing remainder smaller than
    ``batch_size`` is dropped because the model's initial hidden state is
    sized for a fixed batch.

    Args:
        data: list of ``[x, y]`` pairs as produced by ``get_x_y``.
        batch_size: number of samples per batch.

    Yields:
        Tuple ``(x, y)`` of float arrays with shapes
        ``(batch_size, step, 1)`` and ``(batch_size, 1, 1)``.
    """
    # NOTE: the original upper bound ``len(data) - batch_size`` skipped the
    # last batch even when it was complete; ``+ 1`` keeps every full batch.
    for i in range(0, len(data) - batch_size + 1, batch_size):
        samples = data[i:i + batch_size]
        x = [sample[0] for sample in samples]
        y = [sample[1] for sample in samples]
        yield (np.expand_dims(np.asarray(x), axis=2),
               np.expand_dims(np.asarray(y), axis=2))
# Window length: each sample uses 12 past months to predict the next one.
time_step = 12
train_x_y = get_x_y(train_data_scaler, step=time_step)
# Shuffle samples so mini-batches are not in chronological order.
random.shuffle(train_x_y)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class LSTM(nn.Module):
    """One-step time-series forecaster: an LSTM over a window of past
    values followed by a linear head read from the last time step.

    Args:
        hidden_size: number of hidden units per LSTM layer.
        num_layers: number of stacked LSTM layers.
        output_size: size of each prediction (1 = next value only).
        batch_size: fixed batch size used to shape the initial hidden state.
    """

    def __init__(self, hidden_size, num_layers, output_size, batch_size):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.num_directions = 1  # unidirectional LSTM
        self.input_size = 1      # one feature per time step
        self.batch_size = batch_size
        # Select the device locally so the module is self-contained
        # (same choice as the script-level ``device``).
        self._device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Initial (h0, c0), each (num_directions * num_layers, batch, hidden).
        # NOTE(review): random init is unusual (zeros are conventional) but is
        # preserved; the evaluation code overwrites this attribute anyway.
        self.hidden_cell = (
            torch.randn(self.num_directions * self.num_layers, self.batch_size, self.hidden_size).to(self._device),
            torch.randn(self.num_directions * self.num_layers, self.batch_size, self.hidden_size).to(self._device))
        self.lstm = nn.LSTM(self.input_size, self.hidden_size, self.num_layers, batch_first=True).to(self._device)
        self.fc = nn.Linear(self.hidden_size, self.output_size).to(self._device)
        self.relu = nn.ReLU().to(self._device)  # kept for parity; unused in forward

    def forward(self, input):
        """Predict from the last time step of the LSTM output.

        Args:
            input: array-like of shape (batch, time_step, 1).

        Returns:
            Tensor of shape (batch, output_size).
        """
        output, _ = self.lstm(torch.FloatTensor(input).to(self._device), self.hidden_cell)
        pred = self.fc(output)   # (batch, time_step, output_size)
        return pred[:, -1, :]    # keep only the final time step's prediction
# --- Training configuration ---
hidden_size = 100  # hidden units per LSTM layer
num_layers = 1     # number of stacked LSTM layers
output_size = 1    # predict a single next value
batch_size = 6     # samples per mini-batch

model = LSTM(hidden_size, num_layers, output_size, batch_size).to(device)
# nn.MSELoss defaults to mean reduction; the deprecated
# ``reduce=True, size_average=True`` arguments requested exactly that.
loss_function = nn.MSELoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)  # optimizer instance
print(model)

epochs = 200
for i in range(epochs):
    start = time.time()
    for seq_batch, label_batch in get_mini_batch(train_x_y, batch_size):
        optimizer.zero_grad()
        y_pred = model(seq_batch)
        # label_batch is (batch, 1, 1); drop the last axis to match y_pred.
        loss = loss_function(y_pred, torch.FloatTensor(label_batch[:, :, 0]).to(device))
        loss.backward()   # back-propagate gradients
        optimizer.step()  # apply the parameter update
    # Report the last mini-batch loss of this epoch.
    print(f'epoch:{i:3} loss:{loss.item():10.8f} time:{time.time() - start:6}')
# --- Evaluation: recursive multi-step forecasting over the test range ---
model.eval()
with torch.no_grad():
    # Re-shape the initial hidden state for batch size 1 (single window).
    model.hidden_cell = (torch.zeros(1 * num_layers, 1, hidden_size).to(device),
                         torch.zeros(1 * num_layers, 1, hidden_size).to(device))
    total_test_loss = 0
    test_pred = []
    for i in range(len(test_data)):
        # Always feed the most recent window; each prediction is appended to
        # the series so later steps consume earlier predictions (recursive).
        x = train_data_scaler[-time_step:]
        print(x)
        x1 = np.expand_dims(np.expand_dims(x, 1), 0)  # (1, time_step, 1)
        test_y_pred_scalar = model(x1).cpu().squeeze().item()  # scaled prediction in [0, 1]
        train_data_scaler = np.append(train_data_scaler, test_y_pred_scalar)
        y = test_y_pred_scalar * (max_data - min_data) + min_data  # undo min-max scaling
        test_pred.append(y)
print(test_data)
print(test_pred)
# Plot ground truth (red) against the recursive forecast (blue).
plt.plot(list(range(len(test_pred))), test_data, 'ro-')
plt.plot(list(range(len(test_pred))), test_pred, 'bo-')
plt.legend(["true", "pred"])
plt.show()
#
#
# x_past = list(range(0, tw_past))
# x_pred = list(range(tw_past, tw_past + tw_pred))
# for i, one_series in enumerate(train_data[:, :, -tw_past:]):
# one_series2 = np.expand_dims(np.transpose(one_series), axis=0)
# test_pred = model(one_series2).cpu().squeeze().numpy() # 3*48
# test_pred2 = minMaxList[i].inverse_transform(test_pred) # 反归一化后的值
# test_true = val_data[i, :3, :] # 真实值
# test_loss = np.mean(np.power(test_pred2 - test_true, 2), axis=1)
# total_test_loss += test_loss
# plt.figure()
# plt.plot(x_pred, test_true[0, :], 'r-')
# plt.plot(x_pred, test_true[1, :], 'r-')
# plt.plot(x_pred, test_true[2, :], 'r-')
#
# plt.plot(x_pred, test_pred2[0, :], 'g-')
# plt.plot(x_pred, test_pred2[1, :], 'g-')
# plt.plot(x_pred, test_pred2[2, :], 'g-')
# plt.savefig('../img_pred2/pic-{}.png'.format(i))
# plt.close()
# print(total_test_loss)
# --- Source-page metadata (non-code text captured with the original upload;
# kept as a comment so this file remains valid Python) ---
# Title: pytorch lstm 时间序列 多时间步预测
#        (PyTorch LSTM time-series multi-step forecasting)
# Uploaded: 2022-09-24 10:02:00 — 165 views, 7 comments, 3KB RAR
# Uploader: 王小葱鸭 (fans: 4546, resources: 13)