import os
import matplotlib.pyplot as plt
import numpy as np
import optuna
import pandas as pd
from numpy.random import seed
from sklearn.metrics import mean_absolute_error
from tensorflow.keras.layers import Convolution1D, Activation, Conv1D
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.python.framework.random_seed import set_random_seed
from tensorflow.python.keras.layers import CuDNNLSTM, Flatten
# Seed TF and NumPy RNGs for repeatability (GPU CuDNN ops may still be
# non-deterministic despite the fixed seeds).
set_random_seed(11)
seed(7)
os.chdir(r'C:\Projects\LStM时序预测加自动调参')
train_path = 'Data.csv'
data = pd.read_csv(train_path)
# First 50 rows form the hold-out test set; rows from index 51 onward are
# training data.  NOTE(review): row 50 ends up in neither split — confirm
# this gap is intentional.
train = data.iloc[51:, :]
test = data.iloc[:50, :]
# NOTE(review): dropna(inplace=True) on an iloc slice can trigger a pandas
# SettingWithCopyWarning; NaNs are dropped from train only, never from test.
train.dropna(inplace=True)
train = train.values
test = test.values
# Build the CNN+LSTM model: a CuDNNLSTM front-end feeding two 1-D conv layers.
def create_model(look_back, units1, conv1, units2, lr, weight_decay, n_outputs=None):
    """Create and compile the CuDNNLSTM + Conv1D regression model.

    Parameters
    ----------
    look_back : int
        Number of past time steps per input sample (first input dimension).
    units1 : int
        Hidden units of the CuDNNLSTM layer.
    conv1 : int
        Filter count of the first Conv1D layer.
    units2 : int
        Filter count of the second Conv1D layer.
    lr : float
        Adam learning rate.
    weight_decay : float
        Adam learning-rate decay.
    n_outputs : int, optional
        Size of the output (forecast horizon).  Defaults to the module-level
        ``pred`` so existing call sites keep working; passing it explicitly
        removes the hidden global dependency the original body had.

    Returns
    -------
    A compiled ``Sequential`` model (loss='mse', metric='mae').
    """
    if n_outputs is None:
        n_outputs = pred  # backward-compatible fallback to the script-level horizon
    model = Sequential()
    # NOTE(review): input_shape hard-codes 3 features — confirm Data.csv has
    # exactly target + 2 feature columns.
    model.add(CuDNNLSTM(units=units1, return_sequences=True, input_shape=(look_back, 3)))
    model.add(Convolution1D(conv1, 4, padding='same', strides=2))
    model.add(Activation('relu'))
    model.add(Conv1D(units2, 4, strides=2, activation='relu', padding="same"))
    model.add(Flatten())
    model.add(Dense(n_outputs))
    optimizer = Adam(learning_rate=lr, decay=weight_decay)
    model.compile(loss='mse', optimizer=optimizer, metrics=['mae'])
    return model
def fit_size(train, y_train, test, test_y):
    """Min-max scale features and targets, fitting on train data only.

    Both scalers are fit on the training arrays and then applied to the
    test arrays.  The fitted target scaler is returned so predictions can
    be inverse-transformed back to the original scale later.

    Returns (scaled_train_x, scaled_train_y, scaled_test_x, scaled_test_y,
    target_scaler).
    """
    from sklearn import preprocessing
    feature_scaler = preprocessing.MinMaxScaler()
    target_scaler = preprocessing.MinMaxScaler()
    scaled_train_x = feature_scaler.fit_transform(train)
    scaled_train_y = target_scaler.fit_transform(y_train)
    scaled_test_x = feature_scaler.transform(test)
    scaled_test_y = target_scaler.transform(test_y)
    return scaled_train_x, scaled_train_y, scaled_test_x, scaled_test_y, target_scaler
# To forecast the 6th / 12th hour ahead, change `pred` to 12.
def create_dataset(dataset, train_y, TIME_STEPS, pred):
    """Slice a continuous series into supervised (X, Y) windows.

    Each sample X is ``TIME_STEPS`` consecutive rows of ``dataset``; its
    target Y is the next ``pred`` values of column 0 of ``train_y``,
    starting one row AFTER the window ends (a one-step gap, mirroring
    create_dataset1's labelling scheme).

    Parameters
    ----------
    dataset : 2-D array of feature rows.
    train_y : 2-D array whose column 0 is the target series.
    TIME_STEPS : int, input window length.
    pred : int, forecast horizon.

    Returns
    -------
    TrainX : ndarray of shape (n_samples, TIME_STEPS, n_features)
    Train_Y : ndarray of shape (n_samples, pred)
    """
    dataX, dataY = [], []
    # Fixed off-by-one: the original bound (len - TIME_STEPS - pred - 1)
    # silently dropped the last usable window; this matches create_dataset1.
    for i in range(len(dataset) - TIME_STEPS - pred):
        dataX.append(dataset[i:(i + TIME_STEPS), :])
        dataY.append(train_y[(i + TIME_STEPS + 1):(i + TIME_STEPS + pred + 1), 0].tolist())
    TrainX = np.array(dataX)
    Train_Y = pd.DataFrame(dataY).values
    return TrainX, Train_Y
# To forecast the 6th / 12th hour ahead, change `pred` to 12.
def create_dataset1(dataset, train_y, TIME_STEPS, pred):
    """Window the series into supervised samples, 49 rows per group.

    The data is treated as consecutive independent blocks of 49 rows;
    windows never cross a block boundary (any trailing remainder rows
    beyond the last full block are ignored).  X is ``TIME_STEPS`` feature
    rows; Y is the following ``pred`` values of target column 0, offset
    one row past the end of the window.

    Returns (TrainX, Train_Y) as in create_dataset.
    """
    dataX, dataY = [], []
    n_groups = int(len(dataset) / 49)
    for g in range(n_groups):
        block_x = dataset[g * 49:(g + 1) * 49, :]
        block_y = train_y[g * 49:(g + 1) * 49, :]
        # `start` replaces the original inner `i`, which shadowed the
        # outer loop variable.
        for start in range(len(block_x) - TIME_STEPS - pred):
            end = start + TIME_STEPS
            dataX.append(block_x[start:end, :])
            dataY.append(block_y[(end + 1):(end + pred + 1), 0].tolist())
    return np.array(dataX), pd.DataFrame(dataY).values
# Hyper-parameter search objective for Optuna.
def objective(trial):
    """Train the CNN+LSTM once with trial-sampled hyper-parameters.

    Reads the module-level X_train/y_train/X_test/test_y, look_back and
    batch_size.  Returns the test-set MAE, which the study minimises.
    """
    # suggest_float replaces the deprecated suggest_uniform (Optuna >= 3).
    weight_decay = trial.suggest_float('weight_decay', 0.009, 0.025)
    lr = trial.suggest_float('lr', 0.002, 0.04)
    conv1 = trial.suggest_int('conv1', 32, 50)
    units1 = trial.suggest_int('units1', 1, 16)
    units2 = trial.suggest_int('units2', 32, 64)
    # NOTE(review): drop1/drop2 are sampled but never passed to create_model
    # (the model has no Dropout layers).  Kept so best_trial.params still
    # contains these keys for the look-ups later in the script.
    drop1 = trial.suggest_float('drop1', 0.2, 0.4)
    drop2 = trial.suggest_float('drop2', 0.3, 0.5)
    epochs = trial.suggest_int('epochs', 10, 30)
    print([weight_decay, lr, conv1, units1, units2, drop1, drop2, epochs])
    model = create_model(look_back, units1, conv1, units2, lr, weight_decay)
    model.fit(X_train, y_train, batch_size=batch_size, validation_split=0.1, epochs=epochs, shuffle=False,
              verbose=0)
    pred_y = model.predict(X_test, batch_size=batch_size)
    mae = mean_absolute_error(test_y, pred_y)
    print(mae)
    return mae
# Use the previous 6 rows as the input window.
look_back = 6
# Forecast the next 4 rows.
pred = 4
batch_size = 64
# Column 0 is the prediction target; the feature set fed to the model can be
# narrowed here to compare results with fewer inputs.
y_train = train[:, 0].reshape(-1, 1)
test_y = test[:, 0].reshape(-1, 1)
X_train, y_train, X_test, test_y, train_y_MinMax = fit_size(train, y_train, test, test_y)
# Training windows respect 49-row group boundaries (create_dataset1); test
# windows treat the 50-row test split as one continuous series.
X_train, y_train = create_dataset1(X_train, y_train, look_back, pred)
X_test, test_y = create_dataset(X_test, test_y, look_back, pred)
""" Run optimize.
Set n_trials and/or timeout (in sec) for optimization by Optuna
"""
study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=50)
print('Best trial number: ', study.best_trial.number)
print('Best value:', study.best_trial.value)
print('Best parameters: \n', study.best_trial.params)
parameters = study.best_trial.params
# parameters = {'weight_decay': 0.01290914, 'lr': 0.0015873, 'conv1': 48, 'units1': 15, 'units2': 44,
# 'drop1': 0.3604710283, 'drop2': 0.35916614, 'epochs': 43}
# {'weight_decay': 0.011544699692321633, 'lr': 0.03714850010671891, 'conv1': 48,
# 'units1': 4, 'units2': 42, 'drop1': 0.20453530440333112, 'drop2': 0.42749144468700656, 'epochs': 24}
# 找到的最好的参数
weight_decay = parameters['weight_decay']
lr = parameters['lr']
conv1 = parameters['conv1']
units1 = parameters['units1']
units2 = parameters['units2']
drop1 = parameters['drop1']
drop2 = parameters['drop2']
epochs = parameters['epochs']
model = create_model(look_back, units1, conv1, units2, lr, weight_decay)
hist = model.fit(X_train, y_train, batch_size=batch_size, validation_split=0.1, epochs=epochs, shuffle=False, verbose=1)
# 画训练集和验证集的准确率图
plt.plot(hist.history['mae'], label='mae')
plt.plot(hist.history['val_mae'], label='val_mae')
plt.legend()
plt.show()
# 画损失图
plt.plot(hist.history['loss'], label='loss')
plt.plot(hist.history['val_loss'], label='val_loss')
plt.legend()
plt.show()
# 预测测试集
pred_y = model.predict(X_test, batch_size=batch_size)
print(pred_y)
print('mae_score')
mae = mean_absolute_error(test_y, pred_y)
print(mae)
没有合适的资源?快使用搜索试试~ 我知道了~
温馨提示
此文件包含数据Data.csv,数据包含三个字段 target,feature1,feature2 此文件包含代码lstm_50.py,用于预测未来4行数据 核心代码示例 model = Sequential() model.add(CuDNNLSTM(units=units1, return_sequences=True, input_shape=(look_back, 3))) model.add(Convolution1D(conv1, 4, padding='same', strides=2)) model.add(Activation('relu')) model.add(Conv1D(units2, 4, strides=2, activation='relu', padding="same")) model.add(Flatten()) model.add(Dense(pred)) optimizer = Adam 有问题请关注私聊,包此代码的答疑服务,基本秒回,不满意加球球包退款,可接受定制服务
资源推荐
资源详情
资源评论
收起资源包目录
LSTM多输出预测.zip (2个子文件)
LSTM多输出预测
lstm_50.py 6KB
Data.csv 6KB
共 2 条
- 1
AI信仰者
- 粉丝: 1w+
- 资源: 143
下载权益
C知道特权
VIP文章
课程特权
开通VIP
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功
- 1
- 2
- 3
前往页