import os
import math
import numpy as np
import datetime as dt
from numpy import newaxis
from core.utils import Timer
from keras.layers import Dense, Activation, Dropout, LSTM
from keras.models import Sequential, load_model
from keras.callbacks import EarlyStopping, ModelCheckpoint
class Model():
    """LSTM model for time-series prediction.

    Wraps a Keras ``Sequential`` network whose layer stack is driven by a
    JSON-style config dict (see ``build_model``), plus training helpers and
    three inference modes (point-by-point, multi-sequence, full-sequence).
    """

    def __init__(self):
        self.model = Sequential()

    def load_model(self, filepath):
        """Replace the current model with one loaded from `filepath` (.h5)."""
        print('[Model] Loading model from file %s' % filepath)
        self.model = load_model(filepath)

    def build_model(self, configs):
        """Assemble and compile the network described by ``configs``.

        Each entry of ``configs['model']['layers']`` is a dict with a
        ``'type'`` key ('dense' | 'lstm' | 'dropout') plus whatever options
        that layer needs; missing options default to None so Keras defaults
        apply.  Loss and optimizer come from ``configs['model']``.

        Returns the compiled Keras model (also kept on ``self.model``).
        """
        timer = Timer()
        timer.start()

        for layer in configs['model']['layers']:
            # Optional per-layer settings; absent keys fall back to None.
            neurons = layer.get('neurons')
            dropout_rate = layer.get('rate')
            activation = layer.get('activation')
            return_seq = layer.get('return_seq')
            input_timesteps = layer.get('input_timesteps')
            input_dim = layer.get('input_dim')

            if layer['type'] == 'dense':
                self.model.add(Dense(neurons, activation=activation))
            if layer['type'] == 'lstm':
                self.model.add(LSTM(neurons, input_shape=(input_timesteps, input_dim), return_sequences=return_seq))
            if layer['type'] == 'dropout':
                self.model.add(Dropout(dropout_rate))

        self.model.compile(loss=configs['model']['loss'], optimizer=configs['model']['optimizer'])

        print('[Model] Model Compiled')
        timer.stop()
        return self.model

    def train(self, x, y, epochs, batch_size, save_dir):
        """Train on in-memory arrays and save the model under `save_dir`.

        The checkpoint/final filename encodes timestamp and epoch count.
        NOTE(review): EarlyStopping/ModelCheckpoint monitor 'val_loss' but no
        validation data is passed to fit(), so Keras will warn and the
        callbacks are effectively inert — confirm whether a validation_split
        was intended.
        """
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))

        save_fname = os.path.join(save_dir, '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=2),
            ModelCheckpoint(filepath=save_fname, monitor='val_loss', save_best_only=True)
        ]
        self.model.fit(
            x,
            y,
            epochs=epochs,
            batch_size=batch_size,
            callbacks=callbacks
        )
        # Save the final weights regardless of what the checkpoint captured.
        self.model.save(save_fname)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()

    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch, save_dir):
        """Train from a batch generator (for datasets too large for memory).

        `batch_size` is only used for logging here — the generator controls
        the actual batch size.  The best model (by training loss) is written
        by ModelCheckpoint; no extra save() call is made.
        """
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' % (epochs, batch_size, steps_per_epoch))

        save_fname = os.path.join(save_dir, '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname, monitor='loss', save_best_only=True)
        ]
        # fit_generator is the pre-TF2 Keras API; kept for compatibility with
        # the Keras version this project targets.
        self.model.fit_generator(
            data_gen,
            steps_per_epoch=steps_per_epoch,
            epochs=epochs,
            callbacks=callbacks,
            workers=1
        )

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()

    def predict_point_by_point(self, data, debug=False):
        """Predict one step ahead for each input window independently.

        Returns a flat 1-D numpy array with one prediction per window.
        With debug=True, intermediate array shapes are printed.
        """
        print('[Model] Predicting Point-by-Point...')
        if debug:
            print(np.array(data).shape)
        predicted = self.model.predict(data)
        if debug:
            print(np.array(predicted).shape)
        predicted = np.reshape(predicted, (predicted.size,))
        if debug:
            print(np.array(predicted).shape)
        return predicted

    def predict_sequences_multiple(self, data, window_size, prediction_len, debug=False):
        """Predict several independent sequences of `prediction_len` steps.

        Every `prediction_len` windows, a fresh frame is taken from `data`
        and then predicted forward by feeding each prediction back into the
        frame.  Returns a list of lists of predictions.

        Bug fix: the original debug branch never returned, so callers got
        None when debug=True; both paths now share one loop and return the
        sequences.  debug=True additionally prints the intermediate frames
        and raw prediction outputs.
        """
        print('[Model] Predicting Sequences Multiple...')
        prediction_seqs = []
        for i in range(int(len(data) / prediction_len)):
            if debug:
                print(data.shape)
            curr_frame = data[i * prediction_len]
            if debug:
                print(curr_frame)
            predicted = []
            for _ in range(prediction_len):
                predict_result = self.model.predict(curr_frame[newaxis, :, :])
                if debug:
                    print(predict_result)
                predicted.append(predict_result[0, 0])
                # Slide the window: drop the oldest step, append the newest
                # prediction just before the frame's end.
                curr_frame = curr_frame[1:]
                if debug:
                    print(curr_frame)
                curr_frame = np.insert(curr_frame, [window_size - 2], predicted[-1], axis=0)
                if debug:
                    print(curr_frame)
            prediction_seqs.append(predicted)
        return prediction_seqs

    def predict_sequence_full(self, data, window_size):
        """Predict len(data) steps ahead starting from the first window,
        feeding each prediction back into the rolling frame.

        Returns a list of predictions.
        """
        print('[Model] Predicting Sequences Full...')
        curr_frame = data[0]
        predicted = []
        for _ in range(len(data)):
            predicted.append(self.model.predict(curr_frame[newaxis, :, :])[0, 0])
            curr_frame = curr_frame[1:]
            curr_frame = np.insert(curr_frame, [window_size - 2], predicted[-1], axis=0)
        return predicted
没有合适的资源?快使用搜索试试~ 我知道了~
温馨提示
时间序列数据预测(广告打点、正弦波、sp500) 过程: 1. keras三层LSTM 2. 模型参数可配置(参数放json文件中,运行时可修改,以方便调参炼丹) 3. 数据预处理 4. 训练、测试 5. 结果精度展示 main: #RNN时间序列 #读取数据 #创建RNN模型 #加载训练数据 #训练模型 #测试结果 #展示测试效果
资源推荐
资源详情
资源评论
收起资源包目录
Time-Series-stock.zip (29个子文件)
model.png 33KB
results_multiple_2.png 118KB
results_1.png 48KB
data
sinewave.csv 65KB
adv3.csv 557KB
sp500.csv 308KB
config_3.json 948B
saved_models
22122018-003214-e2.h5 860KB
22122018-125105-e1.h5 2.35MB
22122018-003750-e1.h5 2.34MB
22122018-003320-e1.h5 2.34MB
22122018-012051-e1.h5 2.34MB
22122018-122758-e1.h5 2.35MB
22122018-003528-e1.h5 2.34MB
22122018-122334-e1.h5 2.35MB
22122018-012224-e1.h5 2.35MB
22122018-124824-e1.h5 2.34MB
22122018-011338-e1.h5 2.34MB
22122018-123819-e1.h5 2.34MB
config_2.json 816B
core
utils.py 249B
model.py 5KB
data_processor.py 3KB
run.py 2KB
config_1.json 802B
results_2.png 113KB
.gitignore 19B
RNN.png 154KB
results_multiple_1.png 45KB
共 29 条
- 1
资源评论
ziix
- 粉丝: 2435
- 资源: 201
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
最新资源
- 采用P-f和Q-V滞控的去中心化逆变器型交流微电网的模拟(Simulink仿真实现)
- 彩虹聚合二级域名DNS管理系统源码v1.3
- 【TOF相机笔记3】Simulink使用方法
- 算法部署-基于C++和Python使用ONNXRuntime部署RT-DETR目标检测算法-附项目源码-优质项目实战.zip
- Bitree.cpp
- 改变浏览器大小,图片(img)内容居中显示
- 全景分割-基于FAIR-DETR对Cityscapes数据集进行微调实现全景分割-附项目源码-优质项目实战.zip
- Tru master.m4a
- 基于ELMAN神经网络的用气量预测,基于ELMAN的天然气消费量预测(代码完整,数据齐全)
- 基于Vue3+ThreeJS实现机械臂控制和预览+源码+开发文档+代码解析(高分优秀项目)
资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
点击此处反馈
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功