import torch
import torch.nn as nn
#-----------------------------------------RNN-----------------------------------------
class RNN(nn.Module):
    def __init__(self, num_features, hidden_units, num_layers, output_size, dropout_rate):
        super().__init__()
        self.num_features = num_features
        # Number of recurrent layers and hidden units per layer
        self.hidden_units = hidden_units
        self.num_layers = num_layers
        self.output_size = output_size
        self.dropout = dropout_rate
        # RNN layers (note: dropout is only applied between stacked layers,
        # so it has no effect when num_layers == 1)
        self.rnn = nn.RNN(
            input_size=num_features,
            hidden_size=hidden_units,
            batch_first=True,
            num_layers=num_layers,
            dropout=dropout_rate,
        )
        self.linear = nn.Linear(in_features=self.hidden_units, out_features=self.output_size)

    def forward(self, x):
        # x: (batch_size, seq_length, num_features)
        batch_size = x.shape[0]
        h0 = torch.zeros(self.num_layers, batch_size, self.hidden_units, device=x.device)
        _, hn = self.rnn(x, h0)
        # hn: (num_layers, batch_size, hidden_units); take the last layer's state
        out = self.linear(hn[-1]).flatten()
        return out
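# For reference, nn.RNN with its default tanh nonlinearity computes
#   h_t = tanh(W_ih @ x_t + b_ih + W_hh @ h_{t-1} + b_hh)
# at each time step; only the final hidden state hn feeds the linear head.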
# -----------------------------------------LSTM-----------------------------------------
class LSTM(nn.Module):
    def __init__(self, num_features, hidden_units, num_layers, output_size, dropout_rate):
        super().__init__()
        self.num_features = num_features
        # Number of recurrent layers and hidden units per layer
        self.hidden_units = hidden_units
        self.num_layers = num_layers
        self.output_size = output_size
        self.dropout = dropout_rate
        # LSTM layers
        self.lstm = nn.LSTM(
            input_size=num_features,
            hidden_size=hidden_units,
            batch_first=True,
            num_layers=num_layers,
            dropout=dropout_rate,
        )
        # Fully connected output layer
        self.linear = nn.Linear(in_features=self.hidden_units, out_features=self.output_size)

    def forward(self, x):
        batch_size = x.shape[0]
        h0 = torch.zeros(self.num_layers, batch_size, self.hidden_units, device=x.device)
        c0 = torch.zeros(self.num_layers, batch_size, self.hidden_units, device=x.device)
        _, (hn, _) = self.lstm(x, (h0, c0))
        # hn: (num_layers, batch_size, hidden_units); take the last layer's state
        out = self.linear(hn[-1]).flatten()
        return out
#-----------------------------------------GRU-----------------------------------------
class GRU(nn.Module):
    def __init__(self, num_features, hidden_units, num_layers, output_size, dropout_rate):
        super().__init__()
        self.rnn = nn.GRU(
            input_size=num_features,
            hidden_size=hidden_units,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout_rate,
        )
        self.fc_out = nn.Linear(hidden_units, output_size)
        self.d_feat = num_features

    def forward(self, x):
        # When no initial state is passed, nn.GRU defaults h0 to zeros
        out, _ = self.rnn(x)
        # Use the output at the last time step as the prediction
        out = self.fc_out(out[:, -1, :]).flatten()
        return out
#-----------------------------------------CNN_LSTM-----------------------------------------
class CNN_LSTM(nn.Module):
    def __init__(self, num_features, seq_length, hidden_units, num_layers, output_size, dropout_rate):
        super().__init__()
        self.hidden_size = hidden_units
        self.num_layers = num_layers
        # 1x1 convolution; the input (batch, seq_length, num_features) is fed
        # as (N, C, L), so seq_length acts as the channel dimension here
        self.conv = nn.Conv1d(seq_length, seq_length, 1)
        self.lstm = nn.LSTM(num_features, hidden_units, num_layers, batch_first=True, dropout=dropout_rate)
        self.fc = nn.Linear(hidden_units, output_size)

    def forward(self, x, _):
        # The second argument is an unused placeholder kept for a uniform
        # call signature across models
        x = self.conv(x)
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)  # initial hidden state
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)  # initial cell state
        out, _ = self.lstm(x, (h0, c0))  # LSTM forward pass
        out = self.fc(out[:, -1, :]).flatten()  # predict from the last time step's output
        return out
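# Note on the conv layer above: because Conv1d's in_channels is seq_length,
# the model is tied to one fixed window length, and each output time step is
# a learned linear combination of all input time steps, computed
# independently per feature, before the LSTM sees the sequence.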
# -----------------------------------------ALSTM-----------------------------------------
class ALSTMModel(nn.Module):
    def __init__(self, num_features, hidden_units, num_layers, output_size, dropout_rate):
        super().__init__()
        self.hid_size = hidden_units
        self.input_size = num_features
        self.dropout = dropout_rate
        self.output_size = output_size
        self.rnn_layer = num_layers
        self._build_model()

    def _build_model(self):
        # Input projection followed by a tanh non-linearity
        self.net = nn.Sequential()
        self.net.add_module("fc_in", nn.Linear(in_features=self.input_size, out_features=self.hid_size))
        self.net.add_module("act", nn.Tanh())
        self.rnn = nn.LSTM(
            input_size=self.hid_size,
            hidden_size=self.hid_size,
            num_layers=self.rnn_layer,
            batch_first=True,
            dropout=self.dropout,
        )
        # The head takes the last hidden state concatenated with the
        # attention-weighted summary, hence hid_size * 2 input features
        self.fc_out = nn.Linear(in_features=self.hid_size * 2, out_features=1)
        # Attention network: scores each time step, normalized over the sequence.
        # With output_size == 1 this yields one score per time step.
        self.att_net = nn.Sequential()
        self.att_net.add_module(
            "att_fc_in",
            nn.Linear(in_features=self.hid_size, out_features=self.hid_size // 2),
        )
        self.att_net.add_module("att_dropout", torch.nn.Dropout(self.dropout))
        self.att_net.add_module("att_act", nn.Tanh())
        self.att_net.add_module(
            "att_fc_out",
            nn.Linear(in_features=self.hid_size // 2, out_features=self.output_size, bias=False),
        )
        self.att_net.add_module("att_softmax", nn.Softmax(dim=1))

    def forward(self, inputs, adj):
        # inputs: [batch, seq_len, input_size]; adj is unused, kept for a
        # uniform call signature across models
        rnn_out, _ = self.rnn(self.net(inputs))  # [batch, seq_len, hidden_size]
        attention_score = self.att_net(rnn_out)  # [batch, seq_len, 1]
        out_att = torch.mul(rnn_out, attention_score)
        out_att = torch.sum(out_att, dim=1)  # attention-weighted sum over time
        out = self.fc_out(
            torch.cat((rnn_out[:, -1, :], out_att), dim=1)
        )  # [batch, hidden_size * 2] -> [batch, 1]
        return out.flatten()
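# Minimal smoke test for the five models above (a sketch: the batch size,
# window length, and hyper-parameters here are illustrative, not the values
# used in this project).
if __name__ == "__main__":
    batch, seq_len, feats = 8, 10, 1
    x = torch.randn(batch, seq_len, feats)
    kwargs = dict(num_features=feats, hidden_units=16, num_layers=1,
                  output_size=1, dropout_rate=0.0)
    for name, y in [
        ("RNN", RNN(**kwargs)(x)),
        ("LSTM", LSTM(**kwargs)(x)),
        ("GRU", GRU(**kwargs)(x)),
        ("CNN_LSTM", CNN_LSTM(seq_length=seq_len, **kwargs)(x, None)),
        ("ALSTM", ALSTMModel(**kwargs)(x, None)),
    ]:
        print(name, y.shape)  # each prints torch.Size([8])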
This project uses European Union Allowance (EUA) data from the EU Emissions Trading System. The experiments use a dataset covering April 15, 2013 to March 30, 2020, comprising 1797 samples. The first 80% of the data serves as the training set and the remaining 20% as the test set. Five models are trained: RNN, LSTM, GRU, CNN-LSTM, and LSTM-Attn.
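A minimal sketch of that chronological split (only the file name, the sample count, and the 80/20 ratio come from this page; the pandas calls and the "Price" column name are assumptions):

import pandas as pd

# Load the EUA daily price series (the column name "Price" is assumed)
df = pd.read_excel("data/EUA-ICE.xlsx")
prices = df["Price"].to_numpy()

# Chronological split: time series must not be shuffled before splitting
split = int(len(prices) * 0.8)   # 1437 of 1797 samples for training
train, test = prices[:split], prices[split:]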
Python碳价格时间序列预测.zip (9 files)
├── utils.py (4 KB)
├── main.py (4 KB)
├── data/
│   └── EUA-ICE.xlsx (84 KB)
├── models.py (6 KB)
├── tool.py (3 KB)
├── figures/
│   ├── pred.png (183 KB)
│   └── loss.png (67 KB)
└── save_dict/
    ├── gru.chkpt (151 KB)
    └── lstm.chkpt (200 KB)