import torch
import torch.nn as nn
import math
#-----------------------------------------RNN-----------------------------------------
class RNN(nn.Module):
    """Stacked vanilla RNN with a linear head on the last layer's final hidden state.

    Args:
        num_features: size of each input feature vector (``input_size`` of the RNN).
        hidden_units: number of hidden units per RNN layer.
        num_layers: number of stacked RNN layers.
        output_size: output size of the linear head, per sample.
        dropout_rate: inter-layer dropout (nn.RNN only applies it when num_layers > 1).
    """

    def __init__(self, num_features, hidden_units, num_layers, output_size, dropout_rate):
        super().__init__()
        self.num_features = num_features
        # Number of layers and the nodes in each layer.
        self.hidden_units = hidden_units
        self.num_layers = num_layers
        self.output_size = output_size
        self.dropout = dropout_rate
        # RNN layers
        self.rnn = nn.RNN(
            input_size=num_features,
            hidden_size=hidden_units,
            batch_first=True,
            num_layers=num_layers,
            dropout=dropout_rate,
        )
        self.linear = nn.Linear(in_features=self.hidden_units, out_features=self.output_size)

    def forward(self, x):
        """x: (batch_size, seq_length, num_features) -> flattened linear-head output."""
        batch_size = x.shape[0]
        # Build the initial hidden state on the input's device/dtype so the model
        # also runs on GPU; zeros need no gradient tracking, so no requires_grad_().
        h0 = torch.zeros(
            self.num_layers, batch_size, self.hidden_units,
            device=x.device, dtype=x.dtype,
        )
        _, hn = self.rnn(x, h0)
        # hn is (num_layers, batch, hidden). hn[-1] is the LAST layer's final
        # hidden state; the previous hn[0] was only correct for num_layers == 1.
        out = self.linear(hn[-1]).flatten()
        return out
# -----------------------------------------LSTM-----------------------------------------
class LSTM(nn.Module):
    """Stacked LSTM with a linear head on the last layer's final hidden state.

    Args:
        num_features: size of each input feature vector (``input_size`` of the LSTM).
        hidden_units: number of hidden units per LSTM layer.
        num_layers: number of stacked LSTM layers.
        output_size: output size of the linear head, per sample.
        dropout_rate: inter-layer dropout (nn.LSTM only applies it when num_layers > 1).
    """

    def __init__(self, num_features, hidden_units, num_layers, output_size, dropout_rate):
        super().__init__()
        self.num_features = num_features
        # Number of layers and the nodes in each layer.
        self.hidden_units = hidden_units
        self.num_layers = num_layers
        self.output_size = output_size
        self.dropout = dropout_rate
        # LSTM layers
        self.lstm = nn.LSTM(
            input_size=num_features,
            hidden_size=hidden_units,
            batch_first=True,
            num_layers=num_layers,
            dropout=dropout_rate,
        )
        # Fully connected layer
        self.linear = nn.Linear(in_features=self.hidden_units, out_features=self.output_size)

    def forward(self, x):
        """x: (batch_size, seq_length, num_features) -> flattened linear-head output."""
        batch_size = x.shape[0]
        # Initial hidden/cell states on the input's device/dtype so the model
        # also runs on GPU; zeros need no gradient tracking.
        state_shape = (self.num_layers, batch_size, self.hidden_units)
        h0 = torch.zeros(*state_shape, device=x.device, dtype=x.dtype)
        c0 = torch.zeros(*state_shape, device=x.device, dtype=x.dtype)
        _, (hn, _) = self.lstm(x, (h0, c0))
        # hn is (num_layers, batch, hidden). hn[-1] is the LAST layer's final
        # hidden state; the previous hn[0] was only correct for num_layers == 1.
        out = self.linear(hn[-1]).flatten()
        return out
#-----------------------------------------GRU-----------------------------------------
class GRU(nn.Module):
    """Stacked GRU whose last time-step output feeds a linear projection."""

    def __init__(self, num_features, hidden_units, num_layers, output_size, dropout_rate):
        super().__init__()
        self.rnn = nn.GRU(
            input_size=num_features,
            hidden_size=hidden_units,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout_rate,
        )
        self.fc_out = nn.Linear(hidden_units, output_size)
        self.d_feat = num_features

    def forward(self, x):
        """x: (batch, seq_len, num_features) -> flattened projection of the
        last time step. The initial hidden state defaults to zeros."""
        seq_out, _hidden = self.rnn(x)
        last_step = seq_out[:, -1, :]
        return self.fc_out(last_step).flatten()
#-----------------------------------------CNN_LSTM-----------------------------------------
class CNN_LSTM(nn.Module):
    """1x1 Conv1d over the sequence axis, then an LSTM and a linear head.

    Note: Conv1d treats ``seq_length`` as the channel dimension, so the
    expected input is (batch, seq_length, num_features) and ``seq_length``
    is fixed at construction time.
    """

    def __init__(self, num_features, seq_length, hidden_units, num_layers, output_size, dropout_rate):
        super().__init__()
        self.hidden_size = hidden_units
        self.num_layers = num_layers
        # kernel_size=1: a learned linear mix across time steps, per feature.
        self.conv = nn.Conv1d(seq_length, seq_length, 1)
        self.lstm = nn.LSTM(num_features, hidden_units, num_layers, batch_first=True, dropout=dropout_rate)
        self.fc = nn.Linear(hidden_units, output_size)

    def forward(self, x, _):
        # The second argument (an adjacency matrix in sibling models,
        # presumably — confirm against callers) is accepted for interface
        # compatibility but unused.
        x = self.conv(x)
        # Initial hidden/cell states on the input's device/dtype so the model
        # also runs on GPU.
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device, dtype=x.dtype)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device, dtype=x.dtype)
        out, _states = self.lstm(x, (h0, c0))  # LSTM forward pass
        # Use the last time step's output as the prediction.
        out = self.fc(out[:, -1, :]).flatten()
        return out
# -----------------------------------------ALSTM-----------------------------------------
class ALSTMModel(nn.Module):
    """Attention-LSTM: input projection -> LSTM -> additive attention over time,
    with the attended context concatenated to the last hidden state for the head.
    """

    def __init__(self, num_features, hidden_units, num_layers, output_size, dropout_rate):
        super().__init__()
        self.hid_size = hidden_units
        self.input_size = num_features
        self.dropout = dropout_rate
        self.output_size = output_size
        self.rnn_layer = num_layers
        self._build_model()

    def _build_model(self):
        # NOTE: submodule names and construction order are kept exactly as
        # before so state_dict keys and seeded initialization are unchanged.
        half = self.hid_size // 2
        # Input projection with tanh non-linearity.
        self.net = nn.Sequential()
        self.net.add_module("fc_in", nn.Linear(self.input_size, self.hid_size))
        self.net.add_module("act", nn.Tanh())
        self.rnn = nn.LSTM(
            input_size=self.hid_size,
            hidden_size=self.hid_size,
            num_layers=self.rnn_layer,
            batch_first=True,
            dropout=self.dropout,
        )
        # Head consumes [last hidden state ; attention context] -> scalar.
        self.fc_out = nn.Linear(self.hid_size * 2, 1)
        # Attention scorer: hid -> hid//2 -> output_size, softmax over time.
        self.att_net = nn.Sequential()
        self.att_net.add_module("att_fc_in", nn.Linear(self.hid_size, half))
        self.att_net.add_module("att_dropout", torch.nn.Dropout(self.dropout))
        self.att_net.add_module("att_act", nn.Tanh())
        self.att_net.add_module("att_fc_out", nn.Linear(half, self.output_size, bias=False))
        self.att_net.add_module("att_softmax", nn.Softmax(dim=1))

    def forward(self, inputs, adj):
        """inputs: [batch, seq_len, input_size]; adj is unused here."""
        projected = self.net(inputs)
        rnn_out, _ = self.rnn(projected)          # [batch, seq_len, hid]
        scores = self.att_net(rnn_out)            # [batch, seq_len, 1]
        context = (rnn_out * scores).sum(dim=1)   # attention-weighted sum over time
        combined = torch.cat((rnn_out[:, -1, :], context), dim=1)
        return self.fc_out(combined).flatten()    # [batch]
Python量化投资、代码解析与论文精读
- 粉丝: 8490
- 资源: 40
最新资源
- python的uds诊断相关接口
- 视觉生成领域中的并行自回归模型加速研究
- 基于51单片机和DS18B20的温度检测和报警系统,可设置报警温度上下限,输出温度采用数码管显示
- 2020年山东省职业院校技能大赛网络搭建与应用赛题
- bp神经网络交叉验证算法和确定最佳隐含层节点个数matlab 程序,直接运行即可 数据excel格式,注释清楚,效果清晰,一步上手
- 基于51单片机设计的火灾报警器,传感器包括烟雾,光强,温度传感器,同时本工程包含了labview的上位机
- Python毕业设计-YOLOV5火灾火焰烟雾检测数据集+训练好的模型+标注好的数据+pyqt界面+源码
- Minecraft Python Console
- llvm cmake fsf fasdf der
- 445981218017804USB摄像头.apk
- Python毕业设计-YOLOV5火灾火焰烟雾检测数据集+模型+源码
- 预训练扩散变换器线性化优化方法:引入CLEAR机制加速图像生成
- 基于YOLOv5的神经网络训练用于检测火灾初期的火焰和烟雾模型源码+数据集
- Python毕业设计-基于YOLOv5的神经网络训练用于检测火灾初期的火焰和烟雾模型源码+数据集
- 基于PINN方法的热传导问题求解及结果输出
- Python基于YOLOv5的神经网络训练用于检测火灾初期的火焰和烟雾模型源码+数据集
资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
点击此处反馈