# coding:utf-8
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.nn.utils import weight_norm
# from torch_geometric.nn import GCNConv  # GCN-related (requires torch_geometric)
# class GCN(torch.nn.Module):
#     def __init__(self, num_node_features, num_classes):
#         super(GCN, self).__init__()
#         self.conv1 = GCNConv(num_node_features, 16)
#         self.conv2 = GCNConv(16, num_classes)
#
#     def forward(self, data):
#         x, edge_index = data.x, data.edge_index
#         x = self.conv1(x, edge_index)
#         x = F.relu(x)
#         x = F.dropout(x, training=self.training)
#         x = self.conv2(x, edge_index)
#         x = F.relu(x)
#         x = F.dropout(x, training=self.training)
#         x = F.softmax(x, dim=1)
#         return x
class SimpleRNN(nn.Module):
    def __init__(self, input_size, hidden_size=32, output_size=1, num_layers=1, dropout=0.25):
        super(SimpleRNN, self).__init__()
        self.hidden_size = hidden_size
        self.rnn = nn.RNN(
            input_size=input_size,
            hidden_size=hidden_size,
            nonlinearity='relu',  # 'tanh' or 'relu'
            num_layers=num_layers,
            dropout=dropout,  # only takes effect when num_layers > 1
            batch_first=True
        )
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden):
        # The RNN (and the LSTM below) expects (batch, seq_len, features); the input
        # carries an extra dimension for the 18 stations, so move it to the front
        # and take a single station's sequence.
        x = x.permute(1, 0, 2, 3)
        x_input = x[0]
        output, hidden = self.rnn(x_input, hidden)
        pred = self.linear(output[:, -1, :])  # predict from the last time step
        return pred, hidden

    def init_hidden(self):
        # shape (num_layers, batch_size, hidden_size); the batch size of 24 is
        # hard-coded to match the training setup
        return torch.randn(1, 24, self.hidden_size)
class SimpleGRU(nn.Module):
    def __init__(self, input_size, hidden_size, output_size=1, num_layers=1, dropout=0.25):
        super(SimpleGRU, self).__init__()
        self.hidden_size = hidden_size
        self.gru = nn.GRU(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            # dropout=dropout,
            batch_first=True
        )
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden):
        # same reshaping as SimpleRNN: station dimension to the front, then use
        # one station's (batch, seq_len, features) sequence
        x = x.permute(1, 0, 2, 3)
        x_input = x[0]
        output, hidden = self.gru(x_input, hidden)
        pred = self.linear(output[:, -1, :])
        return pred, hidden

    def init_hidden(self):
        # shape (num_layers, batch_size, hidden_size); batch size of 24 is hard-coded
        return torch.randn(1, 24, self.hidden_size)
class SimpleLSTM(nn.Module):
    def __init__(self, input_size, hidden_size, output_size=1, num_layers=1, dropout=0.25):
        super(SimpleLSTM, self).__init__()
        self.hidden_size = hidden_size
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            # dropout=dropout,
            batch_first=True
        )
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # same reshaping as SimpleRNN; the LSTM creates its own zero-initialized
        # hidden and cell states when none are passed in
        x = x.permute(1, 0, 2, 3)
        x_input = x[0]
        output, (h_n, c_n) = self.lstm(x_input)
        pred = self.linear(output[:, -1, :])
        return pred

    def init_hidden(self):
        # shape (num_layers, batch_size, hidden_size); batch size of 24 is hard-coded
        return torch.randn(1, 24, self.hidden_size)
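# Minimal usage sketch for the recurrent baselines above. The sizes (batch of 24
# to match the hard-coded init_hidden, 18 stations, 12 time steps, 6 features)
# are assumptions for illustration, not values taken from the project's
# training script.
def _demo_recurrent_models(batch_size=24, n_stations=18, seq_len=12, n_features=6):
    x = torch.randn(batch_size, n_stations, seq_len, n_features)
    rnn = SimpleRNN(input_size=n_features, hidden_size=32)
    pred, hidden = rnn(x, rnn.init_hidden())  # pred: (batch_size, 1)
    gru = SimpleGRU(input_size=n_features, hidden_size=32)
    pred, hidden = gru(x, gru.init_hidden())  # pred: (batch_size, 1)
    lstm = SimpleLSTM(input_size=n_features, hidden_size=32)
    pred = lstm(x)                            # pred: (batch_size, 1)
    return pred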
######################
class TCN(nn.Module):
    def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
        super(TCN, self).__init__()
        self.tcn = TemporalConvNet(input_size, num_channels, kernel_size, dropout=dropout)
        self.linear = nn.Linear(num_channels[-1], output_size)

    def forward(self, x):
        # x: (batch, seq_len, features) -> Conv1d layout (batch, features, seq_len) and back
        output = self.tcn(x.transpose(1, 2)).transpose(1, 2)
        pred = self.linear(output[:, -1, :])  # predict from the last time step
        return pred
class Chomp1d(nn.Module):
    """Trims the extra right-hand padding so the dilated convolution stays causal."""

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        return x[:, :, :-self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
        super(TemporalBlock, self).__init__()
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)
        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)
        self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
                                 self.conv2, self.chomp2, self.relu2, self.dropout2)
        # 1x1 convolution matches channel counts for the residual connection
        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        self.conv1.weight.data.normal_(0, 0.01)
        self.conv2.weight.data.normal_(0, 0.01)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.01)

    def forward(self, x):
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return self.relu(out + res)
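# Small illustration of the padding/Chomp1d trick used in TemporalBlock: padding
# both ends by (kernel_size - 1) * dilation and then trimming that amount from
# the right keeps the output length equal to the input length while remaining
# causal. The sizes below are illustrative only.
def _demo_temporal_block(seq_len=12):
    block = TemporalBlock(n_inputs=6, n_outputs=32, kernel_size=3, stride=1,
                          dilation=2, padding=(3 - 1) * 2, dropout=0.2)
    x = torch.randn(24, 6, seq_len)
    out = block(x)  # (24, 32, seq_len): same temporal length as the input
    return out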
class TemporalConvNet(nn.Module):
    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNet, self).__init__()
        layers = []
        num_levels = len(num_channels)
        for i in range(num_levels):
            dilation_size = 2 ** i  # dilation doubles at every level
            in_channels = num_inputs if i == 0 else num_channels[i - 1]
            out_channels = num_channels[i]
            layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
                                     padding=(kernel_size - 1) * dilation_size, dropout=dropout)]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        # x: (batch, num_inputs, seq_len) -> (batch, num_channels[-1], seq_len)
        return self.network(x)
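# Minimal usage sketch for the TCN wrapper, assuming the (batch, seq_len, features)
# layout that TCN.forward expects. num_channels, kernel_size and the tensor sizes
# are illustrative values, not the project's configuration.
def _demo_tcn(batch_size=24, seq_len=12, n_features=6):
    model = TCN(input_size=n_features, output_size=1,
                num_channels=[32, 32, 32], kernel_size=3, dropout=0.2)
    x = torch.randn(batch_size, seq_len, n_features)
    pred = model(x)  # (batch_size, 1)
    # With three levels, kernel_size=3 and dilations 1, 2, 4, the receptive field
    # is 1 + 2 * (3 - 1) * (1 + 2 + 4) = 29 steps, which covers seq_len=12.
    return pred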
class STCN(nn.Module):
    def __init__(self, input_size, in_channels, output_size, num_channels, kernel_size, dropout):
        super(STCN, self).__init__()
        # 1x1 convolutions fuse the station (channel) dimension down to a single map
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=(1, 1), stride=1, padding=0),
            torch.nn.BatchNorm2d(64),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=64, out_channels=1, kernel_size=(1, 1), stride=1, padding=0),
            torch.nn.BatchNorm2d(1),
            torch.nn.ReLU()
        )
        self.tcn = TemporalConvNet(input_size, num_channels, kernel_size, dropout=dropout)
        self.linear = nn.Linear(num_channels[-1], output_size)

    def forward(self, x):
        # x: (batch, in_channels, seq_len, features). Squeeze the singleton channel
        # dimension left by the 1x1 convolutions (dim 1, not dim 0, so batches
        # larger than one also work), giving (batch, seq_len, features).
        conv_out = self.conv(x).squeeze(1)
        output = self.tcn(conv_out.transpose(1, 2)).transpose(1, 2)
        pred = self.linear(output[:, -1, :])
        return pred
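# Optional smoke test, runnable with `python models.py`. The shapes follow the
# assumptions used in the sketches above (24 samples, 18 stations, 12 time steps,
# 6 features) and are illustrative rather than the project's actual settings.
if __name__ == "__main__":
    x = torch.randn(24, 18, 12, 6)
    stcn = STCN(input_size=6, in_channels=18, output_size=1,
                num_channels=[32, 32, 32], kernel_size=3, dropout=0.2)
    print("STCN:", stcn(x).shape)                             # torch.Size([24, 1])
    print("TemporalBlock:", _demo_temporal_block().shape)     # torch.Size([24, 32, 12])
    print("TCN:", _demo_tcn().shape)                          # torch.Size([24, 1])
    print("RNN/GRU/LSTM:", _demo_recurrent_models().shape)    # torch.Size([24, 1])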