'''
Description: TCN building blocks (Chomp1d, TemporalBlock, TemporalConvNet) and a
scaled dot-product attention module, intended for time-series forecasting.
Author: lhy
Date: 2023-11-12 22:06:12
'''
import numpy as np
import torch
import torch.nn as nn
from torch.nn import init
from torch.nn.utils import weight_norm
class Chomp1d(nn.Module):
    """Trim trailing time steps from a Conv1d output.

    Conv1d pads both ends of the sequence, so a causal TCN layer produces
    `padding` extra frames at the end; this module cuts them off so the
    output keeps the original `seq_len`.
    """

    def __init__(self, chomp_size):
        """
        :param chomp_size: int >= 0, number of trailing time steps to remove
        """
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        """
        :param x: tensor of shape (batch, channels, seq_len + chomp_size)
        :return: tensor of shape (batch, channels, seq_len), contiguous
        """
        # Guard: with chomp_size == 0 the original slice [:, :, :-0] would
        # return an EMPTY tensor instead of the unchanged input.
        if self.chomp_size == 0:
            return x.contiguous()
        return x[:, :, :-self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
    """One residual block of a TCN.

    Two weight-normalized dilated causal Conv1d layers, each followed by
    chomping the extra padding, ReLU and dropout, plus a skip connection.
    A 1x1 convolution adapts the skip path when channel counts differ.
    """

    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
        """
        :param n_inputs: int, number of input channels
        :param n_outputs: int, number of output channels
        :param kernel_size: int, convolution kernel size
        :param stride: int, stride (normally 1)
        :param dilation: int, dilation factor
        :param padding: int, padding added by each conv (chomped afterwards)
        :param dropout: float, dropout rate
        """
        super(TemporalBlock, self).__init__()
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        # Conv1d pads both ends, so its output is seq_len + padding frames long;
        # Chomp1d trims the trailing `padding` frames to keep the layer causal.
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)

        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)

        self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
                                 self.conv2, self.chomp2, self.relu2, self.dropout2)
        # 1x1 conv matches channel counts on the skip path when needed.
        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()  # NOTE(review): defined but never applied in forward()
        self.init_weights()

    def init_weights(self):
        """Draw every convolution weight from N(0, 0.01)."""
        for conv in (self.conv1, self.conv2):
            conv.weight.data.normal_(0, 0.01)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.01)

    def forward(self, x):
        """
        :param x: tensor of shape (batch, input_channel, seq_len)
        :return: tensor of shape (batch, output_channel, seq_len)
        """
        branch = self.net(x)
        shortcut = x if self.downsample is None else self.downsample(x)
        return branch + shortcut
class TemporalConvNet(nn.Module):
    """Temporal Convolutional Network.

    A stack of TemporalBlock residual blocks whose dilation doubles at every
    level, giving an exponentially growing receptive field over the sequence
    dimension. Each time step is a vector, carried as Conv1d input channels.
    """

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        """
        :param num_inputs: int, number of input channels
        :param num_channels: list of int, hidden channels per level, e.g.
            [25, 25, 25, 25] builds 4 levels with 25 channels each
        :param kernel_size: int, convolution kernel size
        :param dropout: float, dropout rate
        """
        super(TemporalConvNet, self).__init__()
        blocks = []
        for level, out_ch in enumerate(num_channels):
            dilation = 2 ** level  # 1, 2, 4, 8, ...
            in_ch = num_inputs if level == 0 else num_channels[level - 1]
            # (kernel_size - 1) * dilation of padding keeps seq_len constant
            # once TemporalBlock chomps the trailing frames.
            blocks.append(TemporalBlock(in_ch, out_ch, kernel_size, stride=1,
                                        dilation=dilation,
                                        padding=(kernel_size - 1) * dilation,
                                        dropout=dropout))
        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        """
        Unlike an RNN (which takes (batch, seq_len, channels)), seq_len is the
        LAST axis here so Conv1d convolves directly across time steps.
        :param x: tensor of shape (batch, input_channel, seq_len)
        :return: tensor of shape (batch, output_channel, seq_len)
        """
        return self.network(x)
# 4. Attention mechanism
class ScaledDotProductAttention(nn.Module):
'''
Scaled dot-product attention
'''
    def __init__(self, d_model, d_k, d_v, h, dropout=0.1):
        '''
        :param d_model: Output dimensionality of the model
        :param d_k: Dimensionality of queries and keys (per head)
        :param d_v: Dimensionality of values (per head)
        :param h: Number of heads
        :param dropout: Dropout rate
        '''
        super(ScaledDotProductAttention, self).__init__()
        # Joint projections for all heads: each maps d_model -> h * head_dim.
        self.fc_q = nn.Linear(d_model, h * d_k)   # query projection
        self.fc_k = nn.Linear(d_model, h * d_k)   # key projection
        self.fc_v = nn.Linear(d_model, h * d_v)   # value projection
        self.fc_o = nn.Linear(h * d_v, d_model)   # output projection back to d_model
        # NOTE(review): self.dropout is never applied in the visible portion of
        # forward() — confirm whether it was meant to wrap the attention weights.
        self.dropout = nn.Dropout(dropout)
        self.d_model = d_model
        self.d_k = d_k
        self.d_v = d_v
        self.h = h
        self.init_weights()
    def init_weights(self):
        """Initialize submodule parameters by type.

        Kaiming-normal for Conv2d, constant (1, 0) for BatchNorm2d, and
        N(0, std=0.001) for Linear weights with zero bias. Only the Linear
        branch can fire here, since this module contains just the fc_*
        layers; the other branches appear copied from a generic init helper.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
def forward(self, queries, keys, values, attention_mask=None, attention_weights=None):
'''
Computes
:param queries: Queries (b_s, nq, d_model)
:param keys: Keys (b_s, nk, d_model)
:param values: Values (b_s, nk, d_model)
:param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking.
:param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk).
:return:
'''
#print("queries=",queries.shape)#queries= torch.Size([32, 4, 50])
b_s, nq = queries.shape[:2]#32 4
nk = keys.shape[1]#4
q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) # (b_s, h, nq, d_k) 32 4 2 50
k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) # (b_s, h, d_k, nk)32 4 2 50
v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) # (b_s, h, nk, d_v)32 4 2 50
# print("q=",q.shape)
# print("k=",k.shape)
# print("v=",v.shape)
att = torch.matmul(q, k) / np.sqrt(self.d_k) # (b_s, h, nq, nk)
#print("att=", att
# NOTE(review): the file is truncated here, in the middle of
# ScaledDotProductAttention.forward() (the softmax over `att`, the weighted
# sum with `v`, the reshape back to (b_s, nq, h * d_v), and the fc_o output
# projection are all missing). The remainder of the original paste was
# non-code web-page text and has been replaced by this comment so the module
# stays syntactically valid. Restore the rest of forward() from the original
# source before using this class.