from data_provider.data_factory import data_provider
from exp.exp_basic import Exp_Basic
from models import Informer, Autoformer, Transformer, LSTM, CNN, MLP, BiLSTM, CLA
from utils.tools import EarlyStopping, adjust_learning_rate, visual
from utils.metrics import metric
import numpy as np
import torch
import torch.nn as nn
from torch import optim
import os
import time
import warnings
import matplotlib.pyplot as plt
import numpy as np
warnings.filterwarnings('ignore')
# Traditional transfer learning (entire class below is commented out)
# class Exp_Main_Transfer(Exp_Basic):
# def __init__(self, args):
# super(Exp_Main_Transfer, self).__init__(args)
#
# def _build_model(self):
# model_dict = {
# 'Autoformer': Autoformer,
# 'Transformer': Transformer,
# 'Informer': Informer,
# 'LSTM': LSTM,
# 'CNN': CNN,
# 'MLP': MLP,
# 'BiLSTM': BiLSTM,
# 'CLA': CLA
#
# }
# model = model_dict[self.args.model].Model(self.args).float()
#
# if self.args.use_multi_gpu and self.args.use_gpu:
# model = nn.DataParallel(model, device_ids=self.args.device_ids)
# return model
#
# def _get_data(self, flag):
# data_set, data_loader = data_provider(self.args, flag)
# return data_set, data_loader
#
# def _select_optimizer(self):
# model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
# return model_optim
#
# def _select_criterion(self):
# criterion = nn.MSELoss()
# return criterion
#
# def vali(self, vali_data, vali_loader, criterion):
# total_loss = []
# self.model.eval()
# with torch.no_grad():
# for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(vali_loader):
# batch_x = batch_x.float().to(self.device)
# batch_y = batch_y.float()
#
# batch_x_mark = batch_x_mark.float().to(self.device)
# batch_y_mark = batch_y_mark.float().to(self.device)
#
# # decoder input
# dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
# dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)
# # encoder - decoder
# if self.args.use_amp:
# with torch.cuda.amp.autocast():
# if self.args.output_attention:
# outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
# else:
# outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
# else:
# if self.args.model == 'LSTM' or self.args.model == 'CNN' or self.args.model == 'MLP' or self.args.model == 'BiLSTM' or self.args.model == 'CLA':
# outputs = self.model(batch_x)
# outputs = outputs[:, -self.args.pred_len:, :]
# else:
# if self.args.output_attention:
# outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
# else:
# outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark, batch_y)
#
# if self.args.features == 'MS':
# target_map = {'PH': 0, 'DO': 1, 'COD': 2, 'NH3-N': 3}
# set_target = target_map[self.args.target]
# f_dim = set_target
# batch_y = batch_y[:, -self.args.pred_len:, f_dim].unsqueeze(2).to(self.device)
# else:
# f_dim = 0
# batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)
#
# pred = outputs.detach().cpu()
# true = batch_y.detach().cpu()
#
# loss = criterion(pred, true)
#
# total_loss.append(loss)
# total_loss = np.average(total_loss)
# self.model.train()
# return total_loss
#
# def tunning_train(self, setting):
# vali_data, vali_loader = self._get_data(flag='val')
# test_data, test_loader = self._get_data(flag='test')
# path = os.path.join(self.args.checkpoints, setting)
# print('loading model')
# self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))
#
# tunning_steps = len(vali_loader)
# print('tunning_steps:{} '.format(tunning_steps))
#
# model_optim = self._select_optimizer() # optimizer
# criterion = self._select_criterion() # loss function
#
# for epoch in range(self.args.train_epochs):
# iter_count = 0
# tunning_loss = []
# self.model.train()
#
# for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(vali_loader):
# iter_count += 1
# model_optim.zero_grad()
# batch_x = batch_x.float().to(self.device)
# batch_y = batch_y.float()
#
# batch_x_mark = batch_x_mark.float().to(self.device)
# batch_y_mark = batch_y_mark.float().to(self.device)
#
# # decoder input
# dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
# dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)
# # encoder - decoder
#
# if self.args.model == 'LSTM' or self.args.model == 'CNN' or self.args.model == 'MLP' or self.args.model == 'BiLSTM' or self.args.model == 'CLA':
# outputs = self.model(batch_x)
# outputs = outputs[:, -self.args.pred_len:, :]
# else:
# if self.args.output_attention:
# outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
# else:
# outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark, batch_y)
#
# if self.args.features == 'MS':
# target_map = {'PH': 0, 'DO': 1, 'COD': 2, 'NH3-N': 3}
# set_target = target_map[self.args.target]
# f_dim = set_target
# batch_y = batch_y[:, -self.args.pred_len:, f_dim].unsqueeze(2).to(self.device)
# else:
# f_dim = 0
# batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)
# loss = criterion(outputs, batch_y)
# tunning_loss.append(loss.item())
# loss.backward()
# model_optim.step()
#
# tunning_loss = np.average(tunning_loss)
# test_loss = self.vali(test_data, test_loader, criterion)
#
# print("Epoch: {0}, Steps: {1} | Tunning Loss: {2:.7f} Test Loss: {3:.7f}".format(
# epoch + 1, tunning_steps, tunning_loss, test_loss))
#
# torch.save(self.model.state_dict(), path + '/' + 'checkpoint.pth')
#
# return self.model
#
# def train(self, setting):
# train_data, train_loader = self._get_data(flag='train')
# vali_data, vali_loader = self._get_data(flag='val')
# test_data, test_loader = self._get_data(flag='test')
# # Reshape the 3-D spatio-temporal water-quality data into 2-D
#
# path = os.path.join(self.args.checkpoints, setting)
# if not os.path.exists(path):
# os.makedirs(path)
#
# time_now = time.time()
#
# train_steps = len(train_loader)
# print('train_steps:{} '.format(train_steps))
# early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
#
# model_optim = self._select_optimizer() # optimizer
# criterion = self._select_criterion() # loss function
#
没有合适的资源?快使用搜索试试~ 我知道了~
资源推荐
资源详情
资源评论
收起资源包目录
基于深度迁移学习的水质预测研究.zip (68个子文件)
TLT-main-main
layers
__init__.py 0B
Autoformer_EncDec.py 6KB
AutoCorrelation.py 6KB
SelfAttention_Family.py 6KB
Transformer_EncDec.py 4KB
__pycache__
SelfAttention_Family.cpython-37.pyc 5KB
Embed.cpython-37.pyc 6KB
Autoformer_EncDec.cpython-37.pyc 6KB
AutoCorrelation.cpython-37.pyc 5KB
Transformer_EncDec.cpython-37.pyc 4KB
__init__.cpython-37.pyc 156B
Embed.py 5KB
data
__init__.py 1B
ETT
ETTh1.csv 2.47MB
water_norm.npy 413KB
water.npy 619KB
ETTh2.csv 2.31MB
water_denoise.npy 413KB
new_water.npy 1.27MB
ETTm2.csv 9.23MB
new_water_norm.npy 327KB
ETTm1.csv 9.88MB
data_loader.py 13KB
utils
__init__.py 0B
metrics.py 866B
masking.py 832B
download_data.py 328B
timefeatures.py 4KB
tools.py 3KB
__pycache__
timefeatures.cpython-37.pyc 6KB
metrics.cpython-37.pyc 1KB
tools.cpython-37.pyc 3KB
__init__.cpython-37.pyc 155B
masking.cpython-37.pyc 1KB
exp
__init__.py 0B
exp_main_transfer.py 38KB
exp_basic.py 885B
__pycache__
__init__.cpython-37.pyc 153B
exp_main.cpython-37.pyc 8KB
exp_main_transfer.cpython-37.pyc 11KB
exp_basic.cpython-37.pyc 2KB
exp_main.py 14KB
run.py 8KB
environment.yml 186B
models
Autoformer.py 4KB
__init__.py 0B
LSTM.py 658B
Informer.py 3KB
MLP.py 1KB
BiLSTM.py 786B
CNN.py 1024B
Transformer.py 3KB
CLA.py 1KB
__pycache__
Transformer.cpython-37.pyc 2KB
CNN.cpython-37.pyc 1KB
MLP.cpython-37.pyc 936B
Autoformer.cpython-37.pyc 3KB
CLA.cpython-37.pyc 1KB
Informer.cpython-37.pyc 3KB
__init__.cpython-37.pyc 156B
LSTM.cpython-37.pyc 1KB
BiLSTM.cpython-37.pyc 1KB
data_provider
__init__.py 1B
data_loader.py 31KB
__pycache__
data_loader.cpython-37.pyc 18KB
data_factory.cpython-37.pyc 1KB
__init__.cpython-37.pyc 163B
data_factory.py 2KB
共 68 条
- 1
资源评论
博士僧小星
- 粉丝: 1924
- 资源: 5885
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功