import os
from random import seed
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error
from tensorflow.python.framework.random_seed import set_random_seed
from tensorflow.python.keras import Input
from tensorflow.python.keras.layers import Bidirectional, GRU
from tensorflow.python.keras.layers import Dense, Conv1D
from tensorflow.python.keras.layers import Flatten
from tensorflow.python.keras.layers import Permute, Multiply
from tensorflow.python.keras.models import Model
# Use the SimHei font so Chinese axis labels and legend text render correctly.
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
# Fix the TensorFlow and Python RNG seeds for reproducible training runs.
set_random_seed(11)
seed(7)
def attention_3d_block(inputs):
    """Apply a soft attention mask over the feature axis of a 3-D tensor.

    A Dense layer with softmax activation produces per-feature weights,
    which are multiplied element-wise with the original tensor.

    NOTE(review): ``Permute((1, 2))`` leaves axes 1 and 2 where they are,
    i.e. it is an identity transform here; it only serves to attach the
    layer name ``attention_vec``. It is kept to preserve behaviour and the
    layer name. (The classic version permutes with ``(2, 1)`` to attend
    over time steps instead — confirm which was intended.)
    """
    feature_dim = int(inputs.shape[2])
    weights = Dense(feature_dim, activation='softmax')(inputs)
    weights = Permute((1, 2), name='attention_vec')(weights)
    return Multiply()([inputs, weights])
def bp_lstm(lstm_units, dense1, look_back, input_dims=None, n_outputs=None):
    """Build the (uncompiled) CNN + BiGRU + attention regression model.

    Args:
        lstm_units: Hidden units in each direction of the bidirectional GRU.
        dense1: Width of the first fully connected layer in the head.
        look_back: Number of past time steps in each input window.
        input_dims: Features per time step. Defaults to the module-level
            ``INPUT_DIMS`` for backward compatibility with existing callers.
        n_outputs: Number of predicted values. Defaults to the module-level
            ``pre_year``.

    Returns:
        A keras ``Model`` mapping ``(look_back, input_dims)`` windows to
        ``n_outputs`` linear outputs.
    """
    if input_dims is None:
        input_dims = INPUT_DIMS
    if n_outputs is None:
        n_outputs = pre_year
    inputs = Input(shape=(look_back, input_dims))
    # kernel_size=1 convolution mixes features per time step before the RNN.
    x = Conv1D(filters=128, kernel_size=1, activation='relu')(inputs)
    # return_sequences=True keeps the time axis so attention can weight it.
    lstm_out = Bidirectional(
        GRU(lstm_units, return_sequences=True, activation='relu'),
        name='bilstm')(x)
    # NOTE: on GPU a CuDNN-backed recurrent layer could replace the GRU above.
    attention_mul = attention_3d_block(lstm_out)
    attention_mul = Flatten()(attention_mul)
    output = Dense(dense1)(attention_mul)
    output = Dense(n_outputs, activation='linear')(output)
    return Model(inputs=[inputs], outputs=output)
def fit_size(x, y):
    """Min-max scale features and targets to the [0, 1] range.

    Returns the scaled ``x``, the scaled ``y``, and the fitted y-scaler so
    that predictions can later be mapped back via ``inverse_transform``.
    """
    from sklearn import preprocessing
    scaler_x = preprocessing.MinMaxScaler()
    scaler_y = preprocessing.MinMaxScaler()
    x_scaled = scaler_x.fit_transform(x)
    y_scaled = scaler_y.fit_transform(y)
    return x_scaled, y_scaled, scaler_y
# 预测未来1次的电池容量
def create_dataset(dataset, train_y, look_back):
    """Slice a 2-D array into sliding windows paired with one-step targets.

    Window ``i`` is ``dataset[i:i+look_back, :]``; its target is the single
    value ``train_y[i+look_back, 0]`` (predict the next cycle's capacity).

    NOTE(review): the loop stops at ``len(dataset) - look_back - 1``, so the
    last possible window is skipped — preserved as-is to match downstream
    array sizes.

    Returns:
        (TrainX, Train_Y): a ``(n, look_back, features)`` ndarray and an
        ``(n, 1)`` ndarray of targets.
    """
    n_windows = len(dataset) - look_back - 1
    windows = [dataset[start:start + look_back, :] for start in range(n_windows)]
    targets = [train_y[start + look_back:start + look_back + 1, 0].tolist()
               for start in range(n_windows)]
    return np.array(windows), pd.DataFrame(targets).values
# --- Data loading and preprocessing (script section) ---
# NOTE(review): hard-coded Windows path; os.chdir makes every later relative
# path (the CSV below and the saved PNG) resolve under this directory.
src = r'C:\Projects\CNN-LSTM-attention+cnn+lstm三个模型\锂电池状态评估\Batterydataset (2)\Batterydataset\\'
os.chdir(src)
train_path = 'B0005.csv'
# GBK-encoded NASA battery dataset; column 0 appears to be the capacity/SOH
# target — confirm against the CSV header.
df = pd.read_csv(train_path, encoding='gbk')
# train_size = int(df.shape[0] * 0.8)  # 0.036  (alternative split, kept for reference)
# Chronological 70/30 train/test split (no shuffling — time series).
train_size = int(df.shape[0] * 0.7)
train = df.iloc[:train_size, :]
test = df.iloc[train_size:, :]
# Targets are taken from column 0 and reshaped to (n, 1) for the scalers.
train_y = df.values[:train_size, 0].reshape(-1, 1)
test_y = df.values[train_size:, 0].reshape(-1, 1)
# Min-max normalisation; keep the y-scalers to invert predictions later.
# NOTE(review): the test set is scaled with its own fit, not the train fit —
# this leaks test statistics; verify this is intentional.
train_x, train_y, train_y_MinMax = fit_size(train, train_y)
test_x, test_y, test_y_MinMax = fit_size(test, test_y)
# TRAIN
# Features per time step = all CSV columns (the target column is included).
INPUT_DIMS = train.shape[1]
# Predict the capacity of the next single cycle.
pre_year = 1
# 调参方法
def objective(trial):
    """Optuna objective: sample hyper-parameters, train once, return the MAE.

    BUG FIX: the sampled learning rate ``lr`` was previously discarded —
    the model was compiled with the string ``'adam'`` (default learning
    rate), so Optuna was tuning a parameter with no effect. It is now
    passed to an explicit Adam optimizer.

    Args:
        trial: an ``optuna.trial.Trial`` used to sample hyper-parameters.

    Returns:
        The final-epoch training loss (MAE), which Optuna minimises.
    """
    from tensorflow.keras.optimizers import Adam  # local import: fix only needs it here
    lr = trial.suggest_uniform('lr', 0.002, 0.1)
    batch_size = trial.suggest_int('batch_size', 4, 6)
    lstm_units = trial.suggest_int('lstm_units', 33, 55)
    dense1 = trial.suggest_int('dense1', 54, 99)
    epochs = trial.suggest_int('epochs', 10, 100)
    look_back = trial.suggest_int('look_back', 1, 9)
    train_X, train_Y = create_dataset(train_x, train_y, look_back)
    # Drop rows whose target is NaN, then truncate X to the surviving count.
    # NOTE(review): this truncates by count, it does not re-index X to the
    # surviving target rows — only equivalent if NaNs are at the tail.
    train_Y_data = pd.DataFrame(train_Y)
    train_Y_data.dropna(inplace=True)
    train_X = train_X[:train_Y_data.shape[0]]
    train_Y = train_Y_data.values
    train_X = train_X.astype('float64')
    m = bp_lstm(lstm_units, dense1, look_back)
    m.compile(loss='mae', optimizer=Adam(learning_rate=lr))
    hist = m.fit(train_X, train_Y, epochs=epochs, batch_size=batch_size, verbose=0)
    mae = hist.history['loss'][-1]
    return mae
# Best hyper-parameters from an earlier Optuna search, hard-coded here.
parameters = {'lr': 0.01782551732488114, 'batch_size': 6, 'lstm_units': 43, 'dense1': 83, 'epochs': 50, 'look_back': 2}
lstm_units = parameters['lstm_units']
# NOTE(review): lr is read but never passed to the optimizer below.
lr = parameters['lr']
look_back = parameters['look_back']
batch_size = parameters['batch_size']
dense1 = parameters['dense1']
epochs = parameters['epochs']
# Final training run on the full training split.
train_X, train_Y = create_dataset(train_x, train_y, look_back)
m = bp_lstm(lstm_units, dense1, look_back)
m.compile(loss='mae', optimizer='Adam')
hist = m.fit(train_X, train_Y, epochs=epochs, batch_size=batch_size)
# Evaluate on the held-out split; MAE is in normalised (min-max) units.
test_X, test_Y = create_dataset(test_x, test_y, look_back)
test_X = test_X.astype('float64')
pred_y = m.predict(test_X)
mae = mean_absolute_error(test_Y, pred_y)
print(mae)
# Map predictions back to original capacity units via the fitted y-scaler.
inv_pred_y = test_y_MinMax.inverse_transform(pred_y.reshape(-1, 1))
# Plot predicted vs. true SOH over cycle number.
# NOTE(review): 164 and 167 look hard-coded to this CSV's row count; the x
# range may not match len(inv_pred_y) for other files — confirm. The second
# label="pred" is misleading but harmless: plt.legend() below overrides both.
plt.plot([x for x in range(train_size, 164)], inv_pred_y, color="red", label="pred")
plt.plot([x for x in range(167)], df.values[:, 0], color="blue", label="pred")
plt.ylabel('SOH')
plt.xlabel('循环次数')
plt.title('SOH pred vs test')
plt.legend(['SOH预测值', 'SOH真实值'], loc='best')
plt.savefig("SOH_test_real_pred.png", dpi=500, bbox_inches='tight')
plt.show()
没有合适的资源?快使用搜索试试~ 我知道了~
cnn-bigru-attention对电池寿命进行预测
共2个文件
py:1个
csv:1个
1.该资源内容由用户上传,如若侵权请联系客服进行举报
2.虚拟产品一经售出概不退款(资源遇到问题,请及时私信上传者)
版权申诉
5星 · 超过95%的资源 9 下载量 31 浏览量
2023-02-19
11:53:50
上传
评论 11
收藏 4KB ZIP 举报
温馨提示
cnn-bigru-attention对电池寿命进行预测 1、摘要 本文主要讲解:cnn-bigru-attention对电池寿命进行预测 主要思路: 建立cnn-bigru-attention模型 读取数据,将数据截成时序块 训练模型,调参,评估模型,保存模型 2、数据介绍 电池寿命数据 3、相关技术 GRU 相比LSTM,使用GRU能够达到相当的效果,准确率不会差多少,并且相比之下GRU更容易进行训练,能够很大程度上提高训练效率,因此硬件资源有限时会更倾向于使用GRU。 GRU结构图如下: 4、完整代码和步骤 此代码的依赖环境如下: tensorflow==2.5.0 numpy==1.19.5 keras==2.6.0 matplotlib==3.5.2 ———————————————— 版权声明:本文为CSDN博主「AI信仰者」的原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接及本声明。 原文链接:https://blog.csdn.net/qq_30803353/article/details/129108978
资源推荐
资源详情
资源评论
收起资源包目录
cnn_bigru.zip (2个子文件)
cnn_bigru
B0005.csv 4KB
cnn_bigru.py 5KB
共 2 条
- 1
AI信仰者
- 粉丝: 1w+
- 资源: 143
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功
- 1
- 2
- 3
- 4
前往页