import os
import random
from sklearn.model_selection import train_test_split
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # select the first GPU; this variable takes device indices, not TensorFlow device strings
from numpy import array
from sklearn.preprocessing import OneHotEncoder  # for one-hot encoding
from sklearn import preprocessing
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import Permute, Multiply
from tensorflow.keras import Input
from tensorflow.keras.layers import GRU
from tensorflow.keras.layers import Flatten
from tensorflow.keras.models import Model
import pandas as pd
from imblearn.over_sampling import SMOTE
import numpy as np
import matplotlib.pyplot as plt
class woa():
    # Whale Optimization Algorithm (WOA), here used to tune the GRU-attention hyperparameters
    def __init__(self, X_train, test_x, LB, UB, dim=7, b=1, whale_num=20, max_iter=500):
        self.X_train = X_train
        self.test_x = test_x  # the test features are passed in explicitly so fitFunc can transform them
        self.LB = LB
        self.UB = UB
        self.dim = dim  # 7 hyperparameters are decoded in fitFunc
        self.whale_num = whale_num
        self.max_iter = max_iter
        self.b = b  # spiral shape constant
        # Initialize the whale positions uniformly inside [LB, UB]
        self.X = np.random.uniform(0, 1, (whale_num, dim)) * (UB - LB) + LB
        self.gBest_score = np.inf
        self.gBest_curve = np.zeros(max_iter)
        self.gBest_X = np.zeros(dim)

    # Fitness function; the position vector encodes
    # [GRU_units1, GRU_units2, epochs, time_step, batch_size, dropout1, dropout2]
    def fitFunc(self, para):
        # Decode one candidate solution into concrete hyperparameters
        GRU_units1 = int(para[0])
        GRU_units2 = int(para[1])
        epoch = int(para[2])
        time_step = int(para[3])
        batch_size = int(para[4])
        Dropout1 = para[5]
        Dropout2 = para[6]
        print([GRU_units1, GRU_units2, Dropout1, Dropout2, epoch, time_step, batch_size])
        # Standardization: fit the scaler on the training data only
        scaler = preprocessing.StandardScaler().fit(self.X_train)  # the scaler stores the per-feature means and variances
        train_x = scaler.transform(self.X_train)  # standardize train_x with those stored means and variances
        train_x = train_x.reshape((-1, time_step, input_size))  # reshape to the GRU input format (samples, steps, features); input_size is the module-level feature count
        # Transform the test data with the same scaler
        test_x = scaler.transform(self.test_x)
        test_x = test_x.reshape((-1, time_step, input_size))
        # Build the model
        def attention_block(inputs):
            input_dim = int(inputs.shape[2])
            a = Dense(input_dim, activation='softmax')(inputs)  # attention weights over the GRU output features
            a_probs = Permute((1, 2), name='attention_vec')(a)  # (1, 2) is an identity permutation, kept for the layer name
            output_attention_mul = Multiply()([inputs, a_probs])  # re-weight the GRU outputs element-wise
            return output_attention_mul

        def bp_lstm(dr1, lstm_units, dr2, lstm_units1):
            inputs = Input(shape=(time_step, input_size))  # must match the (time_step, input_size) reshape above
            out = GRU(lstm_units, return_sequences=True)(inputs)
            out = Dropout(dr1)(out)
            out = GRU(lstm_units1, return_sequences=True)(out)
            out = Dropout(dr2)(out)
            attention_mul = attention_block(out)
            attention_mul = Flatten()(attention_mul)
            output = Dense(3, activation='softmax')(attention_mul)  # 3-class softmax output
            model = Model(inputs=[inputs], outputs=output)
            return model

        model = bp_lstm(Dropout1, GRU_units1, Dropout2, GRU_units2)
        # Compile the model: model.compile(loss=..., optimizer=..., metrics=[...])
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])  # categorical_crossentropy is the usual pairing with a softmax output
        # Fit the model; train_y / test_y are the module-level one-hot labels
        history = model.fit(train_x, train_y,
                            epochs=epoch,
                            batch_size=batch_size,
                            validation_data=(test_x, test_y),
                            verbose=0,
                            shuffle=True)
        print(history.history['val_loss'][-1])
        # Use the final validation loss as the fitness value to minimize
        return history.history['val_loss'][-1]
    # Optimization loop
    def opt(self):
        t = 0
        while t < self.max_iter:
            print('At iteration: ' + str(t))
            for i in range(self.whale_num):
                # Keep positions inside the search bounds
                self.X[i, :] = np.clip(self.X[i, :], self.LB, self.UB)
                fitness = self.fitFunc(self.X[i, :])
                # Update the global best score and position
                if fitness <= self.gBest_score:
                    self.gBest_score = fitness
                    self.gBest_X = self.X[i, :].copy()
            print('self.gBest_score: ', self.gBest_score)
            print('self.gBest_X: ', self.gBest_X)
            a = 2 * (self.max_iter - t) / self.max_iter  # a decreases linearly from 2 to 0
            # Update the whale positions
            for i in range(self.whale_num):
                p = np.random.uniform()
                R1 = np.random.uniform()
                R2 = np.random.uniform()
                A = 2 * a * R1 - a
                C = 2 * R2
                l = 2 * np.random.uniform() - 1
                if p >= 0.5:
                    # Spiral update: X(t+1) = D * e^(b*l) * cos(2*pi*l) + X_best
                    D = abs(self.gBest_X - self.X[i, :])
                    self.X[i, :] = D * np.exp(self.b * l) * np.cos(2 * np.pi * l) + self.gBest_X
                else:
                    if abs(A) < 1:
                        # Encircling the prey: shrink toward the current best
                        D = abs(C * self.gBest_X - self.X[i, :])
                        self.X[i, :] = self.gBest_X - A * D
                    else:
                        # Exploration: move relative to a randomly chosen whale
                        rand_index = np.random.randint(low=0, high=self.whale_num)
                        X_rand = self.X[rand_index, :]
                        D = abs(C * X_rand - self.X[i, :])
                        self.X[i, :] = X_rand - A * D
            self.gBest_curve[t] = self.gBest_score
            t += 1
        return self.gBest_curve, self.gBest_X
# ---- Data import ----
raw_data = pd.read_csv(r'C:\Projects\鲸鱼算法分别优化cart和加了注意力的GRU\train_data.csv', header=0)
origin_data_x = raw_data.iloc[:, 1:].values  # columns 2..end are the features
origin_data_y = raw_data.iloc[:, 0].values  # column 1 is the label
# Handle class imbalance by oversampling the minority classes
oversample = SMOTE()
origin_data_x, origin_data_y = oversample.fit_resample(origin_data_x, origin_data_y)
index = [j for j in range(len(origin_data_x))]  # list of sample indices
random.shuffle(index)  # shuffle the indices in place
origin_data_y = origin_data_y[index]  # reorder labels and features with the same shuffled indices
origin_data_x = origin_data_x[index]
# Count the samples per label
from collections import Counter
a = Counter(origin_data_y)
print(dict(a))
# ---- Parameter definition ----
input_size = 7  # number of features per sample

# One-hot encode the labels: converts the categorical (unordered) class values into
# binary vectors that a softmax classifier can consume
# (background: https://developer.aliyun.com/article/126741)
def label2hot(labels):
    values = array(labels)  # the labels are already integer-encoded, so no extra label-to-integer step is needed
    # one-hot (binary) encoding
    onehot_encoder = OneHotEncoder(sparse=False)  # note: sklearn >= 1.2 renames this argument to sparse_output
    integer_encoded = values.reshape(len(values), 1)  # column vector, as OneHotEncoder expects
    onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
    return onehot_encoded
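
# As a quick illustration of label2hot's output (assuming the three classes 0/1/2 used below):
# label2hot([0, 2, 1]) ->
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]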
hot_data_y = label2hot(origin_data_y[:])
# L2 normalization (not regularization): scale each sample row to unit Euclidean norm
origin_data_x = preprocessing.normalize(origin_data_x, norm='l2', axis=1, return_norm=False)
train_x, test_x, train_y, test_y = train_test_split(origin_data_x, hot_data_y, test_size=0.2)  # 80/20 train/test split
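
# ---- Run the optimizer and evaluate (a minimal sketch) ----
# The original file is truncated at this point. The code below is a hedged
# reconstruction: the LB/UB search bounds and the small whale_num/max_iter
# settings are illustrative assumptions, while the final evaluation
# (classification_report and confusion_matrix over the three classes) follows
# the fragment that survives from the original script.
# Hyperparameter order: [GRU_units1, GRU_units2, epochs, time_step, batch_size, dropout1, dropout2];
# time_step is pinned to 1 because each sample is one row of input_size features.
LB = np.array([16, 16, 10, 1, 8, 0.1, 0.1])     # assumed lower bounds
UB = np.array([128, 128, 50, 1, 64, 0.5, 0.5])  # assumed upper bounds
optimizer = woa(train_x, test_x, LB, UB, dim=7, whale_num=5, max_iter=10)  # small settings: each fitness call trains a network
gBest_curve, gBest_X = optimizer.opt()
print('Best hyperparameters: ', gBest_X)

# Convergence curve of the best validation loss
plt.plot(gBest_curve)
plt.xlabel('Iteration')
plt.ylabel('Best validation loss')
plt.show()

# Retrain a final model with the best hyperparameters, rebuilding the same
# GRU + attention architecture used inside fitFunc (the identity Permute is omitted)
units1, units2 = int(gBest_X[0]), int(gBest_X[1])
epochs, time_step = int(gBest_X[2]), int(gBest_X[3])
batch_size = int(gBest_X[4])
dr1, dr2 = gBest_X[5], gBest_X[6]
scaler = preprocessing.StandardScaler().fit(train_x)
final_train_x = scaler.transform(train_x).reshape((-1, time_step, input_size))
final_test_x = scaler.transform(test_x).reshape((-1, time_step, input_size))
inputs = Input(shape=(time_step, input_size))
out = GRU(units1, return_sequences=True)(inputs)
out = Dropout(dr1)(out)
out = GRU(units2, return_sequences=True)(out)
out = Dropout(dr2)(out)
att = Dense(units2, activation='softmax')(out)  # attention weights, as in attention_block
out = Multiply()([out, att])
out = Flatten()(out)
output = Dense(3, activation='softmax')(out)
model = Model(inputs=[inputs], outputs=output)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(final_train_x, train_y, epochs=epochs, batch_size=batch_size,
                    validation_data=(final_test_x, test_y), verbose=0, shuffle=True)

# Validate the model: predict class probabilities, then convert to class indices
y_pred = model.predict(final_test_x)
y_pred = y_pred.argmax(axis=1)
y_true = test_y.argmax(axis=1)
from sklearn.metrics import classification_report, confusion_matrix
labels_name = ['0', '1', '2']
print(classification_report(y_true, y_pred, target_names=labels_name))
C = confusion_matrix(y_true, y_pred)  # rows are true classes, columns are predicted classes
print(C)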