import numpy as np
import os
import pandas as pd
import datetime
import matplotlib.pyplot as plt
from matplotlib import pyplot
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from keras.models import Model
from keras.layers import Input,Lambda
from keras.layers import Dense, Flatten,Activation
from keras.layers import Conv1D, MaxPooling1D, Concatenate,AveragePooling1D
from keras.layers import LSTM, Bidirectional
from keras.callbacks import EarlyStopping
from keras.layers.normalization import BatchNormalization
from keras.layers import Dropout
from keras.layers import ELU
from keras.utils import to_categorical
from keras import backend as K
from attention import Attention_layer,HierarchicalAttentionNetwork
import scipy.io as scio
from sklearn.metrics import roc_curve
from sklearn.model_selection import KFold
path = '/1data/'
def data_process(data_0, data_1, data_2, data_y, time_step):
    """Standardize the three modality matrices and cut them into sliding windows.

    Each modality is fitted with its own StandardScaler, then sliced into
    overlapping windows of length ``time_step`` with stride 1.

    Parameters
    ----------
    data_0, data_1, data_2 : 2-D array-likes of shape (n_samples, n_features_k)
        Raw feature matrices for the three input branches (row-aligned).
    data_y : array-like of shape (n_samples, 1)
        Integer class labels aligned with the feature rows.
    time_step : int
        Window length.

    Returns
    -------
    data0, data1, data2 : ndarray, shape (n_windows, time_step, n_features_k)
        Windowed, standardized features; n_windows = n_samples - time_step + 1.
    datay : ndarray, shape (n_windows, 1)
        One label per window (the label of the window's last time step), so
        all four arrays share the same first dimension.
    scaler_0, scaler_1, scaler_2 : StandardScaler
        The fitted scalers, so callers can transform new data consistently.
    """
    scaler_0 = StandardScaler()
    scaler_1 = StandardScaler()
    scaler_2 = StandardScaler()
    scal_data_0 = scaler_0.fit_transform(data_0)
    scal_data_1 = scaler_1.fit_transform(data_1)
    scal_data_2 = scaler_2.fit_transform(data_2)

    data_out0 = []
    data_out1 = []
    data_out2 = []
    data_outy = []
    for i in range(len(data_0) - time_step + 1):
        # Contiguous slices are equivalent to the original per-step append
        # loop but avoid building an inner Python list element by element.
        data_out0.append(scal_data_0[i:i + time_step])
        data_out1.append(scal_data_1[i:i + time_step])
        data_out2.append(scal_data_2[i:i + time_step])
        # BUG FIX: the transcribed code appended a label for EVERY step of
        # the window, producing time_step labels per window, so len(datay)
        # != len(data0) and the train_test_split(data0, datay, ...) calls in
        # main() would fail on mismatched lengths.  Emit exactly one label
        # per window -- the label of its last time step.
        data_outy.append(data_y[i + time_step - 1])

    data0 = np.array(data_out0).reshape(-1, time_step, len(data_0[0]))
    data1 = np.array(data_out1).reshape(-1, time_step, len(data_1[0]))
    data2 = np.array(data_out2).reshape(-1, time_step, len(data_2[0]))
    datay = np.array(data_outy).reshape(-1, 1)
    return data0, data1, data2, datay, scaler_0, scaler_1, scaler_2
def main(save_confusion=False):
    """Train and evaluate the three-branch CNN-LSTM model with 10-fold CV.

    Loads ``./data.csv``, splits its columns into three modalities
    (vehicle 1:5, environment 5:15, road 15:-1; last column = label in
    {1,2,3}, shifted to {0,1,2}), windows them via ``data_process``, holds
    out a small stratified validation set, then trains one fresh model per
    KFold fold and reports hold-out accuracy and learning curves.

    Parameters
    ----------
    save_confusion : bool
        Forwarded to ``evaluate_model``; whether to save the confusion
        matrix plot.
    """
    model = define_model()
    print("Loaddata...")
    data = pd.read_csv('./data.csv')
    # Row 0 is skipped (presumably a header-like first row -- TODO confirm
    # against data.csv).
    data_car = data.iloc[1:, 1:5].values.astype(float)
    data_env = data.iloc[1:, 5:15].values.astype(float)
    data_roa = data.iloc[1:, 15:-1].values.astype(float)
    data_y = data.iloc[1:, -1].values.astype(int).reshape(-1, 1) - 1
    data0, data1, data2, datay, _, _, _ = data_process(
        data_car, data_env, data_roa, data_y, time_step=5)
    # The same random_state/stratify on all three calls keeps the modality
    # splits row-aligned with each other and with the labels.
    X_train_0, X_valid_0, y_train, y_valid = train_test_split(
        data0, datay, test_size=0.01, random_state=25, stratify=datay)
    X_train_1, X_valid_1, y_train, y_valid = train_test_split(
        data1, datay, test_size=0.01, random_state=25, stratify=datay)
    X_train_2, X_valid_2, y_train, y_valid = train_test_split(
        data2, datay, test_size=0.01, random_state=25, stratify=datay)
    y_valid = to_categorical(y_valid, 3)
    y_valid = y_valid[:, 0:3]

    kf = KFold(n_splits=10, shuffle=False)
    acc = []
    for k, (train_index, test_index) in enumerate(kf.split(X_train_0)):
        # BUG FIX: was `if k > 1`, which let fold 1 keep training the model
        # already fitted on fold 0 -- state leaked across the first two
        # folds.  Rebuild a fresh model for every fold after the first
        # (fold 0 uses the model created above).
        if k >= 1:
            model = define_model()
        X_train_k0, X_test_k0 = X_train_0[train_index], X_train_0[test_index]
        X_train_k1, X_test_k1 = X_train_1[train_index], X_train_1[test_index]
        X_train_k2, X_test_k2 = X_train_2[train_index], X_train_2[test_index]
        yk_train, yk_test = y_train[train_index], y_train[test_index]
        x_train = [X_train_k0, X_train_k1, X_train_k2]
        x_test = [X_test_k0, X_test_k1, X_test_k2]
        yk_train = to_categorical(yk_train, 3)[:, 0:3]
        yk_test = to_categorical(yk_test, 3)[:, 0:3]
        # CONSISTENCY FIX: monitor 'val_accuracy', the key this very script
        # reads from history below; the legacy name 'val_acc' is not emitted
        # by the Keras versions that produce 'val_accuracy', so early
        # stopping would silently never trigger.
        es = EarlyStopping(monitor='val_accuracy', mode='max', patience=10,
                           verbose=1, restore_best_weights=True)
        # BUG FIX: validation_split is ignored whenever validation_data is
        # supplied; the redundant validation_split=0.1 was dropped.
        history = model.fit(x_train, yk_train, batch_size=16, epochs=10,
                            verbose=1, shuffle=False,
                            validation_data=(x_test, yk_test),
                            callbacks=[es])
        _, accuracy = model.evaluate([X_valid_0, X_valid_1, X_valid_2],
                                     y_valid, batch_size=16, verbose=0)
        # BUG FIX: `acc` was created but never filled -- record the per-fold
        # hold-out accuracy.
        acc.append(accuracy)
        # Learning curves for this fold: accuracy, then loss.
        pyplot.plot(history.history['accuracy'], label='train')
        pyplot.plot(history.history['val_accuracy'], label='val')
        pyplot.legend()
        pyplot.show()
        pyplot.plot(history.history['loss'], label='train')
        pyplot.plot(history.history['val_loss'], label='val')
        pyplot.legend()
        pyplot.show()
        y_prediction = model.predict(x_test)
        evaluate_model(model, history, accuracy, yk_test, y_prediction,
                       save_confusion)
    return None
def define_model(time_step=5):
# first CNN input model cont
input1 = Input(shape=(time_step,4))
conv11 = Conv1D(20, 3, strides=1, padding='same')(input1)
acti11 = Activation('elu')(conv11)
pool11 = MaxPooling1D(pool_size=2, strides=1, padding='same')(acti11)
Drop11 = Dropout(0.3)(pool11)
conv12 = Conv1D(40, 5, strides=1, padding='same')(Drop11)
acti12 = Activation('elu')(conv12)
pool12 = MaxPooling1D(pool_size=2, strides=1, padding='same')(acti12)
Drop12 = Dropout(0.3)(pool12)
conv13 = Conv1D(80, 3, strides=1, padding='same')(Drop12)
acti13 = Activation('elu')(conv13)
pool13 = MaxPooling1D(pool_size=2, strides=1, padding='same')(acti13)
Drop13 = Dropout(0.3)(pool13)
lstm_out11 = LSTM(64,return_sequences=True)(Drop13)
lstm_out12 = LSTM(64,return_sequences=True)(lstm_out11)
# second CNN input model eye
input2 = Input(shape=(time_step,10))
conv21 = Conv1D(20, 10, strides=1, padding='same')(input2)
acti21 = Activation('elu')(conv21)
pool21 = MaxPooling1D(pool_size=2, strides=2, padding='same')(acti21)
Drop21 = Dropout(0.15)(pool21)
conv22 = Conv1D(40, 5, strides=1, padding='same')(Drop21)
acti22 = Activation('elu')(conv22)
pool22 = MaxPooling1D(pool_size=2, strides=2, padding='same')(acti22)
Drop22 = Dropout(0.15)(pool22)
conv23 = Conv1D(80, 3, strides=1, padding='same')(Drop22)
acti23 = Activation('elu')(conv23)
pool23 = MaxPooling1D(pool_size=2, strides=2, padding='same')(acti23)
Drop23 = Dropout(0.15)(pool23)
lstm_out21 = LSTM(64,return_sequences=True)(Drop23)
lstm_out22 = LSTM(64,return_sequences=True)(lstm_out21)
# third CNN input model phy
input3 = Input(shape=(time_step,4))
conv31 = Conv1D(20, 10, strides=1, padding='same')(input3)
acti31 = Activation('elu')(conv31)
pool31 = MaxPooling1D(pool_size=2, strides=1, padding='same')(acti31)
Drop31 = Dropout(0.15)(pool31)
conv32 = Conv1D(40, 5, strides=1, padding='same')(Drop31)
acti32 = Activation('elu')(conv32)
pool32 = MaxPooling1D(pool_size=2, strides=1, padding='same')(acti32)
Drop32 = Dropout(0.15)(pool32)
conv33 = Conv1D(80, 3, strides=1, padding='same')(Drop32)
acti33 = Activation('elu')(conv33)
pool33 = MaxPooling1D(pool_size=2, strides=1, padding='same')(acti33)
Drop33 = Dropout(0.15)(pool33)
lstm_out31 = LSTM(64,return_sequences=True)(Drop33)
lstm_out32 = LSTM(64,return_sequences=True)(lstm_out3
没有合适的资源?快使用搜索试试~ 我知道了~
CNN_LSTM_attention.zip
共6个文件
py:3个
jpg:2个
csv:1个
1.该资源内容由用户上传,如若侵权请联系客服进行举报
2.虚拟产品一经售出概不退款(资源遇到问题,请及时私信上传者)
2.虚拟产品一经售出概不退款(资源遇到问题,请及时私信上传者)
版权申诉
0 下载量 122 浏览量
2023-08-25
13:53:23
上传
评论
收藏 316KB ZIP 举报
温馨提示
基于Tensorflow的项目实现 项目中包含数据集和代码实现
资源推荐
资源详情
资源评论
收起资源包目录
CNN_LSTM_attention.zip (6个子文件)
main.py 10KB
Loss.jpg 17KB
CNN_LSTM_attention .py 9KB
data.csv 784KB
attention.py 9KB
Accuracy.jpg 17KB
共 6 条
- 1
资源评论
sjx_alo
- 粉丝: 1w+
- 资源: 1216
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功