import math;
import random;
from matplotlib import pyplot as plt
from tensorflow import keras
from keras.optimizers import Adam
from core.utils import Timer
from keras.models import Sequential
# from keras.models import Sequential
from keras.layers.core import Dense
import numpy as np
import matplotlib.animation as animation
from PIL import Image
# from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
import os
import pandas as pd
# from tensorflow.examples.tutorials.mnist import *
from keras.models import *
from keras.layers import *
from keras import callbacks
# import imageio
# from PyQt5 import QtWidgets
import time
import os
import psutil
def show_info(start):
    """Return the USS (unique set size) memory of the current process, in KB.

    The ``start`` argument is unused; it is kept only so existing call sites
    (which pass a label string, e.g. show_info('开始')) keep working.
    """
    pid = os.getpid()  # pid of the current process
    p = psutil.Process(pid)  # handle onto the process, to inspect its memory
    info = p.memory_full_info()
    # USS is memory private to this process; uss is in bytes, convert to KB.
    memory = info.uss / 1024
    return memory
def count_time(func):
    """Decorator that measures the wall-clock time of each call to *func*.

    Fixes over the original: the wrapper now forwards ``*args``/``**kwargs``
    to the wrapped function and returns its result (the original dropped
    both, so any decorated function silently returned None and could not
    take arguments).
    """
    from functools import wraps  # local import: leaves the file's import block untouched

    @wraps(func)
    def int_time(*args, **kwargs):
        start_time = time.time()  # call start time
        result = func(*args, **kwargs)
        over_time = time.time()  # call end time
        total_time = over_time - start_time
        # print('Total runtime: %s seconds' % total_time)
        return result
    return int_time
def count_info(func):
    """Decorator that measures the USS memory delta across a call to *func*.

    Fixes over the original: the wrapper now forwards ``*args``/``**kwargs``
    and returns the wrapped function's result (the original dropped both).
    """
    def float_info(*args, **kwargs):
        pid = os.getpid()  # pid of the current process
        p = psutil.Process(pid)
        # USS memory in KB before the call
        info_start = p.memory_full_info().uss / 1024
        result = func(*args, **kwargs)
        # USS memory in KB after the call
        info_end = p.memory_full_info().uss / 1024
        # print('This call used ' + str(info_end - info_start) + 'kB of memory')
        return result
    return float_info
# def create_gif(path, gif_name, duration):
# frames = []
# image_list = []
# for i in range(0,1999):
# image_list.append(path+"/%d.jpg" % i)
# for image_name in image_list:
# frames.append(imageio.imread(image_name))
# imageio.mimsave(gif_name, frames, 'GIF', duration=duration)
# return
# --- Script-level configuration and training-data preparation ---
inputfile = 'input.xlsx'  # Excel input (not referenced below; kept for compatibility)
outputfile = 'output.xls'  # Excel output
modelfile = 'modelweight.model'
os.chdir("D:/nn_bp")
# training_set = pd.read_csv("Social_Network_Ads.csv")
# Read the training table into a pandas DataFrame.
training_set1 = pd.read_csv("D:/nn_predict/traindata.csv")
feature = ['ac', 'cnl', 'gr', 'rs', 'den']  # input features (well-log curves)
label = ['so2']  # single target column to predict
depth = ['depth']
# Rows 0..53 of the table form the training set
# (the original comment claimed rows 0..58; range(0, 54) selects 54 rows).
training_set = training_set1.loc[range(0, 54)].copy()
# print(training_set[label])
# 2. Pre-processing and normalisation
data_mean = training_set.mean()
data_std = training_set.std()
data_train = (training_set - data_mean) / data_std  # z-score standardisation of features
y_data_train = (training_set) / training_set.max()  # targets scaled to [0, 1] by column max
k = training_set[label].max().values  # de-normalisation factor used by getfit()
def getfit(model, x):
    """Predict with *model* on *x* and rescale the output to original units.

    Targets were trained after division by their column maximum, so the raw
    predictions are multiplied by the module-level factor ``k`` to undo that
    normalisation.
    """
    raw = model.predict(x)
    return raw * k
def init():
    """Animation init hook: display frame 0 with the axes hidden."""
    frame_path = "D:/nn_bp/image/0.jpg"
    frame = Image.open(frame_path)
    plt.axis('off')  # hide the axes for a clean image frame
    return plt.imshow(frame)
def update(i):
    """Animation update hook: display frame *i* with the axes hidden.

    Bug fix: the original built the path as "D:/nn_bp/image" + str(i) + ".jpg",
    dropping the directory separator that init() uses ("image/0.jpg"), so
    frames resolved to e.g. "D:/nn_bp/image5.jpg" and failed to load.
    """
    fpath = "D:/nn_bp/image/" + str(i) + ".jpg"
    img = Image.open(fpath)
    plt.axis('off')  # hide the axes for a clean image frame
    return plt.imshow(img)
# Train a small Keras MLP (5 features -> 1 target) on the prepared well-log data.
# Parameters: x1 = Adam learning rate, x2 = Adam beta_1.
# NOTE(review): this function appears truncated in this file (the lines after
# model.fit are web-page pagination residue) and the file's indentation has been
# stripped by the extraction; code is preserved byte-for-byte, comments only
# translated/added.
def bp_main(x1,x2):
# print(k)
start = show_info('开始')
x_train = data_train[feature].iloc[:, :].values # feature data
y_train = y_data_train[label].iloc[:, :].values # label (target) data
depth_test = training_set[depth]
y_plot = training_set[label]
# print(x_train)
# print(y_train)
######### inputs x for prediction
predict_set = pd.read_csv("D:/nn_predict/traindata+juedui.csv")
feature1 = ['a', 'b', 'cc', 'd', 'e']
predict_set_feature = predict_set.loc[range(0, 57)].copy()
predict_mean = predict_set_feature.mean()
predict_std = predict_set_feature.std()
# NOTE(review): this divides by predict_set, not predict_std — almost certainly
# a typo (z-score standardisation needs the std); preserved as-is in this
# documentation-only pass. TODO confirm and fix.
predict_data = (predict_set_feature - predict_mean) / predict_set
predict_data_x = predict_data[feature1].iloc[:, :].values
filepath = "D:/nn_bp/model/weights-improvement-{epoch:00d}.hdf5";
callbacks_list = [
# Stop training when the monitored variable improves by less than min_delta
# for `patience` consecutive epochs; mode is the variable's direction:
# 'min' for losses (smaller is better), 'max' for e.g. accuracy.
callbacks.EarlyStopping(
monitor='val_loss',
min_delta=0.000001,
patience=2,
verbose=1,
mode='min'
),
# Save the best model seen during training; filepath is the save path.
# monitor: variable watched to decide which model is "best".
# save_weights_only: False saves the full model (like model.save()),
# True saves only the weights.
# save_best_only: whether to overwrite only when the monitored value improves.
callbacks.ModelCheckpoint(
filepath=filepath,
verbose=1,
monitor='val_loss',
save_weights_only=False,
mode='auto',
save_best_only=False
),
# Write per-epoch metrics (loss etc.) to a CSV file; append controls whether
# an existing file is appended to or overwritten.
callbacks.CSVLogger(
filename='loss/train.csv',
append=True
),
# Reduce the learning rate when the monitored variable has not improved
# (per min_delta) for `patience` epochs: lr = lr * factor (here /10);
# min_lr is the lower bound on the learning rate.
callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1, # lr*factor
patience=10,
mode='min',
min_delta=1e-4,
min_lr=0
)
]
low = 0;
up = 2 * math.pi;
# callback
# filepath = "D:/user/测井/中海油/nn_bp/model/weights-improvement-{epoch:00d}.hdf5";
# checkpoint = ModelCheckpoint(filepath, verbose=1, save_best_only=False, mode='auto');
# callbacks_list = [checkpoint];
#
# Build a sequential layered network model
# model = Sequential() # layered model
# model.add(Dense(64, input_dim=5, init='uniform',activation='sigmoid')) # input layer; Dense is a BP layer
# model.add(Dropout(0.5))
#
# model.add(Dense(32, init='uniform', activation='sigmoid'))
# model.add(Dropout(0.2))
#
# model.add(Dense(1, activation='tanh')) # output layer
timer = Timer()
timer.start()
adam_lr = x1
adam_beta_1 = x2
# model.compile(loss='mean_squared_error', optimizer=Adam(lr=adam_lr, beta_1=adam_beta_1), metrics=['accuracy']) # compile the model, loss='mean_squared_error'
model=Sequential()
model.add(Dense(input_dim=5, units=32, activation="relu"))###### number of input features
# model.add(Dropout(0.5))
model.add(Dense(units=16, activation="relu"))
# model.add(Dropout(0.5))
# model.add(Dense(units = 16, kernel_initializer = "uniform", activation="relu"))
# # # model.add(Dropout(0.5))
# model.add(Dense(units = 8, kernel_initializer = "uniform", activation="relu"))
# model.add(Dense(units=8, kernel_initializer="uniform", activation="relu"))
# model.add(Dropout(0.5))
model.add(Dense(units=1, activation="tanh"))#sigmoid
model.compile(optimizer=Adam(lr=adam_lr, beta_1=adam_beta_1), loss="mean_squared_error", metrics=["accuracy"])
his = model.fit(x_train, y_train, epochs=2000, batch_size=5, callbacks=callbacks_list,verbose=1,) # train the model (epochs=2000)
# NOTE(review): the original file ended here with web-page pagination residue
# ("- 1" … "- 6", "前往页" / "go to page"); the source appears to be truncated
# mid-way through bp_main.