Before this step, have the model to be fine-tuned ready.
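The post does not show how that model is assembled. A minimal sketch, assuming a VGG16 base with a randomly initialized classification head (the input shape and class count are placeholders to adapt to your data, not values from the original):

from keras.applications import VGG16
from keras.layers import Dense, Flatten
from keras.models import Model

# placeholder input shape and class count -- adapt to your dataset
base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
x = Flatten()(base.output)
x = Dense(256, activation='relu')(x)
out = Dense(10, activation='softmax')(x)
# give the model a name, since fine_tune_model uses model.name for the checkpoint file
model = Model(inputs=base.input, outputs=out, name='vgg16_finetune')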
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
from keras.preprocessing.image import ImageDataGenerator  # only needed for the optional augmentation path below

# fine-tune the model
def fine_tune_model(model, optimizer, batch_size, epochs, freeze_num):
    '''
    description: fine-tune the given pre-trained model and save it in .hdf5 format
    model: the model passed in (VGG16, ResNet50, ...)
    optimizer: optimizer used when fine-tuning all layers; the first phase defaults to adadelta
    batch_size: size of each batch; 32/64/128 recommended
    epochs: number of epochs for fine-tuning all layers
    freeze_num: number of convolutional layers to freeze in the first phase
    '''
    # datagen = ImageDataGenerator(
    #     rescale=1./255,
    #     # shear_range=0.2,
    #     # zoom_range=0.2,
    #     # horizontal_flip=True,
    #     # vertical_flip=True,
    #     # fill_mode="nearest"
    # )
    # datagen.fit(X_train)
    # first phase: train only the fully connected layers (whose weights are randomly initialized)
    # freeze all convolutional layers
    for layer in model.layers[:freeze_num]:
        layer.trainable = False

    # phase 1 uses adadelta, as described in the docstring
    model.compile(optimizer='adadelta',
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])

    # model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
    #                     steps_per_epoch=len(x_train) / 32,
    #                     epochs=3,
    #                     shuffle=True,
    #                     verbose=1,
    #                     validation_data=datagen.flow(x_valid, y_valid))
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=3,
              shuffle=True,
              verbose=1,
              validation_data=(x_valid, y_valid))
    print('Finish step_1')
    # second phase: fine-tune all layers
    for layer in model.layers[:]:
        layer.trainable = True

    # note: Keras >= 2.3 reports this metric as "val_accuracy" instead of "val_acc"
    rc = ReduceLROnPlateau(monitor="val_acc",
                           factor=0.2,
                           patience=4,
                           verbose=1,
                           mode='max')

    model_name = model.name + ".hdf5"
    mc = ModelCheckpoint(model_name,
                         monitor="val_acc",
                         save_best_only=True,
                         verbose=1,
                         mode='max')

    el = EarlyStopping(monitor="val_acc",
                       min_delta=0,
                       patience=5,
                       verbose=1,
                       restore_best_weights=True)

    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=["accuracy"])
    # history_fit = model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
    #                                   steps_per_epoch=len(x_train) / 32,
    #                                   epochs=epochs,
    #                                   shuffle=True,
    #                                   verbose=1,
    #                                   callbacks=[mc, rc, el],
    #                                   validation_data=datagen.flow(x_valid, y_valid))
    history_fit = model.fit(x_train,
                            y_train,
                            batch_size=batch_size,
                            epochs=epochs,
                            shuffle=True,
                            verbose=1,
                            validation_data=(x_valid, y_valid),
                            callbacks=[mc, rc, el])
    print('Finish step_2')
    return history_fit
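A usage sketch, with illustrative hyperparameter values that are not from the original. Note that the function reads x_train, y_train, x_valid and y_valid as globals, so they must already be loaded as NumPy arrays with one-hot labels:

from keras.optimizers import Adam

# illustrative values only; with the VGG16 sketch above, freeze_num=19 freezes the
# entire convolutional base (newer Keras spells the lr argument as learning_rate)
history = fine_tune_model(model,
                          optimizer=Adam(lr=1e-5),
                          batch_size=32,
                          epochs=20,
                          freeze_num=19)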