import tensorflow as tf
import pickle
import utils_cifar
import sys
import os
import time
import string
import random
import numpy as np
import utils_data
import set_data_path
try:
    import cPickle
except ImportError:
    import pickle as cPickle
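# cPickle exists only on Python 2; on Python 3 the plain pickle module already
# provides the C implementation, so it is imported here under the same name.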
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
############## Incremental Learning Settings ######################
gpu = '0'
batch_size = 72 # Batch size
n = 5 # Set the depth of the architecture: n = 5 -> 32 layers (See He et al. paper)
nb_val = 0 # Validation samples per class
nb_cl = 10 # Classes per group
nb_groups = int(100/nb_cl)
nb_protos = 10 # Number of prototypes per class at the end: total protoset memory/ total number of classes
epochs = 50 # Total number of epochs
lr_old = 0.05 # Initial learning rate
lr_strat = [30, 40] # Epochs where learning rate gets decreased
lr_factor = 5. # Learning rate decrease factor
wght_decay = 0.00001 # Weight Decay
nb_runs = 1 # Total number of runs (random ordering of classes at each run); 10 groups x 10 classes = 100 classes
np.random.seed(1993) # Fix the random seed
Cifar_train_file, Cifar_test_file, save_path = set_data_path.get_data_path()
################################################################
# Loading dataset
print("\n")
# Initialization
dictionary_size = 500-nb_val
# top1_acc_list_cumul = np.zeros((int(100 / nb_cl), 3, nb_runs))
# top1_acc_list_ori = np.zeros((int(100 / nb_cl), 3, nb_runs))
# Run the whole experiment once for each incremental step size
for step_classes in [2, 5, 10, 20, 50]:
    save_model_path = save_path + 'step_' + str(step_classes) + '_classes' + '/NCM/'
    nb_cl = step_classes  # Classes per group
    nb_groups = int(100 / nb_cl)
    loss_batch = []
    class_means = np.zeros((128, 100, 2, nb_groups))
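    # Assumed layout (iCaRL-style, not confirmed by the source): a 128-d feature
    # mean per class, with axis 2 distinguishing two kinds of mean (e.g. exemplar
    # mean vs. full-data mean) and axis 3 indexing the incremental group.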
    files_protoset = []
    for i in range(100):
        files_protoset.append([])  # One exemplar list per class, filled as classes are learned
    for itera in range(nb_groups):  # One incremental step per group of nb_cl classes
        # Train longer on the first increment; later increments start from the
        # previous weights, so fewer epochs suffice.
        if itera == 0:
            epochs = 80
        else:
            epochs = 50
"""
1、先构建网络,定义一些变量
2、构建损失函数
3、构建循环网络
4、筛选保留集样本
5、先实现残差网络 再实现增量学习
6、实现简单的残差网络
"""
        # Select the order for the class learning
        order = np.load('./order.npy', encoding='latin1')
        # Create the neural network model
        print('Run {0} starting ...'.format(itera))
        print("Building model and compiling functions...")
        image_train, label_train, image_test, label_test = utils_data.load_data(Cifar_train_file, Cifar_test_file)
        # Next batch
        image_batch, label_batch_0, file_protoset_batch = utils_data.Prepare_train_data_batch(
            image_train, label_train, files_protoset, itera, order, nb_cl, batch_size)
        label_batch = tf.one_hot(label_batch_0, 100)
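        # Labels are one-hot over all 100 CIFAR-100 classes, so individual class
        # columns can be sliced out below for the old-/new-class losses.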
        # First increment: train from scratch, no distillation needed
        if itera == 0:
            variables_graph, variables_graph2, scores, scores_stored = utils_cifar.prepareNetwork(gpu, image_batch)
            with tf.device('/gpu:0'):
                scores = tf.concat(scores, 0)
                l2_reg = wght_decay * tf.reduce_sum(
                    tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope='ResNet34'))
                loss_class = tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(labels=label_batch, logits=scores))
                loss = loss_class + l2_reg
                learning_rate = tf.placeholder(tf.float32, shape=[])
                opt = tf.train.MomentumOptimizer(learning_rate, 0.9)  # TODO: revisit the optimizer settings
                train_step = opt.minimize(loss, var_list=variables_graph)
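        # On the first group the objective is plain per-class sigmoid cross-entropy
        # plus L2 regularization; the distillation term only appears from the
        # second group onward (elif branch below).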
        elif itera > 0:
            # Knowledge distillation: the previous network's outputs serve as soft labels
            variables_graph, variables_graph2, scores, scores_stored = utils_cifar.prepareNetwork(gpu, image_batch)
            # Op to mirror the training network's weights into the stored copy network
            op_assign = [(variables_graph2[i]).assign(variables_graph[i]) for i in range(len(variables_graph))]
            with tf.device('/gpu:0'):
                scores = tf.concat(scores, 0)
                scores_stored = tf.concat(scores_stored, 0)
                old_cl = (order[range(itera * nb_cl)]).astype(np.int32)
                new_cl = (order[range(itera * nb_cl, nb_groups * nb_cl)]).astype(np.int32)
                label_old_classes = tf.sigmoid(tf.stack([scores_stored[:, i] for i in old_cl], axis=1))
                label_new_classes = tf.stack([label_batch[:, i] for i in new_cl], axis=1)
                pred_old_classes = tf.stack([scores[:, i] for i in old_cl], axis=1)
                pred_new_classes = tf.stack([scores[:, i] for i in new_cl], axis=1)
                l2_reg = wght_decay * tf.reduce_sum(
                    tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope='ResNet34'))
                loss_class = tf.reduce_mean(tf.concat(
                    [tf.nn.sigmoid_cross_entropy_with_logits(labels=label_old_classes, logits=pred_old_classes),
                     tf.nn.sigmoid_cross_entropy_with_logits(labels=label_new_classes, logits=pred_new_classes)], 1))
                loss = loss_class + l2_reg
                learning_rate = tf.placeholder(tf.float32, shape=[])
                opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
                train_step = opt.minimize(loss, var_list=variables_graph)
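        # iCaRL-style objective: classes from earlier groups take the sigmoid of
        # the frozen network's logits as targets (distillation), new classes take
        # their one-hot ground truth; both sigmoid cross-entropy terms are
        # concatenated into a single tensor before averaging.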
        with tf.Session(config=config) as sess:
            # Launch the data reader
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            sess.run(tf.global_variables_initializer())
            lr = lr_old
            # Load the weights for the learning network and the copy network
            if itera > 0:
                void0 = sess.run([(variables_graph[i]).assign(save_weights[i]) for i in range(len(variables_graph))])
                void1 = sess.run(op_assign)
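            # save_weights holds the variables captured at the end of the previous
            # increment (see the save step below); op_assign then mirrors them into
            # the frozen copy network that produces the distillation targets.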
            print('training*****************************************************')
            print("Batch of classes {} out of {} batches".format(itera + 1, nb_groups))
            for epoch in range(epochs):  # Train the model
                print('Epoch %i' % epoch)
                for i in range(int(np.ceil(500 * nb_cl / batch_size))):  # ~500 training images per class
                    loss_class_val, _, sc, lab = sess.run(
                        [loss_class, train_step, scores, label_batch_0],
                        feed_dict={learning_rate: lr})
                    loss_batch.append(loss_class_val)
                    # Print the mean training error every 10 batches
                    if len(loss_batch) == 10:
                        print("Training error:")
                        print(np.mean(loss_batch))
                        loss_batch = []
                    # Print the training top-1 accuracy every 20 batches
                    if (i + 1) % 20 == 0:
                        stat = []
                        stat += ([ll in best for ll, best in zip(lab, np.argsort(sc, axis=1)[:, -1:])])
                        stat = np.average(stat)
                        print('Training accuracy %f' % stat)
                # Divide the learning rate by lr_factor at the epochs listed in lr_strat
                if epoch in lr_strat:
                    lr /= lr_factor
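            # With lr_old = 0.05, lr_strat = [30, 40] and lr_factor = 5, the rate
            # is divided by 5 at the end of epochs 30 and 40: 0.05 -> 0.01 -> 0.002.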
            coord.request_stop()
            coord.join(threads)
            # Capture the weights so the next increment can restore them into both networks
            print('saving model')
            save_weights = sess.run([variables_graph[i] for i in range(len(variables_graph))])
            # NOTE: the filename suffix below is an assumption; the original call
            # was truncated after 'model-iterati'.
            utils_cifar.save_model(save_model_path + 'model-iteration-%i' % itera)
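
# The plan's step 4 (selecting the samples to keep in the protoset) is not shown
# in this file. Below is a minimal numpy sketch of iCaRL-style herding selection,
# assuming per-class features arrive as an (n_samples, 128) array; the function
# name and its integration with utils_cifar/files_protoset are hypothetical.
def select_exemplars_by_herding(features, nb_protos):
    """Greedily pick exemplar indices whose running mean tracks the class mean."""
    class_mean = features.mean(axis=0)
    selected = []
    running_sum = np.zeros_like(class_mean)
    for k in range(1, nb_protos + 1):
        # Score each sample by how close the class mean would be if it were added next
        candidate_means = (running_sum + features) / k
        distances = np.linalg.norm(class_mean - candidate_means, axis=1)
        distances[selected] = np.inf  # Never pick the same sample twice
        best = int(np.argmin(distances))
        selected.append(best)
        running_sum += features[best]
    return selected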