#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 23 10:51:28 2017
@author: cdn
"""
import numpy as np
np.random.seed(1234)
import timeit
import os
import matplotlib.pyplot as plt
import scipy.io as sio
from sklearn.cross_validation import StratifiedKFold
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected, convolution2d, flatten, dropout
from tensorflow.python.layers.pooling import max_pooling3d
from tensorflow.python.ops.nn import relu,softmax
from tensorflow.python.framework.ops import reset_default_graph
import six.moves.cPickle as pickle
from confusionmatrix import ConfusionMatrix
def onehot(t, num_classes):
    """Convert a 1-D array of integer class labels to a one-hot matrix.

    Parameters
    ----------
    t : np.ndarray of int, shape (n_samples,)
        Class indices, each in [0, num_classes).
    num_classes : int
        Number of columns of the output.

    Returns
    -------
    np.ndarray of float, shape (n_samples, num_classes)
        Row i holds a single 1.0 at column t[i], zeros elsewhere.
    """
    out = np.zeros((t.shape[0], num_classes))
    # Vectorized fancy-indexing assignment replaces the per-row Python loop.
    out[np.arange(t.shape[0]), np.asarray(t, dtype=np.intp)] = 1
    return out
def load_cv(cv_name='index_10fold.pkl', fold_idx=0):
    """Load pre-computed cross-validation indices for one fold.

    The pickle file must contain a 2-tuple ``(train_idx, test_idx)`` where
    each element is a per-fold sequence of sample indices.

    Parameters
    ----------
    cv_name : str
        Path to the pickle file holding the fold indices.
    fold_idx : int
        Fold to load.

    Returns
    -------
    (train_index, test_index, val_index)
        ``train_index`` is an np.ndarray of the fold's training indices with
        the validation samples removed; ``test_index`` is this fold's test
        indices; ``val_index`` is the *previous* fold's test indices reused
        for validation (wraps to the last fold when ``fold_idx == 0``).
    """
    # 'with' guarantees the file handle is closed even if unpickling raises.
    with open(cv_name, 'rb') as input_doc:
        train_idx, test_idx = pickle.load(input_doc)
    test_index = test_idx[fold_idx]
    # Negative index at fold 0 deliberately wraps to the last test fold.
    val_index = test_idx[fold_idx - 1]
    # Exclude validation samples from training; set order is unspecified.
    train_index = np.array(list(set(train_idx[fold_idx]) - set(val_index)))
    return train_index, test_index, val_index
def load_data(fold_index):
    """Load ADNI PET volumes for AD/NC subjects and split them by CV fold.

    Parameters
    ----------
    fold_index : int
        Cross-validation fold passed through to ``load_cv``.

    Returns
    -------
    (x_train, y_train, x_test, y_test, x_valid, y_valid, size_input)
        Arrays of volumes/labels for each split, plus the per-sample input
        size ``[1, sizeX, sizeY, sizeZ]``. Label 1 = AD, label 0 = normal.
    """
    mat = sio.loadmat('ADNI/PET/Data_PET_d3.mat')
    ad_vols = mat['AD_data_PET_d3']
    nc_vols = mat['NORMAL_data_PET_d3']
    # AD_data = data['AffineAD_128']
    # NC_data = data['AffineNC_128']
    n_ad = ad_vols.shape[0]
    n_nc = nc_vols.shape[0]
    size_input = [1, ad_vols.shape[1], ad_vols.shape[2], ad_vols.shape[3]]
    # Fixed seed keeps the permutation identical on every run and every fold,
    # so the indices from load_cv always address the same samples.
    np.random.seed(1234)
    order = np.random.permutation(n_ad + n_nc)
    volumes = np.concatenate((ad_vols, nc_vols), axis=0)[order]
    labels = np.hstack((np.ones((n_ad,)), np.zeros((n_nc,))))[order]
    train_id, test_id, valid_id = load_cv(fold_idx=fold_index)
    return (volumes[train_id], labels[train_id],
            volumes[test_id], labels[test_id],
            volumes[valid_id], labels[valid_id],
            size_input)
# ---------------------------------------------------------------------------
# NOTE(review): everything from here down to the fold loop looks like a
# leftover single-fold experiment (see the commented-out "#for fi in ..."
# below). The accuracy accumulators and the timer are re-initialised again
# right before the loop, and the loop reloads the data itself, so this
# preamble only serves to print the fold-0 data shapes.
# ---------------------------------------------------------------------------
n_fold = 10
# Per-fold accuracy accumulators (re-initialised again below).
train_accuracy = np.zeros((n_fold,))
test_accuracy = np.zeros((n_fold,))
valid_accuracy = np.zeros((n_fold,))
t1_time = timeit.default_timer()
#for fi in range(n_fold):
# Hyper-parameters of this leftover run; the fold loop redefines its own set.
num_classes = 2
num_filters_conv1 = 10
num_filters_conv2 = 25
num_filters_conv3 = 40
num_filters_conv4 = 40
dense_num = 100
size_conv = 3  # kernel edge length (unused below; kernel_size_conv1 is used instead)
pool_size = 2
batch_size = 5
nb_epoch = 50
fi = 0
# Load fold 0 and add an explicit channel axis: (N, 1, X, Y, Z).
X_train,y_train,X_test,y_test,X_val,y_val,size_input = load_data(fi)
X_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1], X_train.shape[2],X_train.shape[3])
X_val = X_val.reshape(X_val.shape[0], 1, X_val.shape[1],X_val.shape[2],X_val.shape[3])
X_test = X_test.reshape(X_test.shape[0], 1, X_test.shape[1], X_test.shape[2],X_test.shape[3])
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_val.shape[0], 'validate samples')
print(X_test.shape[0], 'test samples')
# Re-initialisation actually used by the fold loop below.
train_accuracy = np.zeros((n_fold,))
test_accuracy = np.zeros((n_fold,))
valid_accuracy = np.zeros((n_fold,))
t1_time = timeit.default_timer()
# ---------------------------------------------------------------------------
# Main cross-validation loop: for each fold, rebuild the TF graph, train the
# 3D CNN and record accuracies.
# NOTE(review): the original file's indentation was lost in this copy, and the
# 'try' block near the end is cut off — the except/finally and the rest of the
# training loop continue beyond this excerpt.
# ---------------------------------------------------------------------------
for fi in range(n_fold):
print('Now running on fold %d'%(fi+1))
num_classes = 2
# Per-fold data: volumes as float32, labels as int32 class indices.
x_train,y_train,x_test,y_test,x_valid,y_valid,size_input = load_data(fi)
nchannels,rows,cols,deps = size_input
x_train = x_train.astype('float32')
x_train = x_train.reshape((-1,nchannels,rows,cols,deps))
targets_train = y_train.astype('int32')
x_valid = x_valid.astype('float32')
x_valid = x_valid.reshape((-1,nchannels,rows,cols,deps))
targets_valid = y_valid.astype('int32')
x_test = x_test.astype('float32')
x_test = x_test.reshape((-1,nchannels,rows,cols,deps))
targets_test = y_test.astype('int32')
# define a simple feed forward neural network
# hyperameters of the model
num_classes = 2
channels = x_train.shape[1]
height = x_train.shape[2]
width = x_train.shape[3]
depth = x_train.shape[4]
num_filters_conv1 = 10
num_filters_conv2 = 25
num_filters_conv3 = 40
num_filters_conv4 = 40
kernel_size_conv1 = [3, 3, 3]  # cubic 3x3x3 kernel [depth, height, width]
pool_size = [2,2,2]
stride_conv1 = [1,1,1]  # stride along each spatial axis
num_l1 = 100
# resetting the graph so each fold starts from freshly initialised weights
reset_default_graph()
# Setting up placeholder, this is where your data enters the graph!
x_pl = tf.placeholder(tf.float32, [None, channels, height, width, depth])
l_reshape = tf.transpose(x_pl, [0, 2, 3, 4, 1]) # move channels last: NCDHW -> NDHWC, which TF expects
is_training = tf.placeholder(tf.bool)#used for dropout
# Building the layers of the neural network
# we define the variable scope, so we more easily can recognise our variables later
# NOTE(review): contrib's convolution2d dispatches on input rank, so with a
# 5-D input and a 3-element kernel these are effectively 3-D convolutions —
# confirm against the installed TF 1.x version.
l_conv1 = convolution2d(l_reshape, num_filters_conv1, kernel_size_conv1, stride_conv1,activation_fn=relu, scope="l_conv1")
# pool_size doubles as the stride, halving each spatial dimension
l_maxpool1 = max_pooling3d(l_conv1,pool_size,pool_size)
l_conv2 = convolution2d(l_maxpool1, num_filters_conv2, kernel_size_conv1, stride_conv1,activation_fn=relu,scope="l_conv2")
l_maxpool2 = max_pooling3d(l_conv2,pool_size,pool_size)
l_conv3 = convolution2d(l_maxpool2, num_filters_conv3, kernel_size_conv1, stride_conv1,activation_fn=relu,scope="l_conv3")
l_maxpool3 = max_pooling3d(l_conv3,pool_size,pool_size)
l_conv4 = convolution2d(l_maxpool3, num_filters_conv4, kernel_size_conv1, stride_conv1,activation_fn=relu,scope="l_conv4")
l_flatten = flatten(l_conv4, scope="flatten") # use l_conv1 instead of l_reshape
l1 = fully_connected(l_flatten, num_l1, activation_fn=relu, scope="l1")
# dropout is active only when is_training is fed True
l1 = dropout(l1, is_training=is_training, scope="dropout")
y = fully_connected(l1, num_classes, activation_fn=softmax, scope="y")
# y_ is a placeholder variable taking on the value of the target batch.
y_ = tf.placeholder(tf.float32, [None, num_classes])
# computing cross entropy per sample; 1e-8 guards against log(0)
cross_entropy = -tf.reduce_sum(y_ * tf.log(y+1e-8), reduction_indices=[1])
# averaging over samples
cross_entropy = tf.reduce_mean(cross_entropy)
# defining our optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
# applying the gradients
train_op = optimizer.minimize(cross_entropy)
#Test the forward pass
# x = np.random.normal(0,1, (45, 1,65, 52, 51)).astype('float32') #dummy data
# restricting memory usage, TensorFlow is greedy and will use all memory otherwise
gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
# initialize the Session
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts))
sess.run(tf.global_variables_initializer())
# res = sess.run(fetches=[y], feed_dict={x_pl: x})
# res = sess.run(fetches=[y], feed_dict={x_pl: x, is_training: False}) # for when using dropout
# print "y", res[0].shape
#Training Loop
from confusionmatrix import ConfusionMatrix
batch_size = 10
num_epochs = 50
num_samples_train = x_train.shape[0]
num_batches_train = num_samples_train // batch_size
num_samples_valid = x_valid.shape[0]
num_batches_valid = num_samples_valid // batch_size
train_acc, train_loss = [], []
valid_acc, valid_loss = [], []
test_acc, test_loss = [], []
cur_loss = 0
loss = []
try:
for epoch in range(num_epochs):
#Forward->Backprob->Update params
cur_loss = 0
for i in range(num_batches_train):
# mini-batch slice; samples past the last full batch are dropped
idx = range(i*batch_size, (i+1)*batch_size)
x_batch = x_train[idx]
target_batch = targets_train[idx]
# feed_dict_train = {x_pl: x_batch, y_: onehot(target_batch, num_classes)}
feed_dict_train = {x_pl: x_batch, y_: onehot(target_batch, num_classes), is_training: True}
没有合适的资源?快使用搜索试试~ 我知道了~
利用tensorflow实现3DCNN
共2个文件
py:2个
4星 · 超过85%的资源 需积分: 49 242 下载量 136 浏览量
2017-04-25
15:33:58
上传
评论 5
收藏 5KB ZIP 举报
温馨提示
Tensorflow 3D CNN
资源推荐
资源详情
资源评论
收起资源包目录
tensorflow3dCNN.zip (2个子文件)
tensorflow3dCNN
3D_CNN_tensorflow.py 11KB
confusionmatrix.py 4KB
共 2 条
- 1
嘀嘀嗒嘀啊
- 粉丝: 84
- 资源: 2
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功
- 1
- 2
前往页