# -*- coding: utf-8 -*-
'''
Train-test experiments on the LIVE1 stereoscopic image database: each run draws a
random 80% / 20% train / test split.
Input data: 365 images, each split into 220 patches of 32x32 pixels.
training images = 365 * 0.8 = 292
test images     = 365 * 0.2 = 73
'''
from __future__ import print_function
import scipy.io as sc
import numpy as np
import matplotlib.pyplot as plt
import os
import h5py
#import sys
#reload(sys)
#sys.setdefaultencoding('utf-8')
np.random.seed(1337) # for reproducibility
#from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
from keras.layers import Merge
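# NOTE: this script targets the Keras 1.x API (Convolution2D, Merge, border_mode, nb_epoch)
# and Python 2 (xrange); it will not run unmodified on Keras 2+ / Python 3.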
CurrentPath=os.getcwd()
path1=CurrentPath+'/dataset/LIVE1/diff'
path2=CurrentPath+'/dataset/LIVE1/left'
path3=CurrentPath+'/dataset/LIVE1/right'
files= os.listdir(path1)  # list the files in the 'diff' folder
files.sort()
#print(files)
files_l= os.listdir(path2)  # list the files in the 'left' folder
files_l.sort()
files_r= os.listdir(path3)  # list the files in the 'right' folder
files_r.sort()
#print(files)
X_train_diff=np.empty((292*220,32,32,1),dtype='float32')
y_train=np.empty((292*220,1),dtype='float32')
X_test_diff=np.empty((73*220,32,32,1),dtype='float32')
y_test=np.empty((73*220,1),dtype='float32') #astype('float32')
X_train_left=np.empty((292*220,32,32,1),dtype='float32')
X_test_left=np.empty((73*220,32,32,1),dtype='float32')
X_train_right=np.empty((292*220,32,32,1),dtype='float32')
X_test_right=np.empty((73*220,32,32,1),dtype='float32')
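# Preallocate patch arrays: 292 training images and 73 test images, each contributing
# 220 grayscale patches of size 32x32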
MosFile='DMOS_LIVE1.mat'
MosDic=sc.loadmat(MosFile)
MosData=MosDic['LIVE1']
MosData=MosData.astype('float32')
# Linearly rescale the DMOS scores to the range [-1, 1]
MosData=((MosData-MosData.min())/(MosData.max()-MosData.min()))*2-1
ExpNum=100  # number of train-test experiments
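# Each experiment reshuffles the image order and draws a fresh 80/20 train/test split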
for NumExp in xrange(ExpNum):
    x=range(365)  # 365 images in the database
    np.random.shuffle(x)  # shuffle the image order
    for i in xrange(292):  # 80% of the images are used for training
        dataFile=path1+'/'+files[x[i]]
        tempLoad=sc.loadmat(dataFile)
        temp=tempLoad['diff']
        SingleData=np.rollaxis(temp,2,0)
        X_train_diff[i*220:(i+1)*220,:,:,0]=SingleData
        y_train[i*220:(i+1)*220,:]=MosData[x[i]]
        print(files[x[i]])
        dataFile=path2+'/'+files_l[x[i]]
        tempLoad=sc.loadmat(dataFile)
        temp=tempLoad['left']
        SingleData=np.rollaxis(temp,2,0)
        X_train_left[i*220:(i+1)*220,:,:,0]=SingleData
        print(files_l[x[i]])
        dataFile=path3+'/'+files_r[x[i]]
        tempLoad=sc.loadmat(dataFile)
        temp=tempLoad['right']
        SingleData=np.rollaxis(temp,2,0)
        X_train_right[i*220:(i+1)*220,:,:,0]=SingleData
        print(files_r[x[i]])
        print('i:',i)
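    # Load the remaining 20% of the images as the test set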
    for i in xrange(73):
        dataFile=path1+'/'+files[x[i+292]]
        tempLoad=sc.loadmat(dataFile)
        temp=tempLoad['diff']
        SingleData=np.rollaxis(temp,2,0)
        X_test_diff[i*220:(i+1)*220,:,:,0]=SingleData
        y_test[i*220:(i+1)*220,:]=MosData[x[i+292]]
        dataFile=path2+'/'+files_l[x[i+292]]
        tempLoad=sc.loadmat(dataFile)
        temp=tempLoad['left']
        SingleData=np.rollaxis(temp,2,0)
        X_test_left[i*220:(i+1)*220,:,:,0]=SingleData
        dataFile=path3+'/'+files_r[x[i+292]]
        tempLoad=sc.loadmat(dataFile)
        temp=tempLoad['right']
        SingleData=np.rollaxis(temp,2,0)
        X_test_right[i*220:(i+1)*220,:,:,0]=SingleData
        print('i:',i)
    # Normalize pixel values to [0, 1]
    X_train_diff/=255
    X_test_diff/=255
    X_train_left/=255
    X_test_left/=255
    X_train_right/=255
    X_test_right/=255
    batch_size = 128
    nb_classes = 1
    nb_epoch = 70
    # input image dimensions
    img_rows, img_cols = 32, 32
    # number of convolutional filters to use
    nb_filters = 64
    # size of pooling area for max pooling
    pool_size = (3, 3)
    # convolution kernel size
    kernel_size = (3, 3)
    # the data, shuffled and split between train and test sets
    #(X_train, y_train), (X_test, y_test) = mnist.load_data()
    #if K.image_dim_ordering() == 'th':
    #    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    #    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    #    input_shape = (1, img_rows, img_cols)
    #else:
    #    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
    #    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    #    input_shape = (img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
    print('X_train shape:', X_train_diff.shape)
    print(X_train_diff.shape[0], 'train samples')
    print(X_test_diff.shape[0], 'test samples')
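    # Three identical CNN branches process the difference, left, and right image patches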
    diff_branch = Sequential()
    diff_branch.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                  border_mode='valid',
                                  input_shape=input_shape))
    diff_branch.add(Activation('relu'))
    diff_branch.add(MaxPooling2D(pool_size=pool_size))
    diff_branch.add(Dropout(0.25))
    diff_branch.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
    diff_branch.add(Activation('relu'))
    diff_branch.add(MaxPooling2D(pool_size=(8,8)))
    diff_branch.add(Dropout(0.25))
    left_branch = Sequential()
    left_branch.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                  border_mode='valid',
                                  input_shape=input_shape))
    left_branch.add(Activation('relu'))
    left_branch.add(MaxPooling2D(pool_size=pool_size))
    left_branch.add(Dropout(0.25))
    left_branch.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
    left_branch.add(Activation('relu'))
    left_branch.add(MaxPooling2D(pool_size=(8,8)))
    left_branch.add(Dropout(0.25))
    right_branch = Sequential()
    right_branch.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                   border_mode='valid',
                                   input_shape=input_shape))
    right_branch.add(Activation('relu'))
    right_branch.add(MaxPooling2D(pool_size=pool_size))
    right_branch.add(Dropout(0.25))
    right_branch.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
    right_branch.add(Activation('relu'))
    right_branch.add(MaxPooling2D(pool_size=(8,8)))
    right_branch.add(Dropout(0.25))
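    # Merge the three branch outputs by concatenation and regress a single quality score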
    merged = Merge([diff_branch, left_branch, right_branch], mode='concat')
    final_model = Sequential()
    final_model.add(merged)
    final_model.add(Flatten())
    final_model.add(Dense(300))
    final_model.add(Activation('relu'))
    final_model.add(Dropout(0.5))
    final_model.add(Dense(nb_classes))
    final_model.add(Activation('linear'))
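    # Train with mean squared error loss (RMSprop optimizer) and track mean absolute error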
    #sgd=SGD()
    final_model.compile(loss='mean_squared_error',
                        optimizer='rmsprop',
                        metrics=['mae'])
    final_model.fit([X_train_diff,X_train_left,X_train_right], y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                    verbose=1, validation_data=([X_test_diff,X_test_left,X_test_right], y_test))
    score = final_model.evaluate([X_test_diff,X_test_left,X_test_right], y_test, batch_size=batch_size)
    out = final_model.predict([X_test_diff,X_test_left,X_test_right], batch_size=batch_size)
    sc.savemat('LIVE1out_'+str(NumExp),{'out':out})
    sc.savemat('LIVE1Dmos_'+str(NumExp),{'y_test':y_test})
    # Average the 220 patch predictions of each test image to get an image-level score
    out1=out.reshape(73,220)
    out_mean2=np.mean(out1,axis=1)
    y_test_re=y_test.reshape(73,220)
    y_test_mean=np.mean(y_test_re,axis=1)
    print('Test loss (MSE):', score[0])
    print('Test MAE:', score[1])
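    # out_mean2 and y_test_mean hold the per-image predicted and ground-truth scores for
    # this run; correlation metrics (e.g., PLCC/SROCC) could be computed from them here.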
# batch_size = 128
# nb_classes = 1
# nb_epoch = 200  # number of epochs
#
# # input image dimensions
# img_rows,img_cols,img_time = 32,32,10
# # number of convolutional filters to use
# nb_filters = (64,128)
# # size of pooling area for max pooling
#