% Split AR face images into overlapping tiles.
% For the first C subjects, take the 1st and 10th image of each subject,
% resize to 128x128, cut a 3x3 grid of 64x64 tiles with a 32-pixel stride
% (50% overlap), upscale each tile to 224x224 and save it as
% m-<subject>-<image>-<tileIndex>.png in dst_path.
clc
clear
src_path = 'E:\人脸数据库\AR\';    % source image directory
dst_path = 'E:\人脸数据库\ARdiv\'; % destination directory for the tiles
list = dir(src_path);              % every entry in the source directory
C = 3;                             % number of subjects to process
% Keep only plain files: robust against '.', '..' and any subdirectories
% (the original list(3:end) assumed exactly two dot-entries came first).
img_list = list(~[list.isdir]);
index = [1, 10];                   % per subject: use the 1st and 10th image
size1 = [128, 128];                % resize applied before tiling
size2 = [224, 224];                % resize applied to each tile before saving
tile  = 64;                        % tile edge length in pixels
step  = 32;                        % tile stride (50% overlap)
nGrid = 3;                         % tiles per row/column (3x3 grid)
for i = 1:C
    disp(i)
    for j = 1:numel(index)
        % NOTE(review): assumes each subject owns 26 consecutive entries in
        % img_list, sorted by name -- confirm against the AR naming scheme.
        im  = imread(strcat(src_path, img_list((i-1)*26 + index(j)).name));
        im_ = imresize(im, size1);
        % Cut the resized image into overlapping tiles.
        for m = 0:nGrid-1
            for n = 0:nGrid-1
                rows = m*step+1 : m*step+tile;
                cols = n*step+1 : n*step+tile;
                im_div = im_(rows, cols, :);      % extract one tile
                im_div = imresize(im_div, size2); % upscale to output size
                % Tile index runs 1..9 in row-major order.
                imwrite(im_div, [dst_path 'm-' num2str(i) '-' num2str(j) ...
                    '-' num2str(m*nGrid + n + 1) '.png'], 'png');
            end
        end
    end
end
% clear all;
% clc;
% % addpath PCA
% run(fullfile(fileparts(mfilename('fullpath')),...
% 'matconvnet-1.0-beta25', 'matlab', 'vl_setupnn.m')) ;%它的主要过程就是,先切换到脚本所在的目录,运行脚本,然后再返回原目录
% net = load('E:/VGGfaceModel/vgg-face.mat') ;%加载VGG-face模型
% list = dir('D:/R2018b_CN(64bit)/matlab2018b/AR');%获取指定目录下所有子文件夹和文件
% C = 100;%获取图像的总人数
% img_list = list(3:end);%获取list中目录的一部分(前两个保存的是点)
% index = [1,10];%获取该人的第1张图片和第10张图片
% %% 建立基于VGGFace的Gallery字典
% dictionary = [];%字典
% for i = 1:C
% disp(i)
% numEachGalImg(i) = 0;
% for j = 1:2
% im = imread(strcat('D:/R2018b_CN(64bit)/matlab2018b/AR/',img_list((i-1)*26+index(j)).name));%读取对应图片
% im_ = single(im) ; % 改为单精度
% im_ = imresize(im_, net.meta.normalization.imageSize(1:2)) ;%更改尺寸,使其符合模型的尺寸
% for k = 1:3
% im1_(:,:,k) = im_;
% end
% im2_ = bsxfun(@minus,im1_,net.meta.normalization.averageImage);%im1_减去net.meta.normalization.averageImage
% res = vl_simplenn(net, im2_) ;%关键命令是vl_simplenn作为CNN的输入的包装器net和预处理图像im_,并产生结果的结构Res作为输出
% feature_p(:,j) = res(36).x(:);%保存输出的特征(1*4096维向量)
% end
% numEachGalImg(i) = numEachGalImg(i) + size(feature_p,2);%获取feature_p的列数(记录每个人的照片数量)
% dictionary = [dictionary feature_p];%形成字典
% end
% %% PCA对特征进行降维
% FaceContainer = double(dictionary');
% [pcaFaces W meanVec] = fastPCA(FaceContainer,128);
% X = pcaFaces;
% [X,A0,B0] = scaling(X);
% LFWparameter.mean = meanVec;
% LFWparameter.A = A0;
% LFWparameter.B = B0;
% LFWparameter.V = W;
% imfo = LFWparameter;
% train_fea = (double(FaceContainer)-repmat(imfo.mean, size(FaceContainer,1), 1))*imfo.V;
% dictionary = scaling(train_fea,1,imfo.A,imfo.B);
% for i = 1:size(dictionary, 1)
% dictionary(i,:) = dictionary(i,:)/norm(dictionary(i,:));
% end
% dictionary = double(dictionary);
% totalGalKeys = sum(numEachGalImg);
% cumNumEachGalImg = [0; cumsum(numEachGalImg')];
%
% %% 利用稀疏编码进行特征匹配
% % sparse coding parameters
% if ~exist('opt_choice', 'var')
% opt_choice = 1;
% end
% num_bases = 128;
% beta = 0.4;
% batch_size = size(dictionary, 1);
% num_iters = 5;
% if opt_choice==1
% sparsity_func= 'L1';
% epsilon = [];
% elseif opt_choice==2
% sparsity_func= 'epsL1';
% epsilon = 0.01;
% end
%
% Binit = [];
%
% fname_save = sprintf('../results/sc_%s_b%d_beta%g_%s', sparsity_func, num_bases, beta, datestr(now, 30));
%
% AtA = dictionary*dictionary';
% for i = 1:C
% fprintf('%s \n',num2str(i));
% tic
% im = imread(strcat('../data/AR/',img_list((i-1)*26+26).name));
% im_ = single(im) ; % note: 255 range
% im_ = imresize(im_, net.meta.normalization.imageSize(1:2)) ;
% for k = 1:3
% im1_(:,:,k) = im_;
% end
% im2_ = bsxfun(@minus,im1_,net.meta.normalization.averageImage) ;
% res = vl_simplenn(net, im2_) ;
% feature_p = res(36).x(:);
% feature_p = (double(feature_p)'-imfo.mean)*imfo.V;
% feature_p = scaling(feature_p,1,imfo.A,imfo.B);
% feature_p = feature_p/norm(feature_p, 2);
% [B S stat] = sparse_coding(AtA,0, dictionary', double(feature_p'), num_bases, beta, sparsity_func, epsilon, num_iters, batch_size, fname_save, Binit);
% for m = 1:length(numEachGalImg)
% AA = S(cumNumEachGalImg(m)+1:cumNumEachGalImg(m+1),:);
% X1 = dictionary(cumNumEachGalImg(m)+1:cumNumEachGalImg(m+1),:);
% recovery = X1'*AA;
% YY(m) = mean(sum((recovery'-double(feature_p)).^2));
% end
% score(:,i) = YY;
% toc
% end
% accuracy = calrank(score,1:1,'ascend');
% fprintf('rank-1:%d/%%\n',accuracy*100);