%% Sparse reconstruction approach code: file 1
% EE368 project 2011: Ritesh Kolte and Abhishek Arora
% training dictionaries
% Upscaling factor of the single-image SR results. The SR training images'
% filenames encode this factor, so it is used to build them below instead
% of hard-coding '3' in the names.
up_factor = 3;
addpath('C:\Users\ritesh\Documents\MATLAB\ee 368\project\sparse\SR pairs');

% Load the three training images and keep only the luma (Y) channel as
% double — the dictionaries are trained on luminance patches only.
im1 = imread('im1_small.png');
im1 = rgb2ycbcr(im1); im1 = double(im1(:,:,1));
im2 = imread('im2_small.png');
im2 = rgb2ycbcr(im2); im2 = double(im2(:,:,1));
im3 = imread('im3_small.png');
im3 = rgb2ycbcr(im3); im3 = double(im3(:,:,1));

% Load the corresponding super-resolved images (filename carries the
% upscaling factor). A 5-pixel border is cropped on every side —
% presumably to discard SR boundary artifacts; TODO confirm against how
% these images were generated.
im1_SR = imread(sprintf('im1_SR_%d.png', up_factor));
im1_SR = rgb2ycbcr(im1_SR); im1_SR = double(im1_SR(6:end-5,6:end-5,1));
im2_SR = imread(sprintf('im2_SR_%d.png', up_factor));
im2_SR = rgb2ycbcr(im2_SR); im2_SR = double(im2_SR(6:end-5,6:end-5,1));
im3_SR = imread(sprintf('im3_SR_%d.png', up_factor));
im3_SR = rgb2ycbcr(im3_SR); im3_SR = double(im3_SR(6:end-5,6:end-5,1));

% Bicubic 2x upsampling of the originals: the low-resolution feature
% patches below are extracted from these images.
im1_low = imresize(im1,2,'bicubic');
im2_low = imresize(im2,2,'bicubic');
im3_low = imresize(im3,2,'bicubic');
%% image1
% Extract co-registered training patch pairs from image 1:
%   - non-overlapping 6x6 patches (stride 6) from the 2x bicubic image
%     become 36-dim columns of Yl1;
%   - the matching 9x9 patches from the SR image become 81-dim columns of
%     Xh1 (coordinates scale by 3/2 between the 2x and 3x grids).
% The low-res patch mean is subtracted from both patches, so the
% dictionaries model zero-mean structure only.
dim = size(im1_low);
num_training = (length(1:6:dim(1)-5))*(length(1:6:dim(2)-5));
Xh1 = zeros(81,num_training);
Yl1 = zeros(36,num_training);
num = 0;
for m = 1:6:dim(1)-5
    for n = 1:6:dim(2)-5
        num = num+1;
        % NB: do not name this variable 'mean' — that shadows MATLAB's
        % built-in mean() for the rest of the session.
        patch_mean = mean2(im1_low(m:m+5,n:n+5));
        Yl1(:,num) = reshape(im1_low(m:m+5,n:n+5)-patch_mean,[],1);
        Xh1(:,num) = reshape(im1_SR(((m-1)*3/2)+1:((m-1)*3/2)+9,((n-1)*3/2)+1:((n-1)*3/2)+9)-patch_mean,[],1);
    end
end
%% image2
% Same patch-pair extraction as for image 1: 6x6 low-res patches from the
% 2x bicubic image (columns of Yl2) paired with the corresponding 9x9 SR
% patches (columns of Xh2), both with the low-res patch mean removed.
dim = size(im2_low);
num_training = (length(1:6:dim(1)-5))*(length(1:6:dim(2)-5));
Xh2 = zeros(81,num_training);
Yl2 = zeros(36,num_training);
num = 0;
for m = 1:6:dim(1)-5
    for n = 1:6:dim(2)-5
        num = num+1;
        % NB: do not name this variable 'mean' — that shadows MATLAB's
        % built-in mean() for the rest of the session.
        patch_mean = mean2(im2_low(m:m+5,n:n+5));
        Yl2(:,num) = reshape(im2_low(m:m+5,n:n+5)-patch_mean,[],1);
        Xh2(:,num) = reshape(im2_SR(((m-1)*3/2)+1:((m-1)*3/2)+9,((n-1)*3/2)+1:((n-1)*3/2)+9)-patch_mean,[],1);
    end
end
%% image3
% Same patch-pair extraction as for images 1 and 2: 6x6 low-res patches
% (columns of Yl3) paired with the corresponding 9x9 SR patches (columns
% of Xh3), both with the low-res patch mean removed.
dim = size(im3_low);
num_training = (length(1:6:dim(1)-5))*(length(1:6:dim(2)-5));
Xh3 = zeros(81,num_training);
Yl3 = zeros(36,num_training);
num = 0;
for m = 1:6:dim(1)-5
    for n = 1:6:dim(2)-5
        num = num+1;
        % NB: do not name this variable 'mean' — that shadows MATLAB's
        % built-in mean() for the rest of the session.
        patch_mean = mean2(im3_low(m:m+5,n:n+5));
        Yl3(:,num) = reshape(im3_low(m:m+5,n:n+5)-patch_mean,[],1);
        Xh3(:,num) = reshape(im3_SR(((m-1)*3/2)+1:((m-1)*3/2)+9,((n-1)*3/2)+1:((n-1)*3/2)+9)-patch_mean,[],1);
    end
end
%% form dictionaries
% The low-res (Dl) and high-res (Dh) dictionaries are simply the raw
% training patches from all three images, stacked column-wise.
Dl = [Yl1 Yl2 Yl3];
Dh = [Xh1 Xh2 Xh3];
% Drop the per-image matrices and all loop temporaries from the workspace;
% only Dl and Dh (and up_factor) remain.
clear Yl1 Yl2 Yl3 Xh1 Xh2 Xh3
clear im* dim mean num m n num_training
% We tried solving the biconvex optimization problem below, but later
% decided not to use it since we didn't have enough training data and using
% raw patches as dictionaries was giving good results.
% % clear low_grad*
% % save('dictionaries')
% N = 81;
% M = 36;
% X_total = [Xh/sqrt(N); Yl/sqrt(M)];
% % beta = 25*(1/sqrt(N) + 1/sqrt(M));
% beta = 4;
% num_iters = 2;
% num_bases = 128;
% Binit = randn(81+36,num_bases);
% normalize = sqrt(diag(Binit'*Binit));
% Binit = Binit./(ones(81+36,1)*normalize');
%
% [B S stat] = sparse_coding(X_total, num_bases, beta, 'L1', [], num_iters, 1000, 'dictionaries', [], Binit);
%
% Dh = B(1:81,:)/sqrt(N);
% Dl = B(82:end,:)/sqrt(M);
%
% clear B Binit M N S X_total Xh Yl beta normalize num_iters stat