import com.googlecode.javacpp.FloatPointer;
import com.googlecode.javacpp.Pointer;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Logger;
import static com.googlecode.javacv.cpp.opencv_highgui.*;
import static com.googlecode.javacv.cpp.opencv_highgui.cvDestroyWindow;
import static com.googlecode.javacv.cpp.opencv_legacy.*;
import static com.googlecode.javacv.cpp.opencv_objdetect.CV_HAAR_DO_CANNY_PRUNING;
import static com.googlecode.javacv.cpp.opencv_objdetect.CV_HAAR_FIND_BIGGEST_OBJECT;
import static com.googlecode.javacv.cpp.opencv_objdetect.CV_HAAR_DO_ROUGH_SEARCH;
import static com.googlecode.javacv.cpp.opencv_objdetect.cvHaarDetectObjects;
import static com.googlecode.javacv.cpp.opencv_imgproc.CV_BGR2GRAY;
import static com.googlecode.javacv.cpp.opencv_imgproc.cvEqualizeHist;
import com.googlecode.javacpp.Loader;
import com.googlecode.javacv.*;
import com.googlecode.javacv.FrameGrabber.Exception;
import com.googlecode.javacv.cpp.opencv_core;
import com.googlecode.javacv.cpp.opencv_core.CvFont;
import com.googlecode.javacv.cpp.opencv_core.CvMemStorage;
import com.googlecode.javacv.cpp.opencv_core.CvRect;
import com.googlecode.javacv.cpp.opencv_core.CvScalar;
import com.googlecode.javacv.cpp.opencv_core.CvSeq;
import com.googlecode.javacv.cpp.opencv_core.IplImage;
import com.googlecode.javacv.cpp.opencv_objdetect.CvHaarClassifierCascade;
import static com.googlecode.javacv.cpp.opencv_core.*;
import static com.googlecode.javacv.cpp.opencv_imgproc.*;
import static com.googlecode.javacv.cpp.opencv_calib3d.*;
import static com.googlecode.javacv.cpp.opencv_objdetect.*;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.io.*;
public class FaceRecognizer{
private static final Logger LOGGER = Logger.getLogger(FaceRecognizer.class.getName());
// Number of training face images (set from the loaded array length in learn()).
private int nTrainFaces = 0;
// Number of distinct persons in the training set — presumably filled by loadFaceImgArray(); verify.
private int nPersons=0;
// Number of eigenvalues/eigenvectors used for the PCA subspace (set by doPCA()).
private int nEigens = 0;
// Counters — not referenced in this chunk; purpose unclear from here.
private int count=1;
private int countForFace=0;
private int countSavedFace=1;
// Ground-truth person number for each training image — presumably; confirm in loadFaceImgArray().
private CvMat personNumTruthMat;
// Eigenvalues from the PCA decomposition.
private CvMat eigenValMat;
// nTrainFaces x nEigens matrix: each training face projected onto the PCA subspace (filled in learn()).
private CvMat projectedTrainFaceMat;
// Person numbers restored from previously saved training data; null until loadTrainingData() succeeds.
private CvMat trainPersonNumMat=null;
// Names of the known persons, indexed by person number — presumably; confirm against training-file parser.
final static List<String> personNames = new ArrayList<String>();
// Haar cascade for frontal-face detection. NOTE(review): backslash path is Windows-only — confirm intended platform.
private CvHaarClassifierCascade cascade = new CvHaarClassifierCascade(cvLoad("data\\haarcascade_frontalface_alt2.xml"));
IplImage[] eigenVectArr;       // eigenvectors, passed as eigInput to cvEigenDecomposite in learn()
IplImage[] trainingFaceImgArr; // training face images loaded by learn()
IplImage[] testFaceImgArr;     // test face images — not used in this chunk
IplImage pAvgTrainImg;         // average training image, passed as avg to cvEigenDecomposite
public static String personName;
// Display label of the last recognition result. NOTE(review): "unknow" looks like a typo for
// "unknown" — it is a runtime string, so it is left unchanged here; confirm before fixing.
private static String textName="unknow";
public static double g_confidence=0;
public FaceRecognizer() {
    // Eagerly restore any previously stored training data so recognition can run immediately.
    this.trainPersonNumMat = loadTrainingData();
}
/**
 * Trains the eigenface model from the image list in {@code trainingFileName}:
 * loads the training faces, runs PCA over them, projects every face onto the
 * PCA subspace into {@link #projectedTrainFaceMat}, and persists the result
 * via {@code storeTrainingData()} / {@code storeEigenfaceImages()}.
 *
 * @param trainingFileName path of the file listing the training images
 */
private void learn(final String trainingFileName) {
    // load training data
    LOGGER.info("===========================================");
    LOGGER.info("Loading the training images in " + trainingFileName);
    trainingFaceImgArr = loadFaceImgArray(trainingFileName);
    nTrainFaces = trainingFaceImgArr.length;
    LOGGER.info("Got " + nTrainFaces + " training images");
    // PCA needs a minimum number of samples to yield usable eigenvectors.
    if (nTrainFaces < 3) {
        LOGGER.severe("Need 3 or more training faces\n"
                + "Input file contains only " + nTrainFaces);
        return;
    }
    // do Principal Component Analysis on the training faces
    doPCA();
    LOGGER.info("projecting the training images onto the PCA subspace");
    // One row per training face, one column per eigen-coefficient (32-bit float, 1 channel).
    projectedTrainFaceMat = cvCreateMat(nTrainFaces, nEigens, CV_32FC1);
    // Zero-fill the matrix for ease of debugging (cvCreateMat does not initialize its data).
    for (int row = 0; row < nTrainFaces; row++) {
        for (int col = 0; col < nEigens; col++) {
            projectedTrainFaceMat.put(row, col, 0.0);
        }
    }
    LOGGER.info("created projectedTrainFaceMat with " + nTrainFaces + " (nTrainFaces) rows and " + nEigens + " (nEigens) columns");
    if (nTrainFaces < 5) {
        LOGGER.info("projectedTrainFaceMat contents:\n" + oneChannelCvMatToString(projectedTrainFaceMat));
    }
    // Reusable coefficient buffer: cvEigenDecomposite writes nEigens floats into it per face.
    final FloatPointer floatPointer = new FloatPointer(nEigens);
    // Loop index is now scoped to the loop (was a C-style method-top declaration).
    for (int i = 0; i < nTrainFaces; i++) {
        cvEigenDecomposite(
                trainingFaceImgArr[i], // obj
                nEigens,               // nEigObjs
                eigenVectArr,          // eigInput (Pointer)
                0,                     // ioFlags
                null,                  // userData (Pointer)
                pAvgTrainImg,          // avg
                floatPointer);         // coeffs (FloatPointer)
        if (nTrainFaces < 5) {
            LOGGER.info("floatPointer: " + floatPointerToString(floatPointer));
        }
        // Copy this face's coefficients into its row of the projection matrix.
        for (int col = 0; col < nEigens; col++) {
            projectedTrainFaceMat.put(i, col, floatPointer.get(col));
        }
    }
    if (nTrainFaces < 5) {
        LOGGER.info("projectedTrainFaceMat after cvEigenDecomposite:\n" + projectedTrainFaceMat);
    }
    // store the recognition data as an xml file
    storeTrainingData();
    // Save all the eigenvectors as images, so that they can be checked.
    storeEigenfaceImages();
}
private IplImage convertImageToGreyscale(IplImage imageSrc)
{
    // Always return a freshly allocated image, whether the input was colour or
    // already greyscale, so the caller can unconditionally cvReleaseImage() it.
    if (imageSrc.nChannels() != 3) {
        // Already single-channel: just duplicate it.
        return cvCloneImage(imageSrc);
    }
    // 3-channel (BGR) input: allocate an 8-bit single-channel image and convert.
    final IplImage grey = cvCreateImage(cvGetSize(imageSrc), IPL_DEPTH_8U, 1);
    cvCvtColor(imageSrc, grey, CV_BGR2GRAY);
    return grey;
}
/**
 * Returns a new image scaled to {@code newWidth} x {@code newHeight}, ignoring
 * aspect ratio. Uses CV_INTER_LINEAR when enlarging in both dimensions and
 * CV_INTER_AREA otherwise (good for shrinking / decimation).
 *
 * @param origImg   source image; its ROI is reset before resizing
 * @param newWidth  desired width in pixels, must be positive
 * @param newHeight desired height in pixels, must be positive
 * @return a newly allocated image of the requested size (same depth/channels as the source)
 */
private IplImage resizeImage(IplImage origImg, int newWidth, int newHeight)
{
    int origWidth = 0;
    int origHeight = 0;
    if (origImg != null) {
        origWidth = origImg.width();
        origHeight = origImg.height();
    }
    if (newWidth <= 0 || newHeight <= 0 || origImg == null || origWidth <= 0 || origHeight <= 0) {
        LOGGER.info("ERROR in resizeImage: Bad desired image size of");
        LOGGER.info(String.valueOf(newWidth)+","+String.valueOf(newHeight));
        // NOTE(review): terminating the JVM from a private helper is drastic —
        // consider throwing IllegalArgumentException instead; kept to preserve behavior.
        System.exit(1);
    }
    IplImage outImg = cvCreateImage(cvSize(newWidth, newHeight), origImg.depth(), origImg.nChannels());
    // Clear any ROI so the whole source image participates in the resize
    // (this call was duplicated in both branches of the original).
    cvResetImageROI(origImg);
    // CV_INTER_LINEAR (or CUBIC) is good for enlarging; CV_INTER_AREA for shrinking,
    // but bad at enlarging.
    final int interpolation =
            (newWidth > origWidth && newHeight > origHeight) ? CV_INTER_LINEAR : CV_INTER_AREA;
    cvResize(origImg, outImg, interpolation);
    return outImg;
}
private IplImage cropImage(IplImage img, CvRect region)
{
IplImage imageTmp;
IplImage imageRGB;
CvSize size;
// size.height()=img.height();
// size.width() = img.width();
if (img.depth() != IPL_DEPTH_8U) {
LOGGER.info("ERROR in cropImage: Unknown image depth of");
LOGGER.info(String.valueOf(img.depth()));
LOGGER.info(" given in cropImage() instead of 8 bits per pixel.");
System.exit(1);
}
// First create a new (color or greyscale) IPL Image and copy contents of img into it.
imageTmp = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, img.nChannels());
cvCopy(img, imageTmp);
// Create a new image of the detected region
// Set