Face Recognition No. 3 Recognize faces in video streams (cameras)

The previous two articles talked about the establishment of the face database and the training of the model, so as to prepare for face recognition in this article.

This article will build on the work done in the previous two articles.


First, copy the three training model files (MyFaceFisherModel.xml, MyFaceLBPHModel.xml, MyFacePCAModel.xml) obtained in the previous article to the current directory, because these three files are used as the basis for face recognition.

This article likewise uses three methods — Eigenfaces (PCA), Fisherfaces, and LBPH — to perform the recognition.

The approximate process is as follows:

1. Turn on the camera;

2. Load face detector and face model;

3. Scale the image down (to improve efficiency), detect the face, and draw a frame around it;

4. Identify the faces (compare them against those stored in the trained face models).


Code [opencv-3.2.0 version]:

#include <opencv2/opencv.hpp>  
#include "opencv2/core.hpp"
#include "opencv2/face.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <fstream>
#include <sstream>

using namespace cv;
using namespace cv::face;
using namespace std;

// Minimum pixel height (rows) of a detected face ROI; smaller faces are
// considered too far from the camera and are skipped before recognition.
#define ROW_MIN		45

// Global exit request flag: set to 1 when the user presses Esc (checked in
// both main() and DetectAndDraw()).
int exitFlag = 0;

// Detects faces in 'img', draws rectangles on it, and returns the detected
// face ROI (grayscale, downscaled) through 'facesImg'. Defined below main().
int DetectAndDraw(Mat& img, CascadeClassifier& cascade,
							double scale, Mat& facesImg);

/* Entry point: open the camera, load the face detector and the three trained
 * recognition models, then loop: detect a face per frame and predict its
 * label with PCA (Eigenfaces), Fisherfaces and LBPH.
 * Returns 0 on normal exit (Esc pressed), -1 on camera / cascade load failure. */
int main()   // BUGFIX: was "intmain()", which does not compile
{
	int ret = 0;
	double scale = 4;  // downscale factor passed to DetectAndDraw

	VideoCapture cap(0); // Open the default camera
	if (!cap.isOpened())
	{
		printf("open camera failed.\n");
		return -1;
	}

	// Load the Haar cascade face detector (path from the opencv-3.2.0 source tree)
	CascadeClassifier cascade;
	ret = cascade.load("/root/library/opencv/opencv-3.2.0/data/haarcascades/haarcascade_frontalface_alt.xml");
	if (!ret)
	{
		printf("load xml failed[ret=%d].\n", ret);
		return -1;
	}
	cout << "load xml succeed. " << endl;

	// Load the three face models trained in the previous article
	// (OpenCV 3.2 cv::face contrib-module API)
	Ptr<BasicFaceRecognizer> modelPCA = createEigenFaceRecognizer();
	modelPCA->load("MyFacePCAModel.xml");
	Ptr<BasicFaceRecognizer> modelFisher = createFisherFaceRecognizer();
	modelFisher->load("MyFaceFisherModel.xml");
	Ptr<LBPHFaceRecognizer> modelLBPH = createLBPHFaceRecognizer();
	modelLBPH->load("MyFaceLBPHModel.xml");

	Mat frame;
	while (!exitFlag)
	{
		cap >> frame;
		if (frame.empty())
		{
			continue;
		}

		// Grayscale ROI of the detected face (last one if several were found)
		Mat faces;

		ret = DetectAndDraw(frame, cascade, scale, faces);
		if (ret <= 0) // no face detected in this frame
		{
			cout << "faces.size <= 0" << endl;
			continue;
		}

		Mat face_resize;
		int predictPCA = 0;
		int predictFisher = 0;
		int predictLBPH = 0;
		if (faces.rows >= ROW_MIN) // reject faces that are too small (too far away)
		{
			// The models were trained on 92x112 images, so normalize the probe
			resize(faces, face_resize, Size(92, 112));
		}
		else
		{
			printf("face.rows[%d] < %d\n", faces.rows, ROW_MIN);
			continue;
		}

		if (!face_resize.empty())
		{
			// The probe is already grayscale: DetectAndDraw converts before detecting
			predictPCA = modelPCA->predict(face_resize);
			predictFisher = modelFisher->predict(face_resize);
			predictLBPH = modelLBPH->predict(face_resize);
		}

		cout << "predictPCA   : " << predictPCA    << endl;
		cout << "predictFisher: " << predictFisher << endl;
		cout << "predictLBPH  : " << predictLBPH   << endl;

		if (waitKey(1) == 27)	// Esc: request exit
		{
			exitFlag = 1;
			cout << "Esc..." << endl;
			break;
		}
	}

	return 0;
}

/* Parameters: input image, cascade classifier, scaling factor, output face*/
/* Detect faces in a frame and annotate it.
 * Parameters:
 *   img      - input BGR frame; rectangles are drawn on it in place
 *   cascade  - loaded Haar cascade classifier
 *   scale    - downscale factor applied before detection (speeds detection up)
 *   facesImg - output: grayscale ROI of the LAST detected face, expressed in
 *              the downscaled image's size/coordinates
 * Returns the number of detected faces (0 when none).
 * Side effects: shows the annotated frame in the "FaceDetect" window and sets
 * the global exitFlag when Esc is pressed during the display delay. */
int DetectAndDraw(Mat& img, CascadeClassifier& cascade, double scale,
							Mat& facesImg)
{
    double t = 0;
    Mat gray;
    Mat GrayImg;
	vector<Rect> faces;
    double fx = 1 / scale;

    cvtColor( img, gray, COLOR_BGR2GRAY ); // Haar detection works on grayscale

	/* shrink the image to speed up detection */
	resize( gray, GrayImg, Size(), fx, fx, INTER_LINEAR);

    equalizeHist( GrayImg, GrayImg ); // histogram equalization improves contrast

	/* run the detector and time it */
    t = (double)getTickCount();
    cascade.detectMultiScale( GrayImg, faces,
        1.1, 2, 0
        //|CASCADE_FIND_BIGGEST_OBJECT
        //|CASCADE_DO_ROUGH_SEARCH
        |CASCADE_SCALE_IMAGE,
        Size(30, 30) );
    t = (double)getTickCount() - t;
    // BUGFIX: faces.size() is size_t; %ld is UB where size_t != long — use %zu
    printf( "detection time = %g ms faces.size = %zu\n", t*1000/getTickFrequency(), faces.size());

	/* outline each detected face on the original frame */
    for ( size_t i = 0; i < faces.size(); i++ ) // faces.size(): number of detected objects
    {
        Rect rectFace = faces[i];
		facesImg = GrayImg(faces[i]); // NOTE: only the last face survives in facesImg

		// blue: raw (downscaled) detection coordinates drawn on the full frame
		rectangle(img, faces[i], Scalar(255, 0, 0), 1, 8, 0);

		// green: coordinates multiplied back by 'scale' to match the full frame
		rectangle(	img, Point(rectFace.x, rectFace.y) * scale,
					Point(rectFace.x + rectFace.width, rectFace.y + rectFace.height) * scale,
					Scalar(0, 255, 0), 2, 8);
    }

    imshow( "FaceDetect", img ); // show the annotated frame
	if(waitKey(1) == 27) // short delay so the window refreshes; Esc requests exit
	{
		exitFlag = 1;
	}

	return (int)faces.size(); // explicit narrowing: detection counts are tiny
}


Compile and run results:

detection time = 23.5059 ms faces.size = 1
predictPCA   : 1
predictFisher: 1
predictLBPH  : 1
Esc...



Note: The blue frame is the zoomed face position, and the green frame is the restored face position.




Guess you like

Origin http://43.154.161.224:23101/article/api/json?id=325383135&siteId=291194637