OpenCV

Learning OpenCV

Face detection, Image warping, and trackbar

Posted by rmehran on February 9, 2008

This is a sample program that demonstrates how to use face detection, image warping, and the trackbar GUI in OpenCV. Thanks to Bilal Orhan for providing his example code for image warping.

(Download Source)

// simple_face_detect_warp_trackbar.cpp : Defines the entry point for the console application.
//

#include "stdafx.h"

#include <cv.h>
#include <highgui.h>
#include <math.h>
#include <string>

// Mathematical constant pi (not referenced below; kept for completeness).
static const double pi = 3.14159265358979323846;
// Maximum number of feature points to track.
#define N 500

// Square an integer, widening to double before the multiply so that
// large |a| (> ~46340) cannot overflow 32-bit int arithmetic, which
// would be undefined behavior.
inline static double square(int a)
{
return (double)a * a;
}

int thresh = 11;   // raw trackbar position (0..21); 11 maps to scale 1.0
double scale = 1;  // zoom factor derived from the trackbar position

// Trackbar callback: map the raw trackbar position h (0..21) onto a
// zoom factor centred on 1.0:
//   h < 11 -> shrink  (scale = 1/(12-h), down to 1/12)
//   h = 11 -> identity (scale = 1)
//   h > 11 -> magnify (scale = h-10, up to 11)
void on_trackbar(int h)
{
int offset = h - 11;
if (offset < 0)
scale = -1.0/(offset - 1);
else if (offset > 0)
scale = offset*1.0 + 1;
else
scale = 1.0;
// The original printed the raw offset while labelling it "scale";
// print the actual computed zoom factor instead.
printf("scale %g\n", scale);
}

// Create a string that contains the exact cascade name
const char* cascade_name =
“C:/Program Files/OpenCV/data/haarcascades/haarcascade_frontalface_alt.xml”;
/*    “haarcascade_profileface.xml”;*/

// Function prototype for detecting and drawing an object from an image
void detect_and_draw( IplImage* image , int scale,int min_neighbors,int min_size,IplImage* faceimg,  CvRect* tmprect );
/*
Entry point: plays an AVI file, detects faces in each frame, and shows a
trackbar-controlled affine zoom of the (grayscale, flipped) frame.
*/
int main( int argc, char** argv ) {

CvCapture* cap  = cvCaptureFromAVI(“C:\\Documents and Settings\\Ramin\\Desktop\\AVIPlayer\\VeryFunny_NEW.avi”);
int count = 1;

IplImage* img1 = cvQueryFrame(cap);
IplImage* img2 = cvQueryFrame(cap);
IplImage* gim1 = cvCreateImage(cvSize(img1->width,img1->height),IPL_DEPTH_8U, 1);
IplImage* gim2 = cvCreateImage(cvSize(img2->width,img2->height),IPL_DEPTH_8U, 1);

cvNamedWindow(“transformed”, 0);
cvNamedWindow(“org”, 0);
// create a toolbar
cvCreateTrackbar(“param”, “org”, &thresh, 21, on_trackbar);
on_trackbar(11);
int number_of_features = N;

img1 = cvQueryFrame(cap);
IplImage* sframe;
sframe = cvCreateImage(cvSize(img1->width/1,img1->height/1),img1->depth,img1->nChannels);

cvResize(img1,sframe);
cvFlip(sframe,sframe,0);
IplImage * faceimg = cvCreateImage(cvSize(sframe->width,sframe->height),sframe->depth,sframe->nChannels);
CvRect * tmprect = new CvRect();

while(1){

img1 = cvQueryFrame(cap);
cvResize(img1,sframe);
cvFlip(sframe,sframe,0);
detect_and_draw( sframe,1,1,0,faceimg,tmprect);

cvConvertImage( img1, gim1, CV_CVTIMG_FLIP );
cvWaitKey(1);
/*    img2 = cvQueryFrame(cap);
cvConvertImage( img2, gim2, CV_CVTIMG_FLIP );
*/

//feature points to track
/*    if( 0 ){ //save the image
char name[] = “c:\\flow\000.jpg”;

//name += 2*11;
name[11] = (char)(count % 10+48);
if(count >= 10 )
name[10] =(char)(count / 10+48);
if(count >= 100 )
name[9] =(char)(count / 100+48);

printf(“%s\n”,name);
cvSaveImage(name,gim1 );
count++;
}
*/    // map matrix for WarpAffine, stored in array

CvMat* map_matrix = cvCreateMat(2, 3, CV_32F);

int w = gim1->width;
int h = gim1->height;
double moveC = (scale-1)/2;

cvmSet(map_matrix ,0,0, scale);
cvmSet(map_matrix ,0,1, 0);
cvmSet(map_matrix ,0,2, -moveC*w);

cvmSet(map_matrix ,1,0, 0);
cvmSet(map_matrix ,1,1, scale);
cvmSet(map_matrix ,1,2, -moveC*h);

cvWarpAffine(gim1,gim2,map_matrix);

cvShowImage(“transformed”, gim2);

CvPoint p1,p2,q1,q2;

p1.x = (int)w/2-w/scale/2;
p1.y = (int) h/2 + h/scale/2;

p2.x = (int)w/2+w/scale/2;
p2.y = (int) h/2 + h/scale/2;

q1.x = (int)w/2-w/scale/2;
q1.y = (int) h/2 – h/scale/2;

q2.x = (int)w/2+w/scale/2;
q2.y = (int) h/2 – h/scale/2;

CvScalar color = CV_RGB(50,0,250);
cvLine( img1, p1, q1, color, 1, CV_AA, 0 );
cvLine( img1, p2, q2, color, 1, CV_AA, 0 );
cvLine( img1, p1, p2, color, 1, CV_AA, 0 );
cvLine( img1, q1, q2, color, 1, CV_AA, 0 );

cvShowImage(“org”, img1);
//cvResizeWindow( “HelloWorld”, 800, 800 );
//cvMoveWindow( “HelloWorld”, 200, 0 );

//if((cvWaitKey(100) & 255) == 27) break;
}

return 0;
}
// Function to detect and draw any faces that is present in an image
void detect_and_draw( IplImage* img, int scale,int min_neighbors,int min_size,IplImage* faceimg,  CvRect* tmprect )
{

// Create memory for calculations
static CvMemStorage* storage = 0;

// Create a new Haar classifier
static CvHaarClassifierCascade* cascade = 0;

//int scale = 1;

// Create a new image based on the input image
IplImage* temp = cvCreateImage( cvSize(img->width/scale,img->height/scale), 8, 3 );

// Create two points to represent the face locations
CvPoint pt1, pt2;
int i;

// Load the HaarClassifierCascade
cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );

// Check whether the cascade has loaded successfully. Else report and error and quit
if( !cascade )
{
fprintf( stderr, “ERROR: Could not load classifier cascade\n” );
return;
}

// Allocate the memory storage
storage = cvCreateMemStorage(0);

// Create a new named window with title: result
cvNamedWindow( “result”, 1 );

// Clear the memory storage which was used before
cvClearMemStorage( storage );

// Find whether the cascade is loaded, to find the faces. If yes, then:
if( cascade )
{

// There can be more than one face in an image. So create a growable sequence of faces.
// Detect the objects and store them in the sequence
CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,
1.1, min_neighbors, CV_HAAR_DO_CANNY_PRUNING,
cvSize(min_size, min_size) );

// Loop the number of faces found.
for( i = 0; i < (faces ? faces->total : 0); i++ )
{
// Create a new rectangle for drawing the face
// CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
tmprect = (CvRect*)cvGetSeqElem( faces, i );
if (tmprect->height<= 0 ) continue;
cvSetImageROI( img, cvRect(tmprect->x*scale,tmprect->y*scale,tmprect->width*scale,tmprect->height*scale) );
//sprintf(file_num,”G:\\faces_from_movie\\%d.jpg”,int(framenum));
//cvReleaseImage(faceimg);
faceimg = cvCreateImage(cvSize(tmprect->width*scale,tmprect->height*scale),img->depth,img->nChannels);
//faceimg = cvCloneImage(img);
cvCopyImage(img,faceimg);
cvNamedWindow(“The Face”, 0);
cvShowImage(“The Face”, faceimg);

cvResetImageROI(img);

// Find the dimensions of the face,and scale it if necessary
pt1.x = tmprect->x*scale;
pt2.x = (tmprect->x+tmprect->width)*scale;
pt1.y = tmprect->y*scale;
pt2.y = (tmprect->y+tmprect->height)*scale;

// Draw the rectangle in the input image
cvRectangle( img, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 );
}
}

// Show the image in the window named “result”
cvShowImage( “result”, img );
//IplROI roi =  cvRectToROI(&r

// Release the temp image created.
cvReleaseImage( &temp );
}

Sample Image

Posted in Uncategorized | Leave a Comment »

Calculating Fundamental Matrix

Posted by rmehran on February 5, 2008

Although the OpenCV Wiki presents the following code as an example of fundamental matrix calculation, I had to write my own version to get it to work. There was also an issue with the differing behaviors of cvFindFundamentalMat() and cvFindFundamentalMatrix() that I did not fully understand.

Let's look at the example code from the Wiki.

Example. Estimation of fundamental matrix using RANSAC algorithm from wiki

int point_count = 100;
CvMat* points1;
CvMat* points2;
CvMat* status;
CvMat* fundamental_matrix;

// 1 x N matrices of 2-channel 32-bit floats: one (x,y) pair per point.
points1 = cvCreateMat(1,point_count,CV_32FC2);
points2 = cvCreateMat(1,point_count,CV_32FC2);
// Per-point inlier mask filled by RANSAC.
status = cvCreateMat(1,point_count,CV_8UC1);

/* Fill the points here … */
// NOTE(review): the matrices are CV_32FC2 (float), but data.db is the
// *double* accessor — writing through it corrupts the float data. The
// placeholder assignments below (left incomplete in the original wiki
// snippet) should use data.fl instead.
for( i = 0; i < point_count; i++ )
{
points1->data.db[i*2] = ;
points1->data.db[i*2+1] = ;
points2->data.db[i*2] = ;
points2->data.db[i*2+1] = ;
}

fundamental_matrix = cvCreateMat(3,3,CV_32FC1);
// RANSAC with a 1.0-pixel distance threshold and 0.99 confidence level;
// returns the number of fundamental matrices found (0 or 1 here).
int fm_count = cvFindFundamentalMat(
points1,points2,fundamental_matrix,
CV_FM_RANSAC,1.0,0.99,status );

My example code for Fundamental Matrix (source)

with help of code from Paul Smith (UCF vision Lab website) calculating the fundamental matrix using 8-point algorithm.


// first_console_openCV.cpp : Defines the entry point for the console application.
//
#include “stdafx.h”
#include
#include
#include
struct mousedata
{
int cntr ;
CvPoint p1[8];
CvPoint p2[8];

} pp ={0,{},{}};
IplImage* image ;
IplImage* image2 ;

void on_mouse(int event, int x, int y, int flags, void* param)
{
switch(event)
{
case CV_EVENT_LBUTTONDOWN:
{

int cntr = pp.cntr;
char cntr_s[5];

CvFont font;
cvInitFont(&font,CV_FONT_HERSHEY_PLAIN|CV_FONT_ITALIC,1,1,0,1);

CvPoint ptr = cvPoint(x,y);
printf(“(%d,%d)”,ptr.x,ptr.y);
if (cntr <8)
{
itoa(cntr,cntr_s,10);
pp.p1[cntr] = ptr;
cvCircle(image,pp.p1[cntr],1,cvScalar(0,0,255),3);

cvPutText(image,cntr_s,pp.p1[cntr],&font,cvScalar(0,0,255));
cvShowImage(“mywindow”, image);
cvSaveImage(“c:\img1.jpg”,image);

}
else if (cntr <16)
{ pp.p2[cntr-8] = ptr;
cvCircle(image2,pp.p2[cntr-8],1,cvScalar(0,0,255),3);
_itoa(cntr-8,cntr_s,10);
cvPutText(image2,cntr_s,pp.p2[cntr-8],&font,cvScalar(0,0,255));
cvShowImage(“mywindow2″, image2);
cvSaveImage(“c:\img2.jpg”,image2);
}

pp.cntr = cntr + 1;
}
break;

}
}
int _tmain(int argc, _TCHAR* argv[])
{

// mousedada *pp;
pp.cntr = 0;

// load 8-bit, 1 channel grayscale PGM image
image = cvLoadImage(“C:\Documents and Settings\Ramin\Desktop\images\apt1.jpg”);
image2 = cvLoadImage(“C:\Documents and Settings\Ramin\Desktop\images\apt2.jpg”);

// Create a window in which the captured images will be presented
cvNamedWindow(“mywindow”, CV_WINDOW_AUTOSIZE);

// set the mouse call back
cvSetMouseCallback(“mywindow”,on_mouse,0);

// display results
cvShowImage(“mywindow”, image);

// Create a window in which the captured images will be presented
cvNamedWindow(“mywindow2″, CV_WINDOW_AUTOSIZE);

// set the mouse call back
cvSetMouseCallback(“mywindow2″,on_mouse,0 );

cvShowImage(“mywindow2″, image2);

// wait for a keypress
cvWaitKey(0);

//transfer the vector of points to the appropriate opencv matrix structures
int i1,i2;
i2 =0;
int numPoints =8;
CvMat* points1;
CvMat* points2;
CvMat* status;
CvMat* fundMatr;
points1 = cvCreateMat(2,numPoints,CV_32F);
points2 = cvCreateMat(2,numPoints,CV_32F);
status = cvCreateMat(1,numPoints,CV_32F);

for ( i1 = 0; i1 < numPoints; i1++) {

cvSetReal2D(points1,0,i1,pp.p1[i1].x/1);
cvSetReal2D(points1,1,i1,pp.p1[i1].y/1);

cvSetReal2D(points2,0,i1,pp.p2[i1].x/1);
cvSetReal2D(points2,1,i1,pp.p2[i1].y/1);
}

//create the output fundamental matrix
fundMatr = cvCreateMat(3,3,CV_32F);

//see opencv manual for other options in computing the fundamental matrix
int num = cvFindFundamentalMat(points1,points2,fundMatr,CV_FM_8POINT,1.0,0.9999,status);

if( num == 1 )
{
printf(“Fundamental matrix was foundn”);

}
else
{
printf(“Fundamental matrix was not foundn”);
return -1;

}

//now visualize the fundamental matrix
int numOutputPoints;

CvMat* corrLines;
corrLines= cvCreateMat(3,numPoints,CV_32F);

//specify which direction to compute epipolar lines
int startImage = 2;
cvComputeCorrespondEpilines( points2,
startImage,//means points are in image 1
fundMatr,
corrLines);

CvMat* a = cvCreateMat(3,1,CV_32F);
CvMat* b = cvCreateMat(3,1,CV_32F);
CvMat* c = cvCreateMat(3,1,CV_32F);
CvMat* d = cvCreateMat(3,1,CV_32F);

//create output window
char windowName[100];
strcpy_s(windowName,”Output Window”);
cvNamedWindow(windowName,CV_WINDOW_AUTOSIZE);

//for all the points set the point and corresponding epipolar line
//and determine where the epipolar line intersects the image plane
//then display all this info
CvMat* epiLine = cvCreateMat(1,3,CV_32F);
for ( i1 = 0; i1 < numPoints; i1++) {

for (i2 = 0; i2 < 3; i2++) {
cvmSet(epiLine,0,i2,cvmGet(corrLines,i2,i1));
}

CvPoint epipolarLinePoint1, epipolarLinePoint2;

int i4;

CvMat* a = cvCreateMat(3,1,CV_32F);
CvMat* b = cvCreateMat(3,1,CV_32F);
CvMat* c = cvCreateMat(3,1,CV_32F);
CvMat* d = cvCreateMat(3,1,CV_32F);

for ( i4 = 0; i4 < 3; i4++) {

cvSetReal2D(a,i4,0,cvGetReal2D(epiLine,0,i4)/cvGetReal2D(epiLine,0,2));
}

if (abs(cvGetReal2D(epiLine,0,0)) > abs(cvGetReal2D(epiLine,0,1)) ){

double ylim = image->height;

cvSetReal2D(b,0,0,0);
cvSetReal2D(b,1,0,1);
cvSetReal2D(b,2,0,0);

cvCrossProduct(a,b,c);
for ( i4 = 0; i4 < 3; i4++) {
cvSetReal2D(c,i4,0,cvGetReal2D(c,i4,0)/cvGetReal2D(c,2,0));
}

cvSetReal2D(b,0,0,0);
cvSetReal2D(b,1,0,-1.0/ylim);
cvSetReal2D(b,2,0,1);
cvCrossProduct(a,b,d);
for ( i4 = 0; i4 < 3; i4++) {
cvSetReal2D(d,i4,0,cvGetReal2D(d,i4,0)/cvGetReal2D(d,2,0));
}

}
else {
double xlim = image->width;
cvSetReal2D(b,0,0,1);
cvSetReal2D(b,1,0,0);
cvSetReal2D(b,2,0,0);

cvCrossProduct(a,b,c);
for ( i4 = 0; i4 < 3; i4++) {
cvSetReal2D(c,i4,0,cvGetReal2D(c,i4,0)/cvGetReal2D(c,2,0));
}

cvSetReal2D(b,0,0,-1.0/xlim);
cvSetReal2D(b,1,0,0);
cvSetReal2D(b,2,0,1);
cvCrossProduct(a,b,d);
for ( i4 = 0; i4 < 3; i4++) {
cvSetReal2D(d,i4,0,cvGetReal2D(d,i4,0)/cvGetReal2D(d,2,0));
}

}

epipolarLinePoint1.x = cvmGet(c,0,0);
epipolarLinePoint1.y = cvmGet(c,1,0);

epipolarLinePoint2.x = cvmGet(d,0,0);
epipolarLinePoint2.y = cvmGet(d,1,0);

//cvCircle(image2,cvPoint(cvmGet(points2,0,i1),cvmGet(points2,1,i1)),5,CV_RGB(255,255,0),1);

cvShowImage(windowName,image2);
//cvWaitKey(0);

cvLine(image,epipolarLinePoint1,epipolarLinePoint2,CV_RGB(0,255,0));

cvShowImage(windowName,image);
// cvWaitKey(0);
}
cvWaitKey(0);
cvSaveImage(“c:\epipolars.jpg”,image);
return 0;

}

Posted in Uncategorized | Leave a Comment »

Necessary Libraries

Posted by rmehran on February 5, 2008

Always remember to add libraries to OpenCV Project in Visual Studio
Go to project properties -> linker -> input. In the Additional Dependencies box, add these libraries:

cxcore.lib cv.lib highgui.lib cvaux.lib cvcam.lib

Posted in Uncategorized | Leave a Comment »

 
Follow

Get every new post delivered to your Inbox.