#include "widget.h"
#include "ui_widget.h"
#include <QtGui>
#include <highgui.h>
#ifndef PROPERTY
#define PROPERTY
#define image_width 320 // display width of the captured frame, in pixels
#define image_height 240 // display height of the captured frame, in pixels
#define image_Format QImage::Format_RGB888 // QImage pixel format used for display
#define cameraDevice "/dev/video0" // V4L2 camera device node
#define haarXML "./data/haarcascade_frontalface_alt2.xml" // Haar cascade classifier file (frontal face)
#define imgSizeScaleSmall 0.5 // downscale factor applied before face detection
#define imgSizeScaleBig 2 // upscale factor applied after detection, back to display size
#endif //PROPERTY
Widget::Widget(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::Widget)
{
    ui->setupUi(this);

    // RGB888 frame buffer. `frame` wraps this memory directly (QImage does
    // not copy an external buffer), so the YUV conversion in paintEvent()
    // only needs to fill imgBuf for the image to be up to date.
    imgBuf = (unsigned char *)malloc(image_width * image_height * 3 * sizeof(char));
    frame = new QImage(imgBuf, image_width, image_height, image_Format);

    // Face-detection state: Haar cascade plus a scratch memory storage
    // reused by every cvHaarDetectObjects() call.
    cascadeFile = haarXML;
    cascade = (CvHaarClassifierCascade *)cvLoad(cascadeFile.toUtf8());
    m_FaceCount = 0;
    storage = cvCreateMemStorage(0);

    // Refresh timer: update() schedules paintEvent(), which pulls one
    // camera frame and repaints (~33 fps at 30 ms).  Started only after the
    // device is known to be capturing (see below).
    timer = new QTimer(this);
    connect(timer, SIGNAL(timeout()), this, SLOT(update()));

    // Bring up the V4L2 capture device.  On any failed step: warn, close
    // the device, and stop here.  The old code fell through and kept
    // calling the next API on a dead device, and also re-tested the same
    // camReturn with a stale "get frame failed" message although no
    // get_frame() had been called.
    m_camera = new VideoDevice(tr(cameraDevice));
    connect(m_camera, SIGNAL(display_error(QString)), this, SLOT(display_error(QString)));

    camReturn = m_camera->open_device();
    if (-1 == camReturn)
    {
        // old message said "/dev/dsp" (an audio device) -- corrected
        QMessageBox::warning(this, tr("error"), tr("open camera device error"), QMessageBox::Yes);
        m_camera->close_device();
        return;
    }

    camReturn = m_camera->init_device();
    if (-1 == camReturn)
    {
        QMessageBox::warning(this, tr("error"), tr("init failed"), QMessageBox::Yes);
        m_camera->close_device();
        return;
    }

    camReturn = m_camera->start_capturing();
    if (-1 == camReturn)
    {
        QMessageBox::warning(this, tr("error"), tr("start capture failed"), QMessageBox::Yes);
        m_camera->close_device();
        return;
    }

    timer->start(30);
}
Widget::~Widget()
{
    // Release everything the constructor allocated; the old destructor only
    // deleted ui and leaked imgBuf, frame, m_camera, cascade and storage,
    // and left the capture device open.  `timer` is parented to this
    // widget, so Qt deletes it automatically.
    if (m_camera)
    {
        // NOTE(review): assumes stop/close are safe on a device that never
        // started (they return -1 on error elsewhere) -- confirm VideoDevice.
        m_camera->stop_capturing();
        m_camera->close_device();
        delete m_camera;
    }
    if (storage)
        cvReleaseMemStorage(&storage);
    if (cascade)
        cvReleaseHaarClassifierCascade(&cascade);
    delete frame;   // delete the QImage wrapper before freeing the buffer it wraps
    free(imgBuf);
    delete ui;
}
void Widget::changeEvent(QEvent *e)
{
    // Let the base class handle the event first, then react to runtime
    // language switches by re-applying the translated UI strings.
    QWidget::changeEvent(e);
    if (e->type() == QEvent::LanguageChange)
        ui->retranslateUi(this);
}
void Widget::paintEvent(QPaintEvent *)
{
    uchar *pImgBuf = 0;
    unsigned int len = 0;

    // Grab one raw frame (YUYV) from the camera; bail out if it fails
    // (the old code used the buffer without checking the return value).
    camReturn = m_camera->get_frame((void **)&pImgBuf, &len);
    if (-1 == camReturn || !pImgBuf)
        return;

    // Convert YUYV to packed RGB888 directly into imgBuf.  `frame` was
    // constructed over imgBuf in the ctor, so it already sees the new
    // pixels -- the old frame->loadFromData() call was wrong: it parses
    // image *file* formats (PNG/JPEG...), fails on raw RGB data, and
    // invalidates the image.
    convert_yuv_to_rgb_buffer(pImgBuf, imgBuf, image_width, image_height);

    IplImage *src = QImageToIplImage(frame);
    if (!src)
    {
        printf("img error!");
        m_camera->unget_frame();
        return;
    }

    // Downscale before detection to cut the Haar-cascade cost.
    CvSize smallSize;
    smallSize.width = src->width * imgSizeScaleSmall;
    smallSize.height = src->height * imgSizeScaleSmall;
    IplImage *dst = cvCreateImage(smallSize, src->depth, src->nChannels);
    cvResize(src, dst, CV_INTER_LINEAR);

    detect_and_draw(dst); // draws face rectangles into dst

    // Scale back up to the display size.
    CvSize bigSize;
    bigSize.width = dst->width * imgSizeScaleBig;
    bigSize.height = dst->height * imgSizeScaleBig;
    IplImage *img = cvCreateImage(bigSize, dst->depth, dst->nChannels);
    cvResize(dst, img, CV_INTER_LINEAR);

    // IplImage stores BGR while QImage expects RGB, so swap the channels
    // for display (could also use cvConvertImage).
    QImage qimage = QImage((uchar *)img->imageData, img->width, img->height, image_Format);
    qimage = qimage.rgbSwapped();
    ui->m_imgLabel->setPixmap(QPixmap::fromImage(qimage));

    camReturn = m_camera->unget_frame();

    cvReleaseImage(&img);
    cvReleaseImage(&dst); // fix: the intermediate image was leaked every frame
    cvReleaseImage(&src);
}
void Widget::display_error(QString err)
{
    // Slot: surface an error reported by the capture device as a modal
    // warning dialog.
    QMessageBox::warning(this, tr("error"), err, QMessageBox::Yes);
}
/* Convert a YUYV (YUV 4:2:2) buffer to a packed RGB888 buffer. */
int Widget::convert_yuv_to_rgb_buffer(unsigned char *yuv, unsigned char *rgb, unsigned int width, unsigned int height)
{
    // Each 4-byte YUYV macropixel [Y0 U Y1 V] expands to two RGB pixels
    // that share the same chroma (U, V).  Input is width*height*2 bytes,
    // output is width*height*3 bytes.  Always returns 0.
    const unsigned int yuvBytes = width * height * 2;
    unsigned char *out = rgb;

    for (unsigned int in = 0; in < yuvBytes; in += 4)
    {
        const int y0 = yuv[in + 0];
        const int u  = yuv[in + 1];
        const int y1 = yuv[in + 2];
        const int v  = yuv[in + 3];

        // convert_yuv_to_rgb_pixel packs the result as byte0=R, byte1=G,
        // byte2=B of the returned word (little-endian layout).
        unsigned int px = convert_yuv_to_rgb_pixel(y0, u, v);
        *out++ = (unsigned char)(px & 0xff);
        *out++ = (unsigned char)((px >> 8) & 0xff);
        *out++ = (unsigned char)((px >> 16) & 0xff);

        px = convert_yuv_to_rgb_pixel(y1, u, v);
        *out++ = (unsigned char)(px & 0xff);
        *out++ = (unsigned char)((px >> 8) & 0xff);
        *out++ = (unsigned char)((px >> 16) & 0xff);
    }
    return 0;
}
int Widget::convert_yuv_to_rgb_pixel(int y, int u, int v)
{
    // Convert one YUV sample to RGB with the usual floating-point
    // coefficients, clamp each channel to [0,255], and pack the result as
    // byte0=R, byte1=G, byte2=B of the returned word.
    const int du = u - 128;
    const int dv = v - 128;

    int r = y + (1.370705 * dv);
    int g = y - (0.698001 * dv) - (0.337633 * du);
    int b = y + (1.732446 * du);

    if (r < 0) r = 0; else if (r > 255) r = 255;
    if (g < 0) g = 0; else if (g > 255) g = 255;
    if (b < 0) b = 0; else if (b > 255) b = 255;

    unsigned int pixel32 = 0;
    unsigned char *bytes = (unsigned char *)&pixel32;
    // The *220/256 scale darkens toward a TV-style level range -- kept
    // exactly as in the original implementation.
    bytes[0] = r * 220 / 256;
    bytes[1] = g * 220 / 256;
    bytes[2] = b * 220 / 256;
    return pixel32;
}
/* end of YUV-to-RGB conversion helpers */
// QImage -> IplImage conversion
IplImage* Widget::QImageToIplImage(const QImage * qImage)
{
    // Convert a QImage into a newly allocated 8-bit, 3-channel (BGR order)
    // IplImage.  The caller owns the result and must cvReleaseImage() it.
    const int width = qImage->width();
    const int height = qImage->height();

    CvSize size;
    size.width = width;
    size.height = height;
    IplImage *iplImg = cvCreateImage(size, IPL_DEPTH_8U, 3);

    // Write pixels through imageData/widthStep instead of one cvSet2D()
    // call per pixel -- identical result (CV_RGB stores B,G,R into channels
    // 0,1,2), far less per-pixel overhead.  widthStep accounts for any row
    // padding OpenCV added.
    for (int y = 0; y < height; ++y)
    {
        uchar *row = (uchar *)iplImg->imageData + y * iplImg->widthStep;
        for (int x = 0; x < width; ++x)
        {
            const QRgb rgb = qImage->pixel(x, y);
            row[3 * x + 0] = (uchar)qBlue(rgb);
            row[3 * x + 1] = (uchar)qGreen(rgb);
            row[3 * x + 2] = (uchar)qRed(rgb);
        }
    }
    return iplImg;
}
//人脸检测函数
void Widget::detect_and_draw(IplImage *img)
{
static CvScalar colors[] =
{
{{0,0,255}},
{{0,128,255}},
{{0,255,255}},
{{0,255,0}},
{{255,128,0}},
{{255,255,0}},
{{255,0,0}},
{{255,0,255}}
};
double scale = 2.0;
IplImage* gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
IplImage* small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
cvRound (img->height/scale)), 8, 1 );
int i;
cvCvtColor( img, gray, CV_BGR2GRAY );
cvResize( gray, small_img, CV_INTER_LINEAR );
cvEqualizeHist( small_img, small_img );
cvClearMemStorage( storage );
if( cascade )
{
double t = (double)cvGetTickCount();
CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
1.2, 2, 0, cvSize(30, 30) );
t = (double)cvGetTickCount() - t;
printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
for( i = 0; i < (faces ? faces->total : 0); i++ )
{
CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
CvPoint rectP1, rectP2;
rectP1.x = cvRound(r->x * scale);
rectP1.y = cvRound(r->y * scale);
rectP2.x = cvRound((r->x + r->width) * scale);
rectP2.y = cvRound((r->y + r->height) * scale);
cvRectangl
- 1
- 2
前往页