#include "stdafx.h"
#include <stdio.h>
#include <iostream>
#include <math.h>
#include "cv.h"
#include "cxcore.h"
#include "highgui.h"
#include <string>
#include <vector>
#include <fstream> // used to save the surveillance log
#include "GENZONG.h"
using namespace std;
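// NOTE: the Rect and Object definitions below are kept only as commented-out reference;
// the actual types used throughout this file are assumed to come from GENZONG.h.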
// typedef struct
// {
// int flag;
// int min_w;
// int min_h;
// int max_w;
// int max_h;
// }Rect;
//
// typedef struct
// {
// int flag;
// int min_w;
// int min_h;
// int max_w;
// int max_h;
// float *objectHist;
// vector<CvPoint> trajectory; // recorded target trajectory
// CvPoint start;
// int len; // number of trajectory points
//
// }Object;
int iniBkT=6;    // threshold for the initial background mask (affects how fast the background model builds up)
int thrZero=10;  // threshold for declaring the initial background complete (use 10 for simple scenes, 100 for complex ones)
float gx=0.0008f;
Rect blobNow[1000];         // bounding boxes of foreground blobs in the current frame
int numberBlobNow=0;        // number of foreground blobs in the current frame
int blobNowFlag=0;          // set to 1 once the first frame of boxes has been drawn
CvKalman *kalman[1000];     // one Kalman filter per tracked target
Object object[100];         // currently tracked targets
int allObjectNum = 0;       // total number of targets created so far
#define calc_point(p) cvPoint(cvRound(p[0]), cvRound(p[1]))
Object objectPredict[1000]; // Kalman-predicted target boxes for the current frame
#define NUM (8*8*8)
#define R_BIN 8     /* number of histogram bins for the red component   */
#define G_BIN 8     /* number of histogram bins for the green component */
#define B_BIN 8     /* number of histogram bins for the blue component  */
#define R_SHIFT 5   /* number of bits to shift the R, G, B components,  */
#define G_SHIFT 5   /* matching the bin counts above:                   */
#define B_SHIFT 5   /* log2( 256/8 ) = 5 bits                           */
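// For reference (a sketch, not taken from the original code): with the bin counts and
// shifts above, an RGB pixel (r, g, b) would typically be quantized into one of
// NUM = 8*8*8 bins as
//     bin = (r >> R_SHIFT) * G_BIN * B_BIN + (g >> G_SHIFT) * B_BIN + (b >> B_SHIFT);
// the histogram code itself is assumed to live in GENZONG.h.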
vector<CvPoint*> trajectorys;
vector<int> ntrajectorys;
CvRect selection;
int selectionOrigin=1;     // when 1, use the default detection region
int select_object = 0;
int detect_object = 0;     // target detection starts only when this equals 1
int new_detect_object = 0; // when 1, reallocate memory
unsigned int nFrmNum = 0;  // number of frames captured so far
IplImage *image = NULL;    // the captured video frame, shared with the mouse callback
int In_num = 0;            // "in" counter, displayed next to the detection region
int Out_num = 0;           // "out" counter, displayed next to the detection region
/************************************************************************/
/* Binarization: t1 is the threshold                                    */
/************************************************************************/
void threshold(int t1, IplImage* image)
{
    for (int i = 0; i < image->height; i++)      // rows
    {
        for (int j = 0; j < image->width; j++)   // columns
        {
            uchar t = ((uchar*)(image->imageData + image->widthStep*i))[j];
            if (t > t1)
                ((uchar*)(image->imageData + image->widthStep*i))[j] = 255;
            else
                ((uchar*)(image->imageData + image->widthStep*i))[j] = 0;
        }
    }
}
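// Note: for a single-channel 8-bit image, the loop in threshold() should be equivalent
// to the single OpenCV call cvThreshold(image, image, t1, 255, CV_THRESH_BINARY).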
/************************************************************************/
/* Set all pixels to zero                                               */
/************************************************************************/
void setZero(IplImage* image)
{
    for (int i = 0; i < image->height; i++)
    {
        for (int j = 0; j < image->width; j++)
        {
            ((uchar*)(image->imageData + image->widthStep*i))[j] = 0;
        }
    }
}
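// Note: cvZero(image) clears the whole image buffer in one call; the loop in setZero()
// only zeroes the first image->width bytes of each row, which assumes a single-channel image.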
/************************************************************************/
/* Draw bounding boxes around moving regions; returns 1 if any box was drawn */
/************************************************************************/
int RegionRect(IplImage* frontImgBinary, IplImage* frontImgGray, IplImage *imageRGB, IplImage *imageHue)
{
    int i, j, n = 0;
    int flage = 0;
    Rect blobTemp[1000];
    // Update the box positions for the current frame
    for (i = 0; i < frontImgBinary->height; i++)
        for (j = 0; j < frontImgBinary->width; j++)
        {
            // Scan the binary foreground image left-to-right, top-to-bottom; when a
            // target (white) pixel is found, start region growing from it
            if (*(uchar *)(frontImgBinary->imageData + frontImgBinary->widthStep*i + j) == 255)
                blobTemp[n++] = RegionGrow(j, i, frontImgBinary);
        } // n = number of grown regions; a grown box that meets the size criteria gets flag = 1, otherwise 0
    int a = 0; // a = number of grown regions that meet the criteria
    for (i = 0; i < n; i++)
    {
        if (blobTemp[i].flag) // copy the bounds of each region that meets the criteria into blobNow, the current frame's target boxes
        {
            blobNow[a].min_w = blobTemp[i].min_w;
            blobNow[a].min_h = blobTemp[i].min_h;
            blobNow[a].max_w = blobTemp[i].max_w;
            blobNow[a].max_h = blobTemp[i].max_h;
            blobNow[a].flag = blobTemp[i].flag;
            a++;
        }
    }
    numberBlobNow = a; // number of foreground blobs in this frame
    // Restore pixels set to 40 (presumably marked by RegionGrow) back to foreground white
    for (int k = 0; k < frontImgBinary->height; k++)
    {
        for (int l = 0; l < frontImgBinary->width; l++)
        {
            if (((uchar*)(frontImgBinary->imageData + frontImgBinary->widthStep*k))[l] == 40)
                ((uchar*)(frontImgBinary->imageData + frontImgBinary->widthStep*k))[l] = 255;
        }
    }
    // If this is the first frame in which boxes are drawn, all detected targets are new arrivals
    if (!blobNowFlag)
    {
        for (i = 0; i < numberBlobNow; i++)
        {
            if ((blobNow[i].max_w - blobNow[i].min_w) > 20 &&
                (blobNow[i].max_h - blobNow[i].min_h) > 20)
            {
                kalman[i] = InitializeKalman(kalman[i]);
                newObject(object[i], blobNow[i], kalman[i], frontImgGray, imageRGB, selection);
                allObjectNum++;
            }
        }
    }
    for (i = 0; i < 100; i++) // predict each active target's box for this frame
    {
        if (object[i].flag)
        {
            // Kalman-predicted centre position for this frame, used as the search starting point
            const CvMat* prediction = cvKalmanPredict( kalman[i], 0 );
            CvPoint predictCenter;
            predictCenter = calc_point(prediction->data.fl);
            // Clamp the predicted centre so the predicted box cannot leave the video entirely
            int halfWidth = (object[i].max_w - object[i].min_w) / 2;
            int halfHeight = (object[i].max_h - object[i].min_h) / 2;
            // It is enough that the target box still intersects the video window; only the intersection is drawn
            predictCenter.x = MAX(predictCenter.x, -halfWidth);
            predictCenter.x = MIN(predictCenter.x, frontImgBinary->width + halfWidth);
            predictCenter.y = MAX(predictCenter.y, -halfHeight);
            predictCenter.y = MIN(predictCenter.y, frontImgBinary->height + halfHeight);
            objectPredict[i].min_w = predictCenter.x - halfWidth;
            objectPredict[i].min_h = predictCenter.y - halfHeight;
            objectPredict[i].max_w = predictCenter.x + halfWidth;
            objectPredict[i].max_h = predictCenter.y + halfHeight;
        }
    }
    int oldAllObjectNum = allObjectNum % 100;
    if (blobNowFlag)
    {
        campObjectRect(object, objectPredict, blobNow, 100, numberBlobNow,
                       frontImgBinary, frontImgGray, imageRGB, imageHue, &allObjectNum, selection, kalman);
    }
    else
    {
        blobNowFlag = 1;
    }
    char strFrameNum[10];
    CvFont font;
    int lineType = 8;
    cvInitFont(&font, 5, 1, 1, 0, 2);
    CvScalar colour;
    for (i = 0; i < 100; i++)
    {
        if (object[i].flag == 1)
        {
            CvPoint pointRect1 = cvPoint(object[i].min_w + selection.x, object[i].min_h + selection.y);
            CvPoint pointRect2 = cvPoint(object[i].max_w + selection.x, object[i].max_h + selection.y);
            colour = CV_RGB(255,0,0);      // existing targets in red
            if (i >= oldAllObjectNum)
                colour = CV_RGB(0,255,0);  // targets created this frame in green
            cvRectangle(imageRGB, pointRect1, pointRect2, colour, 2, lineType);
            sprintf(strFrameNum, "%d", i + 1); // label the target with its index
            if (pointRect1.y > 10)
                cvPutText(imageRGB, strFrameNum, cvPoint(pointRect1.x, pointRect1.y - 3), &font, colour);
            else
                cvPutText(imageRGB, strFrameNum, cvPoint(pointRect2.x - 30, pointRect2.y + 15), &font, colour);
            flage = 1;
        }
    }
    //------------------------------------------------------------------------------------------------------------------------
    char out_number[10];
    char In_number[10];
    cvInitFont(&font, 5, 1, 1, 0, 2);
    sprintf(In_number, "%d", In_num);
    sprintf(out_number, "%d", Out_num);
    cvPutText(image, In_number,  cvPoint(selection.x + selection.width, selection.y),      &font, cvScalar(255, 0, 255));
    cvPutText(image, out_number, cvPoint(selection.x + selection.width, selection.y + 30), &font, cvScalar(255, 255, 0));
    //------------------------------------------------------------------------------------------------------------------------
    return flage;
}
int main( int argc, char** argv )
{
    CvCapture* pCapture = NULL;      // video capture handle
    IplImage* queryFrame = NULL;     // frame grabbed from the capture
    IplImage* image_gray = NULL;     // grayscale copy of the frame
    IplImage* pFrame = NULL;         // frame with the detection region applied
    IplImage* temp = NULL;
    IplImage* frameDiff = NULL;      // frame-difference image
    IplImage* pBkImgTemp = NULL;
    IplImage* pFrImg_cross = NULL;
    IplImage* pBkImg = NULL;         // grayscale background image