#include "SkeletonSensor.h"
// openCV
#include <opencv/highgui.h>
#include <opencv/cv.h>
using namespace cv;
#include <iostream>
using namespace std;
// globals
// Global sensor instance; created and initialized in main().
SkeletonSensor* sensor;
// depth-map resolution requested from the sensor (pixels)
const unsigned int XRES = 640;
const unsigned int YRES = 480;
// maps raw 12-bit depth values (0..4095) into the 8-bit display range (0..255)
const float DEPTH_SCALE_FACTOR = 255./4096.;
// defines the value about which thresholding occurs
const unsigned int BIN_THRESH_OFFSET = 5;
// defines the value about which the region of interest is extracted
// (half-width/half-height of the square hand ROI, in pixels)
const unsigned int ROI_OFFSET = 70;
// median blur factor (kernel aperture size; must be odd)
const unsigned int MEDIAN_BLUR_K = 5;
// grasping threshold
const double GRASPING_THRESH = 0.9;
// colors, in OpenCV's BGR channel order
const Scalar COLOR_BLUE = Scalar(240,40,0);
const Scalar COLOR_DARK_GREEN = Scalar(0, 128, 0);
const Scalar COLOR_LIGHT_GREEN = Scalar(0,255,0);
const Scalar COLOR_YELLOW = Scalar(0,128,200);
const Scalar COLOR_RED = Scalar(0,0,255);
// Returns true when (x, y) lies within ROI_OFFSET pixels of any edge of the
// XRES x YRES frame — i.e. the hand ROI would spill outside the image.
bool handApproachingDisplayPerimeter(float x, float y)
{
    // Safe interior region: [ROI_OFFSET, XRES-ROI_OFFSET] x [ROI_OFFSET, YRES-ROI_OFFSET]
    const bool insideX = (x >= ROI_OFFSET) && (x <= (XRES - ROI_OFFSET));
    const bool insideY = (y >= ROI_OFFSET) && (y <= (YRES - ROI_OFFSET));

    // Approaching the perimeter is the negation of being fully inside.
    return !(insideX && insideY);
}
// conversion from cvConvexityDefect
// C++ value-type mirror of the legacy C struct CvConvexityDefect, describing
// one "valley" between the contour and its convex hull (e.g. the gap between
// two fingers). Populated by findConvexityDefects() below.
struct ConvexityDefect
{
    Point start;        // contour point where the defect begins (on the hull)
    Point end;          // contour point where the defect ends (on the hull)
    Point depth_point;  // contour point farthest from the hull within the defect
    float depth;        // distance from depth_point to the hull
};
// Thanks to Jose Manuel Cabrera for part of this C++ wrapper function
// Computes the convexity defects of `contour` relative to its convex hull
// (`hull` holds indices into `contour`, as produced by cv::convexHull) by
// bridging to the legacy C API cvConvexityDefects(), and appends the results
// to `convexDefects`. No-op if either input is empty.
void findConvexityDefects(vector<Point>& contour, vector<int>& hull, vector<ConvexityDefect>& convexDefects)
{
    if(hull.size() > 0 && contour.size() > 0)
    {
        // Transform our vector<Point> into a CvSeq* of CvPoint, as the C API requires.
        CvMemStorage* contourStr = cvCreateMemStorage();
        CvSeq* contourPoints = cvCreateSeq(CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), contourStr);
        for(int i = 0; i < (int)contour.size(); i++) {
            CvPoint cp = {contour[i].x, contour[i].y};
            cvSeqPush(contourPoints, &cp);
        }

        // Wrap the hull indices in a 1xN CvMat header over a plain int array.
        int count = (int) hull.size();
        int* hullK = (int*) malloc(count*sizeof(int));
        for(int i = 0; i < count; i++) { hullK[i] = hull.at(i); }
        CvMat hullMat = cvMat(1, count, CV_32SC1, hullK);

        // calculate convexity defects
        CvMemStorage* storage = cvCreateMemStorage(0);
        CvSeq* defects = cvConvexityDefects(contourPoints, &hullMat, storage);

        // BUGFIX: guard against an empty defect sequence before allocating/iterating.
        if(defects != 0 && defects->total > 0)
        {
            CvConvexityDefect* defectArray =
                (CvConvexityDefect*) malloc(sizeof(CvConvexityDefect)*defects->total);
            cvCvtSeqToArray(defects, defectArray, CV_WHOLE_SEQ);

            // We store defect points in the convexDefects parameter.
            for(int i = 0; i < defects->total; i++){
                ConvexityDefect def;
                def.start = Point(defectArray[i].start->x, defectArray[i].start->y);
                def.end = Point(defectArray[i].end->x, defectArray[i].end->y);
                def.depth_point = Point(defectArray[i].depth_point->x, defectArray[i].depth_point->y);
                def.depth = defectArray[i].depth;
                convexDefects.push_back(def);
            }
            free(defectArray);  // BUGFIX: was leaked on every call
        }

        // release memory
        // (BUGFIX: hullK was leaked; the dead strDefects storage and the
        //  cvCreateSeq result it held — immediately overwritten by the
        //  cvConvexityDefects return value — have been removed.)
        free(hullK);
        cvReleaseMemStorage(&contourStr);
        cvReleaseMemStorage(&storage);
    }
}
int main(int argc, char** argv)
{
// initialize the kinect
sensor = new SkeletonSensor();
sensor->initialize();
sensor->setPointModeToProjective();
Mat depthRaw(YRES, XRES, CV_16UC1);
Mat depthShow(YRES, XRES, CV_8UC1);
Mat handDebug;
// this vector holds the displayed images of the hands
vector<Mat> debugFrames;
// rectangle used to extract hand regions from depth map
Rect roi;
roi.width = ROI_OFFSET*2;
roi.height = ROI_OFFSET*2;
namedWindow("depthFrame", CV_WINDOW_AUTOSIZE);
namedWindow("leftHandFrame", CV_WINDOW_AUTOSIZE);
namedWindow("rightHandFrame", CV_WINDOW_AUTOSIZE);
int key = 0;
while(key != 27 && key != 'q')
{
sensor->waitForDeviceUpdateOnUser();
// update 16 bit depth matrix
memcpy(depthRaw.data, sensor->getDepthData(), XRES*YRES*2);
depthRaw.convertTo(depthShow, CV_8U, DEPTH_SCALE_FACTOR);
for(int handI = 0; handI < 2; handI++)
{
int handDepth;
if(sensor->getNumTrackedUsers() > 0)
{
Skeleton skel = sensor->getSkeleton(sensor->getUID(0));
SkeletonPoint hand;
if( handI == 0)
hand = skel.leftHand;
else
hand = skel.rightHand;
if(hand.confidence == 1.0)
{
handDepth = hand.z * (DEPTH_SCALE_FACTOR);
if(!handApproachingDisplayPerimeter(hand.x, hand.y))
{
roi.x = hand.x - ROI_OFFSET;
roi.y = hand.y - ROI_OFFSET;
}
}
}
else
handDepth = -1;
// extract hand from image
Mat handCpy(depthShow, roi);
Mat handMat = handCpy.clone();
// binary threshold
if(handDepth != -1)
handMat = (handMat > (handDepth - BIN_THRESH_OFFSET)) & (handMat < (handDepth + BIN_THRESH_OFFSET));
// last pre-filtering step, apply median blur
medianBlur(handMat, handMat, MEDIAN_BLUR_K);
// create debug image of thresholded hand and cvt to RGB so hints show in color
handDebug = handMat.clone();
debugFrames.push_back(handDebug);
cvtColor(debugFrames[handI], debugFrames[handI], CV_GRAY2RGB);
std::vector< std::vector<Point> > contours;
findContours(handMat, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
if (contours.size()) {
for (int i = 0; i < contours.size(); i++) {
vector<Point> contour = contours[i];
Mat contourMat = Mat(contour);
double cArea = contourArea(contourMat);
if(cArea > 2000) // likely the hand
{
Scalar center = mean(contourMat);
Point centerPoint = Point(center.val[0], center.val[1]);
// approximate the contour by a simple curve
vector<Point> approxCurve;
approxPolyDP(contourMat, approxCurve, 10, true);
vector< vector<Point> > debugContourV;
debugContourV.push_back(approxCurve);
drawContours(debugFrames[handI], debugContourV, 0, COLOR_DARK_GREEN, 3);
vector<int> hull;
convexHull(Mat(approxCurve), hull, false, false);
// draw the hull points
for(int j = 0; j < hull.size(); j++)
{
int index = hull[j];
circle(debugFrames[handI], approxCurve[index], 3, COLOR_YELLOW, 2);
}
// find convexity defects
vector<ConvexityDefect> convexDefects;
findConvexityDefects(approxCurve, hull, convexDefects);
printf("Number of defects: %d.\n", (int) convexDefects.size());
for(int j = 0; j < convexDefects.size(); j++)
{
circle(debugFrames[handI], convexDefects[j].depth_point, 3, COLOR_BLUE, 2);
// NOTE(review): the remainder of main() is missing from this copy of the file.
// The lines that previously followed here were non-code residue from a
// file-download web page (site boilerplate and an archive listing) that was
// accidentally pasted into the source, truncating main() mid-loop — the
// closing braces, remaining drawing/display logic, and cleanup are lost.
// Recover the original main.cpp from the upstream archive before building.