#include "DetectFunc.h"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
#include <algorithm>
using pcb::UserConfig;
using pcb::RuntimeParams;
using std::vector;
using std::string;
using std::cout;
using std::to_string;
using cv::Mat;
using cv::Point;
using cv::Point2i;
using cv::Point2f;
using cv::Vec4i;
using cv::Rect;
using cv::Size;
using cv::Scalar;
using cv::Ptr;
using cv::KeyPoint;
using cv::DMatch;
using cv::FlannBasedMatcher;
using cv::xfeatures2d::SURF;
using cv::xfeatures2d::SIFT;
using cv::BFMatcher;
#define M_PI 3.14159265358979323846 // pi
/**
 * @brief Constructs the detector with all configuration pointers unset.
 *
 * The three config pointers are injected later by the owner; they are
 * initialized to Q_NULLPTR here so accidental use before injection is
 * detectable. Uses the member initializer list instead of assignments
 * in the body (idiomatic C++; members are initialized exactly once).
 * NOTE(review): initializer order assumes the members are declared in
 * this order in DetectFunc.h — confirm to avoid -Wreorder warnings.
 */
DetectFunc::DetectFunc()
	: adminConfig(Q_NULLPTR)   // system parameters
	, userConfig(Q_NULLPTR)    // user parameters
	, runtimeParams(Q_NULLPTR) // runtime parameters
{
}
// Destructor. Emits a qDebug trace so object teardown can be observed in
// the Qt debug log; the config pointers are not owned here, so nothing is
// freed (ownership presumably lies with the caller — TODO confirm).
DetectFunc::~DetectFunc()
{
qDebug() << "~DetectFunc";
}
/**
 * @brief Registers (aligns) a sample image against a template using SURF
 *        features whose keypoints/descriptors were precomputed and loaded.
 *
 * The sample image is downscaled with pyrDown (once, or twice in
 * low-accuracy mode) to speed up feature detection; the estimated
 * homography is then rescaled back to full resolution before warping.
 *
 * @param keypoints_1        template keypoints (precomputed, full scale of the pyramid level used when saved)
 * @param descriptors_1      template SURF descriptors matching keypoints_1
 * @param image_sample_gray  grayscale sample image to align
 * @param imgReg             [out] sample warped into the template frame
 * @param H                  [out] 3x3 homography mapping sample -> template
 * @param imMatches          unused; kept for interface compatibility
 * @return true when a homography was estimated and the sample was warped;
 *         false when there were too few matches or RANSAC failed
 *         (previously the function returned true unconditionally, leaving
 *         H and imgReg unset on failure).
 */
bool DetectFunc::alignImages_surf_load(vector<KeyPoint> &keypoints_1, Mat& descriptors_1, Mat &image_sample_gray, Mat &imgReg, Mat &H, Mat &imMatches)
{
	// Extended (128-dim) upright SURF, hessian threshold 100.
	Ptr<SURF> detector = SURF::create(100, 4, 4, true, true);
	std::vector<KeyPoint> keypoints_2;
	Mat descriptors_2;
	double t1 = clock();

	// Downscale the sample: one pyrDown always, a second in low-accuracy mode.
	cv::Mat pyr;
	pyrDown(image_sample_gray, pyr);
	if (userConfig->matchingAccuracyLevel == 2) // low accuracy
	{
		pyrDown(pyr, pyr);
	}
	detector->detectAndCompute(pyr, Mat(), keypoints_2, descriptors_2);
	double t2 = clock();
	cout << "获取特征点时间" << double(t2 - t1) / CLOCKS_PER_SEC << endl;

	// knnMatch throws on empty descriptor sets; bail out early instead.
	if (descriptors_1.empty() || descriptors_2.empty())
	{
		cout << "matches is empty! " << endl;
		return false;
	}

	Ptr<cv::flann::IndexParams> indexParams = new cv::flann::KDTreeIndexParams(5);
	FlannBasedMatcher matcher(indexParams);
	vector<vector<DMatch>> knnMatches;
	matcher.knnMatch(descriptors_1, descriptors_2, knnMatches, 2);

	// Lowe's ratio test. No sorting is needed: findHomography is
	// order-independent, so the former sort of knnMatches was dead work.
	const float minRatio = 0.7f;
	vector<DMatch> matches;
	for (size_t i = 0; i < knnMatches.size(); i++)
	{
		// knnMatch may return fewer than 2 neighbours for a query
		// descriptor; indexing [1] unconditionally was out-of-range.
		if (knnMatches[i].size() < 2)
			continue;
		const DMatch& bestMatch = knnMatches[i][0];
		const DMatch& betterMatch = knnMatches[i][1];
		if (bestMatch.distance < minRatio * betterMatch.distance)
		{
			matches.push_back(bestMatch);
		}
	}

	if (matches.empty())
	{
		cout << "matches is empty! " << endl;
		return false;
	}
	if (matches.size() < 4) // homography estimation needs at least 4 points
	{
		cout << matches.size() << " points matched is not enough " << endl;
		return false;
	}

	// Collect matched point pairs (template <- sample).
	vector<Point2f> temp_points;
	vector<Point2f> samp_points;
	temp_points.reserve(matches.size());
	samp_points.reserve(matches.size());
	for (size_t i = 0; i < matches.size(); i++)
	{
		temp_points.push_back(keypoints_1[matches[i].queryIdx].pt);
		samp_points.push_back(keypoints_2[matches[i].trainIdx].pt);
	}
	double t3 = clock();
	cout << "匹配并获取变换矩阵时间" << double(t3 - t2) / CLOCKS_PER_SEC << endl;

	H = findHomography(samp_points, temp_points, cv::RANSAC, 5.0);
	if (H.empty()) // RANSAC can fail to find a model; H.at<> would crash
	{
		return false;
	}

	// Undo the pyramid downscale: translation scales up by the factor,
	// the projective row scales down. Factor = 2 (level 1) or 4 (level 2).
	const int matrixAdj = 2 * (userConfig->matchingAccuracyLevel);
	H.at<double>(0, 2) *= matrixAdj;
	H.at<double>(1, 2) *= matrixAdj;
	H.at<double>(2, 0) /= matrixAdj;
	H.at<double>(2, 1) /= matrixAdj;
	warpPerspective(image_sample_gray, imgReg, H, image_sample_gray.size());
	return true;
}
/**
 * @brief Registers (aligns) a sample image against a template using SIFT
 *        features whose keypoints/descriptors were precomputed and loaded.
 *
 * Mirrors alignImages_surf_load but detects SIFT features and matches with
 * a brute-force matcher. The sample image is downscaled with pyrDown (once,
 * or twice in low-accuracy mode); the homography is rescaled back to full
 * resolution before warping.
 *
 * @param keypoints_1        template keypoints (precomputed)
 * @param descriptors_1      template SIFT descriptors matching keypoints_1
 * @param image_sample_gray  grayscale sample image to align
 * @param imgReg             [out] sample warped into the template frame
 * @param H                  [out] 3x3 homography mapping sample -> template
 * @param imMatches          unused; kept for interface compatibility
 * @return true when a homography was estimated and the sample was warped;
 *         false when there were too few matches or RANSAC failed
 *         (previously the function returned true unconditionally, leaving
 *         H and imgReg unset on failure).
 */
bool DetectFunc::alignImages_sift_load(std::vector<cv::KeyPoint>& keypoints_1, cv::Mat & descriptors_1, cv::Mat & image_sample_gray, cv::Mat & imgReg, cv::Mat & H, cv::Mat & imMatches)
{
	Ptr<SIFT> detector = SIFT::create();
	std::vector<KeyPoint> keypoints_2;
	Mat descriptors_2;
	double t1 = clock();

	// Downscale the sample: one pyrDown always, a second in low-accuracy mode.
	cv::Mat pyr;
	pyrDown(image_sample_gray, pyr);
	if (userConfig->matchingAccuracyLevel == 2) // low accuracy
	{
		pyrDown(pyr, pyr);
	}
	detector->detectAndCompute(pyr, Mat(), keypoints_2, descriptors_2);
	double t2 = clock();
	cout << "获取特征点时间" << double(t2 - t1) / CLOCKS_PER_SEC << endl;

	// knnMatch throws on empty descriptor sets; bail out early instead.
	if (descriptors_1.empty() || descriptors_2.empty())
	{
		cout << "matches is empty! " << endl;
		return false;
	}

	BFMatcher matcher;
	vector<vector<DMatch>> knnMatches;
	matcher.knnMatch(descriptors_1, descriptors_2, knnMatches, 2);

	// Lowe's ratio test. No sorting is needed: findHomography is
	// order-independent, so the former sort of knnMatches was dead work.
	const float minRatio = 0.7f;
	vector<DMatch> matches;
	for (size_t i = 0; i < knnMatches.size(); i++)
	{
		// knnMatch may return fewer than 2 neighbours for a query
		// descriptor; indexing [1] unconditionally was out-of-range.
		if (knnMatches[i].size() < 2)
			continue;
		const DMatch& bestMatch = knnMatches[i][0];
		const DMatch& betterMatch = knnMatches[i][1];
		if (bestMatch.distance < minRatio * betterMatch.distance)
		{
			matches.push_back(bestMatch);
		}
	}

	if (matches.empty())
	{
		cout << "matches is empty! " << endl;
		return false;
	}
	if (matches.size() < 4) // homography estimation needs at least 4 points
	{
		cout << matches.size() << " points matched is not enough " << endl;
		return false;
	}

	// Collect matched point pairs (template <- sample).
	vector<Point2f> temp_points;
	vector<Point2f> samp_points;
	temp_points.reserve(matches.size());
	samp_points.reserve(matches.size());
	for (size_t i = 0; i < matches.size(); i++)
	{
		temp_points.push_back(keypoints_1[matches[i].queryIdx].pt);
		samp_points.push_back(keypoints_2[matches[i].trainIdx].pt);
	}
	double t3 = clock();
	cout << "匹配并获取变换矩阵时间" << double(t3 - t2) / CLOCKS_PER_SEC << endl;

	H = findHomography(samp_points, temp_points, cv::RANSAC, 5.0);
	if (H.empty()) // RANSAC can fail to find a model; H.at<> would crash
	{
		return false;
	}

	// Undo the pyramid downscale: translation scales up by the factor,
	// the projective row scales down. Factor = 2 (level 1) or 4 (level 2).
	const int matrixAdj = 2 * (userConfig->matchingAccuracyLevel);
	H.at<double>(0, 2) *= matrixAdj;
	H.at<double>(1, 2) *= matrixAdj;
	H.at<double>(2, 0) /= matrixAdj;
	H.at<double>(2, 1) /= matrixAdj;
	warpPerspective(image_sample_gray, imgReg, H, image_sample_gray.size());
	return true;
}
Mat DetectFunc::myThresh(int curCol, int curRow, const cv::Mat & grayImg, cv::Point point_left, cv::Point point_right,int segThresh, bool threshFlag)
{
int totalCol = runtimeParams->nCamera - 1;//从0开始
int totalRow = runtimeParams->nPhotographing - 1;
Mat res = Mat::zeros(grayImg.size(), CV_8UC1);
Rect rect;//roi区域
if (curCol == 0 && curRow == 0)//左上
{
rect.x = point_left.x;
rect.y = point_right.y;
rect.width = grayImg.cols - point_left.x;
rect.height = grayImg.rows - point_right.y;
}
else if (curCol == 0 && curRow == totalRow)//左下
{
rect.x = point_left.x;
rect.y = 0;
rect.width = grayImg.cols - point_left.x;
rect.height = point_left.y;
}
else if (curCol == totalCol && curRow == 0)//右上
{
rect.x = 0;
rect.y = point_right.y;
rect.width = point_right.x;
rect.height = grayImg.rows - point_right.y;
}
else if (curCol == totalCol && curRow == totalRow)//右下
{
rect.x = 0;
rect.y = 0;
rect.width = point_right.x;
rect.height = point_left.y;
}
else if (curCol == 0 && 0 < curRow && curRow < totalRow)//左边
{
rect.x = point_left.x;
rect.y = 0;
rect.width = grayImg.cols - point_left.x;
rect.height = grayImg.rows;
}
else if (curCol == totalCol && 0 < curRow && curRow < totalRow)//右边
{
rect.x = 0;
rect.y = 0;
rect.width = point_right.x;
rect.height = grayImg.rows;
}
else if (curRow == 0 && 0 < curCol && curCol < totalCol)//上边
{
rect.x = 0;
rect.y = point_right.y;
rect.width = grayImg.cols;
rect.height = grayImg.rows - point_right.y;
}
else if (curRow == totalRow && 0 < curCol && curCol < totalCol)//下边
{
rect.x = 0;
rect.y = 0;
rect.width = grayImg.cols;
rect.height = point_left.y;
}
else if (0 < curCol && curCol < totalCol && 0 < curRow && curRow < totalRow)//中央
{
rect.x = 0;
rect.y = 0;
rect.width = grayImg.cols;
rect.height = grayImg.rows;
}
//int longSize = std::max(rect.width, rect.height);
//int blockSize = longSize / 4 * 2 + 1;
//cv::adaptiveThreshold(grayImg(rect), res(rect), 255, cv::ADAPTIVE_THRESH_MEAN_