#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#define GLOG_NO_ABBREVIATED_SEVERITIES
#include <iostream>
#include <string>
#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/xfeatures2d/nonfree.hpp>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <ceres/ceres.h>
#include <ceres/rotation.h>
#include "gflags/gflags.h"
#include "glog/logging.h"
using namespace std;
using namespace cv;
using namespace pcl;
using namespace cv::xfeatures2d;
// 代价函数
struct ReprojectCost
{
cv::Point2d observation;
ReprojectCost(cv::Point2d& observation) : observation(observation)
{
}
// 参数:内参、外参、三维点、反向投影误差
template <typename T>
bool operator()(const T* const intrinsic, const T* const extrinsic, const T* const pos3d, T* residuals) const
{
const T* r = extrinsic;
const T* t = &extrinsic[3];
T pos_proj[3];
ceres::AngleAxisRotatePoint(r, pos3d, pos_proj);
// Apply the camera translation
pos_proj[0] += t[0];
pos_proj[1] += t[1];
pos_proj[2] += t[2];
const T x = pos_proj[0] / pos_proj[2];
const T y = pos_proj[1] / pos_proj[2];
const T fx = intrinsic[0];
const T fy = intrinsic[1];
const T cx = intrinsic[2];
const T cy = intrinsic[3];
// Apply intrinsic
const T u = fx * x + cx;
const T v = fy * y + cy;
residuals[0] = u - T(observation.x);
residuals[1] = v - T(observation.y);
return true;
}
};
// Extract keypoints and descriptors from every image, plus the RGB color at
// each keypoint (colors are kept per-image to avoid mixing them up).
void extract_features(vector<string>& image_names, vector<vector<KeyPoint>>& keypoints_for_all, vector<Mat>& descriptor_for_all, vector<vector<Vec3b>>& colors_for_all);
// Lowe's ratio test and symmetry (cross-check) test for filtering matches.
void ratioTest(vector<vector<DMatch>> &matches, vector<DMatch> &goodMatches);
void symmetryTest(const vector<DMatch>& matches1, const vector<DMatch>& matches2, vector<DMatch>& symMatches);
// Match features between consecutive image pairs (matches_for_all[i] pairs image i with image i+1).
void match_features(vector<Mat>& descriptor_for_all, vector<vector<DMatch>>& matches_for_all);
// From a set of matches, extract the corresponding 2D point pairs.
void get_matched_points(vector<KeyPoint> keypoints1,vector<KeyPoint> keypoints2,vector<DMatch> goodMatches,vector<Point2f>& points1,vector<Point2f>& points2);
// From a set of matches, extract the RGB colors of the matched keypoints.
void get_matched_colors(vector<Vec3b>& color1, vector<Vec3b>& color2, vector<DMatch> matches, vector<Vec3b>& out_c1, vector<Vec3b>& out_c2);
// Remove the elements of p1 whose corresponding mask value is 0.
void maskout_points(vector<Point2f>& p1, Mat& mask);
void maskout_colors(vector<Vec3b>& p1, Mat& mask);
// Bootstrap the reconstruction from the first two images.
void reconstruct_first2imgs(Mat K, vector<vector<KeyPoint>>& key_points_for_all, vector<vector<Vec3b>>& colors_for_all, vector<vector<DMatch>>& matches_for_all, vector<Point3d>& structure, vector<vector<int>>& correspond_struct_idx, vector<Vec3b>& colors, vector<Mat>& rotations, vector<Mat>& translations);
// 3D reconstruction.
// Two-view reconstruction (first image pair): recovers R, t and triangulates.
void reconstruct(Mat& K, vector<Point2f>& points1, vector<Point2f>& points2, Mat& R, Mat& t, Mat& mask, vector<Point3d>& points3D);
// Incremental reconstruction for subsequent images, given both camera poses.
void reconstruct(Mat& K, Mat& R1, Mat& T1, Mat& R2, Mat& T2, vector<Point2f>& points1, vector<Point2f>& points2, vector<Point3d>& points3D);
// Collect already-reconstructed 3D points and their matching 2D pixels (for solvePnP).
void get_objpoints_and_imgpoints(vector<DMatch>& matches, vector<int>& struct_indices, vector<Point3d>& structure, vector<KeyPoint>& key_points, vector<Point3d>& object_points, vector<Point2f>& image_points);
// Fuse a newly triangulated point cloud into the global structure, reusing
// indices for points that were already reconstructed.
void fusion_pointscloud(vector<DMatch>& matches, vector<int>& struct_indices, vector<int>& next_struct_indices, vector<Point3d>& structure, vector<Point3d>& next_structure, vector<Vec3b>& colors, vector<Vec3b>& next_colors);
// Bundle-adjustment refinement of intrinsics, extrinsics and structure.
void bundle_adjustment(Mat& intrinsic, vector<Mat>& extrinsics, vector<vector<int>>& correspond_struct_idx, vector<vector<KeyPoint>>& key_points_for_all, vector<Point3d>& structure);
int main(int argc, char* argv[])
{
vector<string> img_names;
img_names.push_back(".\\images\\000.png");
img_names.push_back(".\\images\\001.png");
img_names.push_back(".\\images\\002.png");
img_names.push_back(".\\images\\003.png");
img_names.push_back(".\\images\\004.png");
//img_names.push_back(".\\images\\005.png");
//img_names.push_back(".\\images\\006.png");
//img_names.push_back(".\\images\\007.png");
//img_names.push_back(".\\images\\008.png");
//img_names.push_back(".\\images\\009.png");
Mat K = (Mat_<double>(3, 3) << 2759.48, 0, 1520.69, 0, 2764.16, 1006.81, 0, 0, 1); // Fountain的内参数矩阵
vector<vector<KeyPoint>> key_points_for_all;
vector<Mat> descriptor_for_all;
vector<vector<Vec3b>> colors_for_all; // 以图片为一个vector单元,存放所有特征点的RGB,防止混淆
vector<vector<DMatch>> matches_for_all;
// 提取所有图像的特征点
extract_features(img_names, key_points_for_all, descriptor_for_all, colors_for_all);
// 对所有图像进行顺次的特征匹配
match_features(descriptor_for_all, matches_for_all);
// 重建前两张图片
vector<Point3d> points3D; // 存放重建后所有三维点
vector<vector<int>> correspond_struct_idx; // 若第i副图像中第j特征点对应位置的值是N,则代表该特征点对应的是重建后的第N个三维点
vector<Vec3b> colors; // 存放重建后所有三维点的RGB(作为最终重建结果,不需要以图片为单元分隔,)
vector<Mat> rotations; // 所有相机相对第一个相机的旋转矩阵
vector<Mat> translations; // 所有相机相对第一个相机的平移矩阵
cout << "key_points_for_all.size() = " << key_points_for_all.size() << endl;
cout << "matches_for_all.size() = " << matches_for_all.size() << endl;
reconstruct_first2imgs(
K,
key_points_for_all,
colors_for_all,
matches_for_all,
points3D,
correspond_struct_idx,
colors,
rotations,
translations);
// 增量方式重建剩余的图像
for (int i = 1; i < matches_for_all.size(); ++i)
{
vector<Point3d> object_points;
vector<Point2f> image_points;
Mat r, R, T;
// 获取第i副图像中匹配点对应的三维点,以及在第i+1副图像中对应的像素点
get_objpoints_and_imgpoints(
matches_for_all[i],
correspond_struct_idx[i],
points3D,
key_points_for_all[i + 1],
object_points,
image_points
);
// 求解变换矩阵:空间中的点与图像中的点的对应关系,即可求解相机在空间中的位置
solvePnPRansac(object_points, image_points, K, noArray(), r, T);
// 将旋转向量转换为旋转矩阵
Rodrigues(r, R);
// 保存变换矩阵
rotations.push_back(R);
translations.push_back(T);
// 根据之前求得的R, T进行三维重建
vector<Point2f> p1, p2;
vector<Vec3b> c1, c2;
get_matched_points(key_points_for_all[i], key_points_for_all[i + 1], matches_for_all[i], p1, p2);
get_matched_colors(colors_for_all[i], colors_for_all[i + 1], matches_for_all[i], c1, c2);
vector<Point3d> next_points3D;
reconstruct(K, rotations[i], translations[i], R, T, p1, p2, next_points3D);
//将新的重建结果与之前的融合
fusion_pointscloud(
matches_for_all[i],
correspond_struct_idx[i],
correspond_struct_idx[i + 1],
points3D,
next_points3D,
colors,
c1
);
cout << "processing " << i - 1 << "-" << i << endl;
}
// BA优化
Mat intrinsic(Matx41d(K.at<double>(0, 0), K.at<double>(1, 1), K.at<double>(0, 2), K.at<double>(1, 2)));
vector<Mat> extrinsics;
for (size_t i = 0; i < rotations.size(); ++i)
{
Mat extrinsic(6, 1, CV_64FC1);
Mat r;
Rodrigues(rotations[i], r);
r.copyTo(extrinsic.rowRange(0, 3));
translations[i].copyTo(extrinsic.rowRange(3, 6));
extrinsics.push_back(extrinsic);
}
bundle_adjustment(intrinsic, extrinsics, correspond_struct_idx, key_points_for_all, points3D);
// PCL可视化
PointCloud<PointXYZRGB>::Ptr cloud(new PointCloud<PointXYZRGB>);
boost::shared_ptr<visualization::PCLVisualizer> viewer(new visualization::PCLVisualizer("3D viewer")); // 实例化PCLVisualizer对象,窗口命名为3D viewer
for (size_t i = 0; i < points3D.size(); i++)
{
PointXYZRGB p;
p.x = points3D[i].x;
p.y = points3D[i].y;
p.z = points3D[i
没有合适的资源?快使用搜索试试~ 我知道了~
SfM三维重建:BA优化【VS2015+OpenCV3.4+PCL1.8+Ceres Solver】
共12个文件
png:10个
cpp:2个
2星 需积分: 46 74 下载量 87 浏览量
2019-03-15
13:49:20
上传
评论 13
收藏 60.5MB ZIP 举报
温馨提示
使用BA优化对SfM三维重建的结果进行非线性优化,环境:VS2015+OpenCV3.4+PCL1.8+Ceres Solver,包含两张图、多张图BA优化代码及images文件。
资源推荐
资源详情
资源评论
收起资源包目录
SfM三维重建:BA优化.zip (12个子文件)
images
002.png 6.09MB
006.png 5.97MB
008.png 6.18MB
003.png 6.07MB
000.png 5.77MB
001.png 5.98MB
009.png 6.24MB
004.png 6.05MB
007.png 6.05MB
005.png 6.07MB
BA_MultiView_Rec.cpp 23KB
BA_Signle2Images.cpp 13KB
共 12 条
- 1
资源评论
- 王小宝60002019-10-23多视图BA优化后为什么只显示几个点
- lvan12342019-09-05真是垃圾什么用也没有$南山种豆$2019-09-06你确定下载过这个文件?最近一次下载是一个月前的。而且文件内容是什么,描述写的很清楚,选择适合自己的,不要盲目下载。ps:11分是系统刷上去的,我现在也改不了,请大家慎重选择
$南山种豆$
- 粉丝: 227
- 资源: 15
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功