#include "stdafx.h"
#include<iostream>
#include"Camera.h"
#include"CPointcloud.h"
#include<opencv\cv.h>
#include<opencv\highgui.h>
#include<opencv2\opencv.hpp>
#include<opencv2\highgui\highgui.hpp>
#include<opencv2\core\core.hpp>
#include<string>
#include<fstream>
#include<vector>
#include<time.h>
#include <cv.h>
using namespace std;
//存PLY格式点云的子函数
void save_PLY(vector<ColorPointCloud> vPointCloud, string s)
{
char vertex_num[8];
_itoa_s (vPointCloud.size(), vertex_num, 10);
string ims = "element vertex ";
string header = ims + vertex_num;
ofstream outfile(s, std::ios::ate | std::ios::binary);
outfile << "ply" << endl;
outfile << "format binary_little_endian 1.0" << endl;
outfile << "comment VCGLIB generated" << endl;
outfile << header << endl;
outfile << "property float x" << endl;
outfile << "property float y" << endl;
outfile << "property float z" << endl;
outfile << "property uchar red" << endl;
outfile << "property uchar green" << endl;
outfile << "property uchar blue" << endl;
outfile << "property uchar alpha" << endl;
outfile << "element face 0" << endl;
outfile << "property list uchar int vertex_indices" << endl;
outfile << "end_header" << endl;
for (int i = 0; i< vPointCloud.size(); i++)
{
outfile.write((char*)(&vPointCloud[i]), sizeof(vPointCloud[i]));
}
outfile.close();
}
// Build a colored point cloud by back-projecting a 640x480 depth map into 3D,
// transforming each point into the color camera's frame, sampling its RGB
// value, and writing the result as a binary PLY file. Timing is reported on
// exit. Calibration data is loaded from XML files under ..\..\camera2_data.
int main()
{
	clock_t nTimeStart;  // timing: start
	clock_t nTimefinish; // timing: end
	nTimeStart = clock();
	// Color camera and depth camera models.
	Camera MyColorCam;
	DepthCamera MyDepthCam;
	// Output colored point cloud.
	vector<ColorPointCloud> MyPointCloud;
	ColorPointCloud Apoint;
	// Load calibration parameters (legacy CvMat / cvLoad API).
	// NOTE(review): these CvMat* are never released; the Set* calls below may
	// retain the pointers, so cvReleaseMat here could dangle -- verify before
	// adding cleanup. They are reclaimed by the OS at process exit anyway.
	// Intrinsics of each camera.
	CvMat *CIntrinsic = (CvMat*)cvLoad("..\\..\\camera2_data\\IntrinsicsRGB2.xml");
	CvMat *DIntrinsic = (CvMat*)cvLoad("..\\..\\camera2_data\\IntrinsicsIR2.xml");
	// Lens distortion coefficients.
	CvMat *DistortionRGB1 = (CvMat*)cvLoad("..\\..\\camera2_data\\DistortionRGB2.xml");
	CvMat *DistortionIR1 = (CvMat*)cvLoad("..\\..\\camera2_data\\DistortionIR2.xml");
	// Depth-conversion coefficients.
	CvMat *dc_a1 = (CvMat*)cvLoad("..\\..\\camera2_data\\dc_a2.xml"); // a0, a1
	CvMat *dc_b1 = (CvMat*)cvLoad("..\\..\\camera2_data\\dc_b2.xml"); // per-pixel beta correction
	CvMat *dc1 = (CvMat*)cvLoad("..\\..\\camera2_data\\dc2.xml");     // a, b used to invert OpenNI depth
	// Extrinsics between depth and color cameras.
	CvMat *R1 = (CvMat*)cvLoad("..\\..\\camera2_data\\R2.xml");
	CvMat *T1 = (CvMat*)cvLoad("..\\..\\camera2_data\\T2.xml");
	// Hand the intrinsic parameters to the camera objects.
	MyColorCam.SetCameraIntrisic(CIntrinsic);
	MyDepthCam.SetCameraIntrisic(DIntrinsic);
	// Hand over the distortion coefficients.
	MyColorCam.SetCameraDistort(DistortionRGB1);
	MyDepthCam.SetCameraDistort(DistortionIR1);
	// a0, a1 for the depth camera.
	MyDepthCam.setmda(dc_a1);
	// a, b used to convert OpenNI depth back to raw disparity.
	MyDepthCam.setmDC(dc1);
	// Per-pixel disparity correction (beta).
	MyDepthCam.setmdb_(dc_b1);
	// Relative pose of the depth camera w.r.t. the color camera.
	MyDepthCam.setR(R1);
	MyDepthCam.setT(T1);
	// Load the color image and validate it.
	cv::Mat CImg = cv::imread("..\\..\\camera2_data\\data\\3c1125.jpg");
	if (CImg.empty()) // path wrong or unreadable file
	{
		cout << "error in loading ColorImage" << endl;
		exit(1);
	}
	if (CImg.channels() != 3)
	{
		cout << " not a color image" << endl;
		exit(1);
	}
	// Depth map is read into a 480x640 2D array; heap-allocated because a
	// ~1.2 MB local array would overflow the stack.
	float(*DImg)[640] = new float[480][640];
	ifstream f("..\\..\\camera2_data\\data\\3d1125.txt", ios::in | ios::binary);
	if (!f)
	{
		cout << "error in loading DepthImage" << endl;
		exit(1);
	}
	for (int i = 0; i < 480; i++)
	{
		for (int j = 0; j < 640; j++)
		{
			float Xn, Yn;    // normalized (back-projected) x, y
			float Xd, Yd;    // distortion-corrected x, y
			float u, v;      // projection of the 3D point into the color image
			float Pos[3];    // 3D position of the current pixel (depth frame)
			int Color[4];    // r, g, b, a of the current point
			float NPos[3];   // 3D position in the color camera frame
			// Binary read of one float depth value; length must be explicit.
			f.read(reinterpret_cast<char *>(&DImg[i][j]), sizeof(float));
			// OpenNI depth -> raw disparity -> per-pixel undistorted
			// disparity -> metric depth.
			DImg[i][j] = MyDepthCam.ConvertOpenNIDepthToRawDisparity(DImg[i][j]);
			DImg[i][j] = MyDepthCam.UndistortDisparity(DImg[i][j], i, j);
			DImg[i][j] = MyDepthCam.DisparityToDepth(DImg[i][j]);
			// NOTE: argument order is (j, i) -- j runs along fx, i along fy.
			MyDepthCam.ConvertProjectiveToNormalized(j, i, Xn, Yn);
			MyDepthCam.DistortNormalized(Xn, Yn, Xd, Yd);
			// 3D position of this pixel in the depth camera frame.
			Pos[0] = Xd * DImg[i][j];
			Pos[1] = Yd * DImg[i][j];
			Pos[2] = DImg[i][j];
			// Transform into the color camera frame.
			MyDepthCam.Transform3DPoint(Pos, NPos);
			// Project into the color image (OpenCV pixels are BGR).
			MyColorCam.ConvertRealWorld3DToProjective(NPos, u, v);
			// Keep the point only if it lands inside the image and has a
			// non-zero depth. BUGFIX: the original tested `&Pos[2] != 0`
			// (address of the element, always true), so zero-depth points
			// were never filtered out.
			if (u >= 0 && u < 640 && v >= 0 && v < 480 && Pos[2] != 0)
			{
				Apoint.SetPointPos(Pos);
				// Explicit truncation of (u, v); note the (row, col) =
				// (v, u) order for cv::Mat::at.
				Color[0] = (int)CImg.at<cv::Vec3b>((int)v, (int)u)[2];
				Color[1] = (int)CImg.at<cv::Vec3b>((int)v, (int)u)[1];
				Color[2] = (int)CImg.at<cv::Vec3b>((int)v, (int)u)[0];
				Color[3] = 255;
				Apoint.SetPointColor(Color);
			}
			else
				continue;
			// Accumulate into the cloud.
			MyPointCloud.push_back(Apoint);
		}
	}
	delete[] DImg; // release the heap-allocated depth buffer
	// Destination path for the point cloud.
	string s = "..\\..\\camera2_data\\res\\1125.ply";
	save_PLY(MyPointCloud, s);
	nTimefinish = clock();
	cout << "共耗时:" << (double)(nTimefinish - nTimeStart) / CLOCKS_PER_SEC << "秒" << endl;
	return 0;
}