@zhuqingzhang
2019-06-26T03:17:56.000000Z
字数 15216
阅读 1627
主要是通过Signature这一个类来获取生成的点云地图以及我们所需要的各种信息。我们通过解包.db文件,可以获得一个由Signature组成的vector,实际上,每个Signature对应了一帧图像数据。
namespace rtabmap
{
// One Signature corresponds to one frame of image data. Unpacking the .db file
// yields a vector of Signatures, from which the generated point-cloud map and
// all other needed information can be extracted.
class RTABMAP_EXP Signature
{
...
private:
    int _id; /// id of the signature, which is also the id of the image.
    int _mapId;
    double _stamp; /// corresponding timestamp
    std::map<int, Link> _links; // id, transform
    std::map<int, Link> _landmarks;
    int _weight;
    std::string _label;
    bool _saved; // If it's saved to bd
    bool _modified;
    bool _linksModified; // Optimization when updating signatures in database
    // Contains all words (Some can be duplicates -> if a word appears 2
    // times in the signature, it will be 2 times in this list)
    // Words match with the CvSeq keypoints and descriptors
    std::multimap<int, cv::KeyPoint> _words; // word <id, keypoint> /// keypoints of each image
    std::multimap<int, cv::Point3f> _words3; // word <id, point> // in base_link frame (localTransform applied)) /// 3D points, in the vehicle (base_link) frame, corresponding to each image's keypoints
    std::multimap<int, cv::Mat> _wordsDescriptors; /// descriptor of each feature point
    std::map<int, int> _wordsChanged; // <oldId, newId>
    bool _enabled;
    int _invalidWordsCount;
    Transform _pose; /// pose of base_link.
    Transform _groundTruthPose;
    std::vector<float> _velocity;
    SensorData _sensorData; /// camera information is obtained from here, including the raw image data.
};
}
SensorData包含着各种传感器的数据信息。对于我们来说,主要是相机的信息。
namespace rtabmap
{
// SensorData holds the data of all sensors for one frame; for our purposes,
// mainly the camera data.
class RTABMAP_EXP SensorData
{
    int _id; /// id of the sensor data; identical to the id of the owning Signature.
    double _stamp;
    cv::Mat _imageCompressed; // compressed image
    cv::Mat _depthOrRightCompressed; // compressed image
    LaserScan _laserScanCompressed; // compressed data
    cv::Mat _imageRaw; // CV_8UC1 or CV_8UC3 /// the raw camera image is obtained through this member. NOTE: not every Signature's SensorData has data in _imageRaw; _imageRaw only keeps keyframe images. In this program every third frame is a keyframe.
    cv::Mat _depthOrRightRaw; // depth CV_16UC1 or CV_32FC1, right image CV_8UC1 /// for RGB-D input this member holds the depth image; for stereo input it holds the right image
    LaserScan _laserScanRaw;
    std::vector<CameraModel> _cameraModels;
    StereoCameraModel _stereoCameraModel; /// the camera intrinsic/extrinsic parameters are obtained through this member.
    // user data
    cv::Mat _userDataCompressed; // compressed data
    cv::Mat _userDataRaw;
    // occupancy grid
    cv::Mat _groundCellsCompressed;
    cv::Mat _obstacleCellsCompressed;
    cv::Mat _emptyCellsCompressed;
    cv::Mat _groundCellsRaw;
    cv::Mat _obstacleCellsRaw;
    cv::Mat _emptyCellsRaw;
    float _cellSize;
    cv::Point3f _viewPoint;
    // environmental sensors
    EnvSensors _envSensors;
    // landmarks
    Landmarks _landmarks;
    // features NOTE: _keypoints, _keypoints3D and _descriptors are empty here;
    // do not read them from this member -- get them from the Signature instead.
    std::vector<cv::KeyPoint> _keypoints;
    std::vector<cv::Point3f> _keypoints3D;
    cv::Mat _descriptors;
    Transform groundTruth_;
    Transform globalPose_;
    cv::Mat globalPoseCovariance_; // 6x6 double
    GPS gps_;
    IMU imu_;
};
}
/// Entry point: loads an RTAB-Map database (path given as the last command-line
/// argument), retrieves the optimized 3D map, assembles the RGB point cloud of
/// every frame in world coordinates, voxel-filters it, and saves it to a PLY file.
/// Returns 0 on success, 1 when the database cannot be opened.
int main(int argc, char * argv[])
{
    if(argc < 2)
    {
        showUsage();
    }
    std::string dbPath = argv[argc-1];
    // Get parameters
    ParametersMap parameters;
    DBDriver * driver = DBDriver::create();
    if(driver->openConnection(dbPath)) // load the .db data
    {
        cout<<"loading database..."<<endl;
        parameters = driver->getLastParameters();
        driver->closeConnection(false);
    }
    else
    {
        UERROR("Cannot open database %s!", dbPath.c_str());
        // BUGFIX: the original code fell through here and kept running (and even
        // printed "database is successfully loaded!") with empty parameters.
        // Release the driver and abort instead.
        delete driver;
        return 1;
    }
    delete driver;
    cout<<"database is successfully loaded!"<<endl;
    // Get the global optimized map
    Rtabmap rtabmap;
    cout<<"initial rtabmap..."<<endl;
    rtabmap.init(parameters, dbPath);
    cout<<"initial finished..."<<endl;
    std::map<int, Signature> nodes;
    std::map<int, Transform> optimizedPoses;
    std::multimap<int, Link> links;
    std::map<int,double> stamps;
    //rtabmap.get3DMap(nodes, optimizedPoses, links, true, true);
    cout<<"getting3D map..."<<endl;
    // Fetch all data into nodes; optimizedPoses holds the base_link pose of every
    // image. The 4th argument selects global optimization -- since a reference
    // trajectory was supplied, optimizing would degrade the result, so it is off.
    rtabmap.get3DMap(nodes, optimizedPoses, links, false, true);
    ofstream file,file_2;
    file.open("poses.txt");
    file_2.open("image_id.txt");
    ifstream ifile;
    int decimation=2;
    int tmpNoNaN_index=0;
    double count_line_point=0;
    // Per-frame filtered clouds and NaN-filter indices, keyed by tmpNoNaN_index;
    // populated here for the lane-line extraction stage built on top of this program.
    map<int,pcl::PointCloud<pcl::PointXYZRGB>::Ptr> index_cloud_tmpNoNaN;
    map<int,vector<int>> index_map;
    vector<cloudpoint_line> clouds_lines_infos; //save final info;
    cout<<"indeces number: "<<optimizedPoses.size()<<endl;
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>);
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr line_cloudpoints(new pcl::PointCloud<pcl::PointXYZRGB>);
    for(std::map<int, Transform>::iterator iter=optimizedPoses.begin(); iter!=optimizedPoses.end(); ++iter) // iterate over every frame
    {
        Signature node = nodes.find(iter->first)->second; // Signature of the current frame (see part 1 for its contents)
        stamps.insert(make_pair(iter->first,node.getStamp()));
        // uncompress data
        node.sensorData().uncompressData();
        cout<<"processing index "<<node.id()<<"("<<iter->first<<")/"<<optimizedPoses.size()<<endl;
        pcl::PointCloud<pcl::PointXYZRGB>::Ptr tmp = util3d::cloudRGBFromSensorData(
                node.sensorData(),
                decimation, // image decimation before creating the clouds
                20.0f,      // maximum depth of the cloud
                0.0f);
        // tmp is an organized cloud whose width/height match the image, so the
        // point at (u,v) in tmp corresponds to the pixel at (u,v) in the image.
        pcl::PointCloud<pcl::PointXYZRGB>::Ptr tmpNoNaN(new pcl::PointCloud<pcl::PointXYZRGB>);
        std::vector<int> index;
        // Remove points with infinite/NaN coordinates. tmpNoNaN is unorganized
        // (height 1); the mapping back to the organized cloud is
        // tmpNoNaN->points[i]=tmp->points[index[i]], with index filled by PCL.
        pcl::removeNaNFromPointCloud(*tmp, *tmpNoNaN, index);
        if(!tmpNoNaN->empty())
        {
            index_cloud_tmpNoNaN.insert(make_pair(tmpNoNaN_index,tmpNoNaN));
            index_map.insert(make_pair(tmpNoNaN_index,index));
            *cloud += *util3d::transformPointCloud(tmpNoNaN, iter->second); // transform the point cloud to its pose and accumulate it into the final cloud
        }
    }
    if(cloud->size())
    {
        // save the assembled cloud
        printf("Voxel grid filtering of the assembled cloud (voxel=%f, %d points)\n", 0.01f, (int)cloud->size());
        cloud = util3d::voxelize(cloud, 0.01f);
        printf("Saving rtabmap_cloud.pcd... done! (%d points)\n", (int)cloud->size());
        //pcl::io::savePCDFile("rtabmap_cloud.pcd", *cloud);
        pcl::io::savePLYFile("kitti_00_clouds.ply", *cloud); // to save in PLY format
    }
    else
    {
        printf("Saving rtabmap_cloud.ply... failed! The cloud is empty.\n");
    }
    rtabmap.close(false);
    return 0;
}
此程序是在2.中的程序的基础上扩展而成的。需要注意的是,通过util3d::cloudRGBFromSensorData得到的点云tmp是带结构(有组织)的点云,点云的宽度和高度与图像对应,所以可以将图像中(u,v)位置的像素与点云中(u,v)位置的点相对应。但是最终生成的点云cloud是无结构的点云,其高度为1,相当于用一个vector存放了所有的点。所以需要一个index将tmp中的点的id信息与最终cloud中的id信息相对应。具体的程序如下:
/* Two helper structs are defined */
// Holds the information of a single lane-line pixel.
// BUGFIX: in the original, the per-field descriptions were bare (un-commented)
// text after the semicolons, which does not compile; they are comments now.
typedef struct line_info{
    int line_id_;       // id of the lane line this pixel belongs to
    int line_type_;     // type of the lane line this pixel belongs to
    int line_color_id_; // color id of the lane line this pixel belongs to
    int b_;             // BGR value of this lane-line pixel
    int g_;
    int r_;
    int u_;             // (u,v) coordinates of this pixel in the image
    int v_;
}Line_Info;
// Records the correspondence between a lane-line pixel and the point cloud
typedef struct cloud_with_line{
    Line_Info line_info_; // info of one pixel on some lane line
    int cloud_id_; // id of this pixel's point within the whole assembled cloud
    int image_id_; // which image this pixel comes from
    int cloud_begin_id_; // start id, within the whole cloud, of the frame cloud generated from that image
    int cloud_end_id_;   // ...and its end id
}cloudpoint_line;
/** The walkthrough starts directly from the big for loop **/
int tmpNoNaN_index=0; // index/count of the tmpNoNaN clouds; NOTE(review): it is incremented only inside the keyframe (non-empty image) branch below -- confirm this matches the non-empty-cloud insertions above
for(std::map<int, Transform>::iterator iter=optimizedPoses.begin(); iter!=optimizedPoses.end(); ++iter)
{
    Signature node = nodes.find(iter->first)->second; // get the Signature of the current frame
    stamps.insert(make_pair(iter->first,node.getStamp()));
    // uncompress data
    node.sensorData().uncompressData();
    cout<<"processing index "<<node.id()<<"("<<iter->first<<")/"<<optimizedPoses.size()<<endl;
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr tmp = util3d::cloudRGBFromSensorData(
            node.sensorData(),
            decimation, // image decimation before creating the clouds
            20.0f,      // maximum depth of the cloud
            0.0f);
    // Organized cloud generated from the current frame's image. Not every node
    // stores an image, so only keyframes actually produce a cloud here.
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr tmpNoNaN(new pcl::PointCloud<pcl::PointXYZRGB>);
    std::vector<int> index;
    pcl::removeNaNFromPointCloud(*tmp, *tmpNoNaN, index); // drop invalid points, producing the unorganized cloud tmpNoNaN; organized tmp and unorganized tmpNoNaN are related by: tmpNoNaN->points[i]=tmp->points[index[i]];
    if(!tmpNoNaN->empty())
    {
        index_cloud_tmpNoNaN.insert(make_pair(tmpNoNaN_index,tmpNoNaN)); // store tmpNoNaN under tmpNoNaN_index, so tmpNoNaN can be retrieved by tmpNoNaN_index later
        index_map.insert(make_pair(tmpNoNaN_index,index)); // store index under tmpNoNaN_index.
        // With these two maps, tmpNoNaN_index retrieves both index and tmpNoNaN, and the
        // relation tmpNoNaN->points[i]=tmp->points[index[i]] maps every cloud point back
        // to its pixel position in the image.
        *cloud += *util3d::transformPointCloud(tmpNoNaN, iter->second); // transform the point cloud to its pose // accumulate every frame's cloud into the final cloud.
    }
    Transform pose_signature=node.getPose(); // pose taken from the signature
    Transform pose_optimize=iter->second; // pose taken from optimizedPoses
    Eigen::Quaternionf q_s = pose_signature.getQuaternionf();
    Eigen::Quaternionf q_o = pose_optimize.getQuaternionf();
    //signature's poses without optimize.
    file<<"signature"<<" "<<to_string(pose_signature.x())<<" "<<to_string(pose_signature.y())<<" "<<to_string(pose_signature.z())<<" "
        <<to_string(q_s.x())<<" "<<to_string(q_s.y())<<" "<<to_string(q_s.z())<<" "<<to_string(q_s.w())<<endl;
    file<<"optimize"<<" "<<to_string(pose_optimize.x())<<" "<<to_string(pose_optimize.y())<<" "<<to_string(pose_optimize.z())<<" "
        <<to_string(q_o.x())<<" "<<to_string(q_o.y())<<" "<<to_string(q_o.z())<<" "<<to_string(q_o.w())<<endl; // write both poses for comparison: when optimizedPoses holds non-optimized poses the two are identical
    SensorData data=node.sensorData(); // sensor data, including the left/right images.
    StereoCameraModel model=data.stereoCameraModel(); // stereo camera model, including intrinsics and extrinsics
    // cout<<"local_transform: "<<endl<<model.left().localTransform()<<endl;
    if(!data.imageRaw().empty()) // not every node stores an image; only keyframes do.
    {
        pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_temp(new pcl::PointCloud<pcl::PointXYZRGB>);
        cloud_temp=cloud;
        cout<<"cloud_temp's size: "<<cloud_temp->width<<"*"<<cloud_temp->height<<endl;
        cv::Mat outimage,outimage_;
        outimage=cv::Mat::zeros(data.imageRaw().rows,data.imageRaw().cols,data.imageRaw().type());
        file_2<<"signature_id: "<<node.id()<<" "<<"sensordata_id: "<<data.id()<<endl; // signature id and sensordata id are identical; both start at 1, while our image file names start at 0, hence node.id()=image_name_id+1
        cout<<"image_id: "<<data.id()<<endl;
        //imshow("image",data.imageRaw());
        cv::Mat line_image;
        string input=std::to_string((data.id()-1)); // input holds the image file name (stem).
        while(input.size()<10) // left-pad with zeros so the name is 10 digits long.
        {
            input="0"+input;
        }
        string line_image_name=line_image_dir+input+".png"; // name of the segmented lane-line image; it corresponds one-to-one to the raw stereo image name.
        string line_info_name=info_dir+input+".txt"; // the matching lane-line info file
        line_image=cv::imread(line_image_name); // load the lane-line image
        if(line_image.empty()) // some image ids contain no lane line, so the file may not exist; continue with the next iteration.
        {
            cout<<"the image "<<data.id()-1<<" doesn't exist..."<<endl;
            tmpNoNaN_index++; // don't forget to increment tmpNoNaN_index.
            continue;
        }
        ifile.open(line_info_name.data()); // read the lane-line info of the current image
        if(!ifile.is_open())
        {
            cout<<"cannot open the line_info_file..."<<endl;
            tmpNoNaN_index++;
            continue;
        }
        string str,result;
        Line_Info lineinfo;
        vector<Line_Info> lineinfos; // holds all lane-line records of one image
        lineinfos.clear();
        while (getline(ifile,str)) {
            int count=0;
            stringstream line;
            line<<str;
            while(line>>result)
            {
                count++;
            }
            if(count!=6) // some files have incomplete lane-line records; a line with missing fields is ignored.
            {
                continue;
            }
            line.clear();
            line<<str;
            //cout<<"line: "<<str<<endl;
            line>>result;
            lineinfo.line_id_=atoi(result.c_str());
            // cout<<"line_id_:"<<lineinfo.line_id_<<" ";
            line>>result;
            lineinfo.line_type_=atoi(result.c_str());
            //cout<<"line_type_:"<<lineinfo.line_type_<<" ";
            line>>result;
            lineinfo.line_color_id_=atoi(result.c_str());
            // cout<<"line_color_id_:"<<lineinfo.line_color_id_<<" ";
            line>>result;
            lineinfo.b_=atoi(result.c_str());
            // cout<<"b_:"<<lineinfo.b_<<" ";
            line>>result;
            lineinfo.g_=atoi(result.c_str());
            // cout<<"g_:"<<lineinfo.g_<<" ";
            line>>result;
            lineinfo.r_=atoi(result.c_str());
            // cout<<"r_:"<<lineinfo.r_<<endl;
            lineinfos.push_back(lineinfo); // store every lane-line record of the current image
        }
        ifile.close();
        cloudpoint_line cloud_line_info; // struct defined earlier; records the pixel <-> cloud-point correspondence
        int start=0;
        for(int i=0;i<tmpNoNaN_index;i++)
        {
            start+=index_cloud_tmpNoNaN[i]->width; // compute the starting id of the current frame's cloud inside the assembled cloud
        }
        cout<<"start: "<<start<<endl;
        cout<<"tmpNoNan_index: "<<tmpNoNaN_index<<endl;
        cout<<"end: "<<start+index_cloud_tmpNoNaN[tmpNoNaN_index]->width-1<<endl; // ending id of the current frame's cloud inside the assembled cloud.
        // iterate over every pixel of the lane-line image
        for(int v=0;v<line_image.rows;v++)
        {
            for(int u=0;u<line_image.cols;u++)
            {
                // BGR value of the current pixel
                int b=line_image.data[v*line_image.step+u*line_image.channels()];
                int g=line_image.data[v*line_image.step+u*line_image.channels()+1];
                int r=line_image.data[v*line_image.step+u*line_image.channels()+2];
                // any non-zero channel means a lane line passes through this pixel
                if(b!=0||r!=0||g!=0)
                {
                    // scan all lane-line records of this frame, matching by BGR, to find which line the pixel belongs to
                    for(int i=0;i<lineinfos.size();i++)
                    {
                        // when all three channels match, the pixel belongs to the line described by lineinfos[i]
                        if(b==lineinfos[i].b_&&g==lineinfos[i].g_&&r==lineinfos[i].r_)
                        {
                            lineinfos[i].u_=u; // store the pixel position
                            lineinfos[i].v_=v;
                            // Line_Info is now complete; copy it into the cloudpoint_line struct
                            cloud_line_info.line_info_=lineinfos[i];
                            cloud_line_info.image_id_=data.id()-1; // note the image name id is data.id() minus 1.
                            cloud_line_info.cloud_begin_id_=start;
                            cloud_line_info.cloud_end_id_=start+index_cloud_tmpNoNaN[tmpNoNaN_index]->width-1;
                            // derive the organized-cloud id from (u,v): (0,0) -> 0, (0,1) -> 1, and so on
                            // (assumes the organized cloud is line_image.cols/decimation points wide -- TODO confirm against cloudRGBFromSensorData)
                            int id=v/(decimation)*(line_image.cols/decimation)+u/decimation;
                            int mark=0;
                            for(int index_id=0;index_id<index.size();index_id++)
                            {
                                // use index to locate the corresponding id inside tmpNoNaN.
                                if(index[index_id]==id)
                                {
                                    cloud_line_info.cloud_id_=start+index_id; // id inside the whole assembled cloud
                                    count_line_point++;
                                    mark=1;
                                    break;
                                }
                            }
                            if(mark==0)
                            {
                                // the (u,v) pixel has no corresponding point in tmpNoNaN; mark it with -1
                                cloud_line_info.cloud_id_=-1; //the line point has no corresponding pointclouds.
                            }
                            // cout<<"insert the cloud_line_info into the final container..."<<endl;
                            clouds_lines_infos.push_back(cloud_line_info);
                            break;
                        }
                    }
                }
            }
        }
        // split each frame's cloud out of the assembled cloud and back-project it onto the image
        // for(int i=start,j=0;i<(start+(int)index_cloud_tmpNoNaN[tmpNoNaN_index]->width);i++,j++)
        // {
        // int id=index_map[tmpNoNaN_index][j]; //correspond to unfiltered pointclouds id
        // int h=id/(outimage.cols/decimation)*decimation;
        // int w=id%(outimage.cols/decimation)*decimation;
        //// cout<<"i: "<<i<<" "<<"id: "<<id<<endl;
        //// cout<<"h,w: "<<h<<","<<w<<endl;
        //// outimage_.at<cv::Vec3b>(h,w)[0]=tmp->points[id].b;
        //// outimage_.at<cv::Vec3b>(h,w)[1]=tmp->points[id].g;
        //// outimage_.at<cv::Vec3b>(h,w)[2]=tmp->points[id].r;
        // outimage.at<cv::Vec3b>(h,w)[0]=cloud_temp->points[i].b;
        // outimage.at<cv::Vec3b>(h,w)[1]=cloud_temp->points[i].g;
        // outimage.at<cv::Vec3b>(h,w)[2]=cloud_temp->points[i].r;
        // }
        cout<<"get line_clouds..."<<endl;
        // traverse clouds_lines_infos and collect every lane-line point
        for(int i=0;i<clouds_lines_infos.size();i++)
        {
            if(clouds_lines_infos[i].image_id_==(data.id()-1))
            {
                int r=clouds_lines_infos[i].line_info_.r_;
                int b=clouds_lines_infos[i].line_info_.b_;
                int g=clouds_lines_infos[i].line_info_.g_;
                int u=clouds_lines_infos[i].line_info_.u_;
                int v=clouds_lines_infos[i].line_info_.v_;
                // outimage.at<cv::Vec3b>(v,u)[0]=b;
                // outimage.at<cv::Vec3b>(v,u)[1]=g;
                // outimage.at<cv::Vec3b>(v,u)[2]=r;
                pcl::PointXYZRGB point,point_info;
                int point_id;
                point_id=clouds_lines_infos[i].cloud_id_;
                if(point_id!=-1)
                {
                    point=cloud->points[point_id];
                    point.r=r;
                    point.b=b;
                    point.g=g;
                    line_cloudpoints->push_back(point); // store each lane-line cloud point
                }
            }
        }
        tmpNoNaN_index++; // don't forget the increment
        // visualize the lane-line cloud produced at each step
        // pcl::visualization::CloudViewer viewer_ply("viewer_ply");
        // viewer_ply.showCloud(line_cloudpoints);
        // while(!viewer_ply.wasStopped())
        // {
        // }
        //get keypoints
        // multimap<int,cv::KeyPoint> words=node.getWords();
        // vector<cv::KeyPoint> kps;
        /* if(words.empty())
        cout<<"words are empty..."<<endl;
        else {
        for (auto it = words.begin(); it != words.end(); it = words.upper_bound(it->first)){
        std::cout <<"key = " << it->first <<std::endl;
        std::pair<std::multimap<int,cv::KeyPoint>::iterator,std::multimap<int,cv::KeyPoint>::iterator> lu = words.equal_range(it->first);
        for (auto i = lu.first; i != lu.second; i++){
        std::cout << i->first << " = " << i->second.pt << std::endl;
        kps.push_back(it->second);
        }
        }
        cout<<"key points size: "<<kps.size()<<endl;
        cv::drawKeypoints(data.imageRaw(),kps,outimage);
        cv::imshow("keypoint",outimage);
        cv::waitKey(0);
        }*/
        // cv::imshow("outimage",outimage);
        //// cv::imshow("outimage_",outimage_);
        // cv::imshow("image_raw",data.imageRaw());
        // cv::waitKey(0);
    }
}
file.close();
file_2.close();
cout<<"finish cloud_process..."<<endl;
cout<<"line_cloudpoints size: "<<to_string(line_cloudpoints->points.size())<<" "<<to_string(count_line_point)<<endl;
pcl::io::savePLYFile("line_cloudpoints.ply", *line_cloudpoints); // save the lane-line cloud
pcl::visualization::CloudViewer viewer_ply("viewer_ply"); // visualize the lane-line cloud
viewer_ply.showCloud(line_cloudpoints);
while(!viewer_ply.wasStopped())
{
}