Contents
DLO-SLAM
Review
Code Analysis
  OdomNode
    Code Structure
    Main function: main
    LiDAR callback: icpCB
    Initialization: initializeDLO
    Gravity alignment: gravityAlign
    Point cloud preprocessing: preprocessPoints
    Keyframe metric: computeMetrics
    Setting the keyframe threshold: setAdaptiveParams
    Initializing the target data: initializeInputTarget
    Setting the source data: setInputSources
    Getting the next pose: getNextPose
    IMU inter-frame integration: integrateIMU
    Getting the submap keyframes: getSubmapKeyframes
    Selecting the k nearest frame indices: pushSubmapIndices
    Keyframe convex hull: computeConvexHull
    Updating keyframes: updateKeyframes
    IMU callback: imuCB
  MapNode
    Main function
    Constructor: MapNode
    Keyframe callback: keyframeCB
    Saving the map: savePcd
Related
  Point cloud surface reconstruction
  ConvexHull (convex hull)
  ConcaveHull (concave hull)
End

DLO-SLAM
The SLAM paper "Direct LiDAR Odometry: Fast Localization with Dense Point Clouds", new work from NASA JPL's CoSTAR team, has received wide attention in the research community. It was developed mainly as the odometry for the DARPA Subterranean Challenge and proposes a LiDAR odometry (LO) approach that processes dense point clouds in real time at high speed and with high accuracy. Their open-source code is available on GitHub.
Review
The code contains some nice small tricks. The method matches dense point clouds directly and claims higher accuracy than feature-extraction approaches (the LOAM family). To keep the computational cost down, it skips point cloud deskewing, and it selects keyframes with an adaptively determined distance threshold that shrinks in narrow passages. Registration is done both scan-to-scan and scan-to-submap, where the submap is built in keyframe space: it consists of the nearest keyframes plus the keyframes on the convex hull and concave hull of the keyframe positions, and it is regenerated only when the set of contributing keyframe indices changes. Matching uses NanoGICP, a lightweight point cloud registration solver. Submap covariances and normals are approximated from the associated keyframes; in the code, the scan-to-scan covariances are reused directly, so they never need to be recomputed.
Code Analysis
OdomNode
Code Structure
OdomNode call structure (from the original class diagram):
- imuCB: imu_calibrated? -> imu_buffer / get IMU measurement
- icpCB: dlo_initialized? adaptive_params_use_? target_cloud == nullptr? -> initializeDLO() -> preprocessPoints() -> computeMetrics() (thread) -> setAdaptiveParams() -> initializeInputTarget() (first frame) -> setInputSources() -> getNextPose() -> updateKeyframes() -> publishToROS() / debug() (threads)
- getNextPose: IMU -> S2S -> S2M pose: gicp_s2s.align(imu_SE3) -> propagateS2S(T_S2S) -> getSubmapKeyframes() -> gicp.align(this->T_s2s) -> propagateS2M()

Main function: main
Description
The main function does very little: it constructs an OdomNode, registers a handler for the termination signal, and then waits for ROS to shut down. OdomNode is driven by two callbacks, the LiDAR callback and the IMU callback, described in detail below.
void controlC(int sig) {
  dlo::OdomNode::abort();
}

int main(int argc, char** argv) {
  ros::init(argc, argv, "dlo_odom_node");
  ros::NodeHandle nh("~");

  signal(SIGTERM, controlC);
  sleep(0.5);

  dlo::OdomNode node(nh);
  ros::AsyncSpinner spinner(0);
  spinner.start();
  node.start();

  ros::waitForShutdown();
  return 0;
}

The core is dlo::OdomNode.
Core members
// Subscribers
ros::Subscriber icp_sub;
ros::Subscriber imu_sub;
this->icp_sub = this->nh.subscribe("pointcloud", 1, &dlo::OdomNode::icpCB, this);
this->imu_sub = this->nh.subscribe("imu", 1, &dlo::OdomNode::imuCB, this);

// Publishers
ros::Publisher odom_pub;
ros::Publisher pose_pub;
ros::Publisher keyframe_pub;
ros::Publisher kf_pub;
this->odom_pub = this->nh.advertise<nav_msgs::Odometry>("odom", 1);
this->pose_pub = this->nh.advertise<geometry_msgs::PoseStamped>("pose", 1);
this->kf_pub = this->nh.advertise<nav_msgs::Odometry>("kfs", 1, true);
this->keyframe_pub = this->nh.advertise<sensor_msgs::PointCloud2>("keyframe", 1, true);

LiDAR callback: icpCB
Main functionality
Obtain the pose through a chain of IMU prior -> scan-to-scan (S2S) -> scan-to-submap (S2M) matching.
Steps
- Two early-exit checks: return immediately if the point cloud has too few points, or if DLO is not yet initialized (initialize first: IMU calibration, gravity alignment).
- Preprocess the point cloud: remove NaN points, crop-box filter, voxel filter.
- Compute the keyframe metric in a separate thread: take the median range of the scan, low-pass filter it, and use it later to set the keyframe selection threshold.
- Set the keyframe threshold from the metric (setAdaptiveParams).
- If the target cloud is still empty, initialize it (initializeInputTarget) and return.
- Set the current scan as the source cloud.
- Get the pose of the current frame via getNextPose (IMU, S2S, and S2M acting together).
- Update keyframes: count keyframes near the current pose and find the closest keyframe distance and id.
- Launch a separate thread to publish data to ROS.
- Launch a separate thread for debug output and the custom DLO message.
/**
 * ICP Point Cloud Callback
 **/
void dlo::OdomNode::icpCB(const sensor_msgs::PointCloud2ConstPtr& pc) {

  double then = ros::Time::now().toSec();

  // Update the current scan time
  this->scan_stamp = pc->header.stamp;
  this->curr_frame_stamp = pc->header.stamp.toSec();

  // If there are too few points in the pointcloud, try again
  // (drop the frame if it contains too few points)
  this->current_scan = pcl::PointCloud<PointType>::Ptr (new pcl::PointCloud<PointType>);
  pcl::fromROSMsg(*pc, *this->current_scan);
  if (this->current_scan->points.size() < this->gicp_min_num_points_) {
    ROS_WARN("Low number of points!");
    return;
  }

  // DLO Initialization procedures (IMU calib, gravity align)
  if (!this->dlo_initialized) {
    this->initializeDLO();
    return;
  }

  // Preprocess points
  this->preprocessPoints();

  // Compute Metrics (in a separate thread)
  this->metrics_thread = std::thread( &dlo::OdomNode::computeMetrics, this );
  this->metrics_thread.detach();  // detach() decouples the child thread from the main thread

  // Set Adaptive Parameters: keyframe threshold from the spaciousness metric
  if (this->adaptive_params_use_){
    this->setAdaptiveParams();
  }

  // Set initial frame as target
  if (this->target_cloud == nullptr) {
    this->initializeInputTarget();
    return;
  }

  // Set source frame
  this->source_cloud = pcl::PointCloud<PointType>::Ptr (new pcl::PointCloud<PointType>);
  this->source_cloud = this->current_scan;

  // Set new frame as input source for both gicp objects
  this->setInputSources();

  // Get the next pose via IMU + S2S + S2M
  this->getNextPose();

  // Update current keyframe poses and map
  // (count nearby keyframes, closest keyframe distance and id)
  this->updateKeyframes();

  // Update trajectory
  this->trajectory.push_back( std::make_pair(this->pose, this->rotq) );

  // Update next time stamp
  this->prev_frame_stamp = this->curr_frame_stamp;

  // Update some statistics: time spent on this frame
  this->comp_times.push_back(ros::Time::now().toSec() - then);

  // Publish stuff to ROS (in a separate thread)
  this->publish_thread = std::thread( &dlo::OdomNode::publishToROS, this );
  this->publish_thread.detach();

  // Debug statements and publish custom DLO message (in a separate thread)
  this->debug_thread = std::thread( &dlo::OdomNode::debug, this );
  this->debug_thread.detach();

}

Initialization: initializeDLO
Steps
- If the IMU is used but not yet calibrated, return immediately.
- If the IMU is used and calibrated, gravity alignment is requested, and no initial pose is given, perform gravity alignment.
- If an initial pose is provided, initialize the pose with it.
void dlo::OdomNode::initializeDLO() {

  // Calibrate IMU: return if the IMU is used but not yet calibrated
  if (!this->imu_calibrated && this->imu_use_) {
    return;
  }

  // Gravity Align
  if (this->gravity_align_ && this->imu_use_ && this->imu_calibrated && !this->initial_pose_use_) {
    std::cout << "Aligning to gravity... "; std::cout.flush();
    this->gravityAlign();
  }

  // Use initial known pose, if provided
  if (this->initial_pose_use_) {
    std::cout << "Setting known initial pose... "; std::cout.flush();

    // set known position (variables: pose, T, T_s2s, T_s2s_prev, origin)
    this->pose = this->initial_position_;
    this->T.block(0,3,3,1) = this->pose;
    this->T_s2s.block(0,3,3,1) = this->pose;
    this->T_s2s_prev.block(0,3,3,1) = this->pose;
    this->origin = this->initial_position_;

    // set known orientation (variables: rotq, T, T_s2s, T_s2s_prev)
    this->rotq = this->initial_orientation_;
    this->T.block(0,0,3,3) = this->rotq.toRotationMatrix();
    this->T_s2s.block(0,0,3,3) = this->rotq.toRotationMatrix();
    this->T_s2s_prev.block(0,0,3,3) = this->rotq.toRotationMatrix();

    std::cout << "done" << std::endl << std::endl;
  }

  this->dlo_initialized = true;
  std::cout << "DLO initialized! Starting localization..." << std::endl;

}

Gravity alignment: gravityAlign
Steps
Principle: buffer about one second of accelerometer readings and set the initial orientation from the angle between the measured acceleration direction and the gravity direction.
- Collect IMU accelerations for one second and compute their mean.
- Normalize the mean acceleration.
- Define the gravity vector (assumed to point down).
- Compute the rotation between the two vectors and normalize the resulting quaternion.
- Set the gravity-aligned orientation and print it.
void dlo::OdomNode::gravityAlign() {

  // get average acceleration vector for 1 second and normalize
  Eigen::Vector3f lin_accel = Eigen::Vector3f::Zero();
  const double then = ros::Time::now().toSec();
  int n = 0;
  while ((ros::Time::now().toSec() - then) < 1.) {
    lin_accel[0] += this->imu_meas.lin_accel.x;
    lin_accel[1] += this->imu_meas.lin_accel.y;
    lin_accel[2] += this->imu_meas.lin_accel.z;
    ++n;
  }
  lin_accel[0] /= n; lin_accel[1] /= n; lin_accel[2] /= n;

  // normalize (unit length)
  double lin_norm = sqrt(pow(lin_accel[0], 2) + pow(lin_accel[1], 2) + pow(lin_accel[2], 2));
  lin_accel[0] /= lin_norm; lin_accel[1] /= lin_norm; lin_accel[2] /= lin_norm;

  // define gravity vector (assume points downwards)
  Eigen::Vector3f grav;
  grav << 0, 0, 1;

  // calculate angle between the two vectors
  Eigen::Quaternionf grav_q = Eigen::Quaternionf::FromTwoVectors(lin_accel, grav);

  // normalize the quaternion
  double grav_norm = sqrt(grav_q.w()*grav_q.w() + grav_q.x()*grav_q.x() + grav_q.y()*grav_q.y() + grav_q.z()*grav_q.z());
  grav_q.w() /= grav_norm; grav_q.x() /= grav_norm; grav_q.y() /= grav_norm; grav_q.z() /= grav_norm;

  // set gravity aligned orientation
  this->rotq = grav_q;
  this->T.block(0,0,3,3) = this->rotq.toRotationMatrix();
  this->T_s2s.block(0,0,3,3) = this->rotq.toRotationMatrix();
  this->T_s2s_prev.block(0,0,3,3) = this->rotq.toRotationMatrix();

  // rpy, for printing
  auto euler = grav_q.toRotationMatrix().eulerAngles(2, 1, 0);
  double yaw = euler[0] * (180.0/M_PI);
  double pitch = euler[1] * (180.0/M_PI);
  double roll = euler[2] * (180.0/M_PI);

  std::cout << "done" << std::endl;
  std::cout << "  Roll [deg]: " << roll << std::endl;
  std::cout << "  Pitch [deg]: " << pitch << std::endl << std::endl;

}

Point cloud preprocessing: preprocessPoints
Steps
- Keep a copy of the raw scan in original_scan.
- Remove NaN points from the current scan.
- Crop-box filter: drop points inside a specified box around the sensor (within about 1 m).
- Voxel filter the current scan (leaf size 0.05 m for the scan, 0.1 m for the submap).
void dlo::OdomNode::preprocessPoints() {

  // Original Scan: keep the raw cloud for keyframe storage
  *this->original_scan = *this->current_scan;

  // Remove NaNs
  std::vector<int> idx;
  this->current_scan->is_dense = false;
  pcl::removeNaNFromPointCloud(*this->current_scan, *this->current_scan, idx);

  // Crop Box Filter: remove points inside the specified box
  if (this->crop_use_) {
    this->crop.setInputCloud(this->current_scan);
    this->crop.filter(*this->current_scan);
  }

  // Voxel Grid Filter
  if (this->vf_scan_use_) {
    this->vf_scan.setInputCloud(this->current_scan);
    this->vf_scan.filter(*this->current_scan);
  }

}

Keyframe metric: computeMetrics
Steps
This mainly calls computeSpaciousness. It first computes the range of every laser point, takes the median range, and low-pass filters it (std::nth_element is a partial sorting algorithm that rearranges the elements in [first, last) so the median can be read off directly). The low-pass filter blends the previous and current medians with weights 0.95/0.05, i.e. median_lpf = 0.95*median_prev + 0.05*median_curr, and the filtered value is stored for later use as the keyframe-threshold metric.
void dlo::OdomNode::computeMetrics() {
  this->computeSpaciousness();
}

/** Compute the spaciousness of the current scan **/
void dlo::OdomNode::computeSpaciousness() {

  // compute range of points (distance from each point to the origin)
  std::vector<float> ds;
  for (int i = 0; i < this->current_scan->points.size(); i++) {
    float d = std::sqrt(pow(this->current_scan->points[i].x, 2) + pow(this->current_scan->points[i].y, 2) + pow(this->current_scan->points[i].z, 2));
    ds.push_back(d);
  }

  // median of the ranges, followed by a low-pass filter
  // std::nth_element is a partial sort that rearranges the elements in [first, last)
  std::nth_element(ds.begin(), ds.begin() + ds.size()/2, ds.end());
  float median_curr = ds[ds.size()/2];
  static float median_prev = median_curr;
  float median_lpf = 0.95*median_prev + 0.05*median_curr;
  median_prev = median_lpf;

  // push
  this->metrics.spaciousness.push_back( median_lpf );

}

Setting the keyframe threshold: setAdaptiveParams
Steps
Set the keyframe threshold (keyframe spacing) from the spaciousness metric:
- spaciousness in [20, ∞): 10.0 m
- spaciousness in [10, 20): 5.0 m
- spaciousness in [5, 10): 1.0 m
- spaciousness in [0, 5): 0.5 m
void dlo::OdomNode::setAdaptiveParams() {

  // Set Keyframe Thresh from Spaciousness Metric
  if (this->metrics.spaciousness.back() > 20.0){
    this->keyframe_thresh_dist_ = 10.0;
  } else if (this->metrics.spaciousness.back() > 10.0 && this->metrics.spaciousness.back() <= 20.0) {
    this->keyframe_thresh_dist_ = 5.0;
  } else if (this->metrics.spaciousness.back() > 5.0 && this->metrics.spaciousness.back() <= 10.0) {
    this->keyframe_thresh_dist_ = 1.0;
  } else if (this->metrics.spaciousness.back() <= 5.0) {
    this->keyframe_thresh_dist_ = 0.5;
  }

  // set concave hull alpha
  this->concave_hull.setAlpha(this->keyframe_thresh_dist_);

}

Initializing the target data: initializeInputTarget
Steps
- Hand the point cloud to NanoGICP as the target and compute its covariances.
- Initialize the first keyframe by transforming the target cloud with the current pose T.
- If a submap is used, voxel-filter the first keyframe with the submap filter.
- Keep the keyframe history: the (position, orientation) pair together with the keyframe cloud; also accumulate keyframes_cloud and keyframe_cloud.
- Compute the kd-tree and keyframe normals (covariances).
- Publish the keyframe cloud in a separate thread.
- The keyframe count becomes 1.
Related information
NanoGICP gicp_s2s, gicp: a custom iterative closest point solver for lightweight point cloud scan matching with cross-object data sharing.
void dlo::OdomNode::initializeInputTarget() {

  this->prev_frame_stamp = this->curr_frame_stamp;

  // Convert ros message: pcl cloud -> NanoGICP target, and compute its covariances
  this->target_cloud = pcl::PointCloud<PointType>::Ptr (new pcl::PointCloud<PointType>);
  this->target_cloud = this->current_scan;
  this->gicp_s2s.setInputTarget(this->target_cloud);
  this->gicp_s2s.calculateTargetCovariances();

  // initialize keyframes
  pcl::PointCloud<PointType>::Ptr first_keyframe (new pcl::PointCloud<PointType>);
  pcl::transformPointCloud (*this->target_cloud, *first_keyframe, this->T);

  // voxelization for submap: if a submap is used, voxel-filter the first keyframe
  if (this->vf_submap_use_) {
    this->vf_submap.setInputCloud(first_keyframe);
    this->vf_submap.filter(*first_keyframe);
  }

  // keep history of keyframes
  this->keyframes.push_back(std::make_pair(std::make_pair(this->pose, this->rotq), first_keyframe));
  *this->keyframes_cloud += *first_keyframe;
  *this->keyframe_cloud = *first_keyframe;

  // compute kdtree and keyframe normals (use gicp_s2s input source as temporary storage
  // because it will be overwritten by setInputSources())
  this->gicp_s2s.setInputSource(this->keyframe_cloud);
  this->gicp_s2s.calculateSourceCovariances();
  this->keyframe_normals.push_back(this->gicp_s2s.getSourceCovariances());

  // publish the keyframe in a separate thread
  this->publish_keyframe_thread = std::thread( &dlo::OdomNode::publishKeyframe, this );
  this->publish_keyframe_thread.detach();

  // keyframe count becomes 1
  ++this->num_keyframes;

}

Setting the source data: setInputSources
Set the input source for the S2S gicp object, which builds the kd-tree of the source cloud (this does not build the kd-tree for S2M because force_no_update is true). Then set the pcl::Registration input source for the S2M gicp object through the custom NanoGICP function, and reuse the previously built kd-tree as the S2M source kd-tree.
void dlo::OdomNode::setInputSources(){

  // set the input source for the S2S gicp
  // this builds the KdTree of the source cloud
  // this does not build the KdTree for s2m because force_no_update is true
  this->gicp_s2s.setInputSource(this->current_scan);

  // set pcl::Registration input source for S2M gicp using custom NanoGICP function
  this->gicp.registerInputSource(this->current_scan);

  // now set the KdTree of S2M gicp using previously built KdTree
  this->gicp.source_kdtree_ = this->gicp_s2s.source_kdtree_;
  this->gicp.source_covs_.clear();

}

Getting the next pose: getNextPose
Steps
- Get a global pose from scan-to-scan: if the IMU is used, integrate it between the two frames to obtain an inter-frame prior imu_SE3; use imu_SE3 as the initial guess for the S2S alignment, then propagate from the previous global pose to get the current pose (T_s2s, pose_s2s, rotSO3_s2s, rotq_s2s: translation, rotation matrix, normalized quaternion).
- Reuse the S2S covariances for S2M.
- Build the submap: from the current pose, select submap keyframe indices by Euclidean distance, convex-hull distance, and concave-hull distance; if the index set changed, regenerate the submap cloud and normals.
- Run S2M matching with the S2S result as the initial guess to obtain the final global pose.
- Update the data: S2S values, last values, and so on.
void dlo::OdomNode::getNextPose() {

  //
  // FRAME-TO-FRAME PROCEDURE
  //

  // Align using IMU prior if available
  pcl::PointCloud<PointType>::Ptr aligned (new pcl::PointCloud<PointType>);

  // if the IMU is used, integrate it and use the result as the alignment prior
  if (this->imu_use_) {
    this->integrateIMU();
    this->gicp_s2s.align(*aligned, this->imu_SE3);
  } else {
    this->gicp_s2s.align(*aligned);
  }

  // Get the local S2S transform from the scan-to-scan matcher
  Eigen::Matrix4f T_S2S = this->gicp_s2s.getFinalTransformation();

  // Get the global S2S transform by propagating the relative transform
  this->propagateS2S(T_S2S);

  // reuse covariances from s2s for s2m
  this->gicp.source_covs_ = this->gicp_s2s.source_covs_;

  // Swap source and target (which also swaps KdTrees internally) for next S2S
  this->gicp_s2s.swapSourceAndTarget();

  //
  // FRAME-TO-SUBMAP
  //

  // Get current global submap for this frame
  this->getSubmapKeyframes();

  // if the submap has changed, the target of the S2M matcher changes as well
  if (this->submap_hasChanged) {

    // Set the current global submap as the target cloud
    this->gicp.setInputTarget(this->submap_cloud);

    // Set target cloud's normals as submap normals
    this->gicp.setTargetCovariances( this->submap_normals );
  }

  // Align with current submap with global S2S transformation as initial guess
  this->gicp.align(*aligned, this->T_s2s);

  // Get final transformation in global frame
  this->T = this->gicp.getFinalTransformation();

  // Update the S2S transform for next propagation
  this->T_s2s_prev = this->T;

  // Update next global pose
  // Both source and target clouds are in the global frame now, so the transformation is global
  this->propagateS2M();

  // Set next target cloud as current source cloud
  *this->target_cloud = *this->source_cloud;

}
IMU inter-frame integration: integrateIMU
Steps
- Extract the IMU measurements between the two frames from the IMU buffer into imu_frame.
- Sort imu_frame by timestamp.
- Integrate the gyro to get the rotation change between the two frames and normalize the quaternion.
- Set the translation to zero and store the result as the prior imu_SE3.
void dlo::OdomNode::integrateIMU() {

  // Extract IMU data between the two frames
  std::vector<ImuMeas> imu_frame;

  for (const auto& i : this->imu_buffer) {

    // IMU data between two frames is when:
    //   current frame's timestamp minus imu timestamp is positive
    //   previous frame's timestamp minus imu timestamp is negative
    double curr_frame_imu_dt = this->curr_frame_stamp - i.stamp;
    double prev_frame_imu_dt = this->prev_frame_stamp - i.stamp;

    if (curr_frame_imu_dt >= 0. && prev_frame_imu_dt <= 0.) {
      imu_frame.push_back(i);
    }

  }

  // Sort measurements by time
  std::sort(imu_frame.begin(), imu_frame.end(), this->comparatorImu);

  // Relative IMU integration of gyro and accelerometer
  double curr_imu_stamp = 0.;
  double prev_imu_stamp = 0.;
  double dt;

  Eigen::Quaternionf q = Eigen::Quaternionf::Identity();

  for (uint32_t i = 0; i < imu_frame.size(); ++i) {

    if (prev_imu_stamp == 0.) {
      prev_imu_stamp = imu_frame[i].stamp;
      continue;
    }

    // Calculate difference in imu measurement times IN SECONDS
    curr_imu_stamp = imu_frame[i].stamp;
    dt = curr_imu_stamp - prev_imu_stamp;
    prev_imu_stamp = curr_imu_stamp;

    // Relative gyro propagation quaternion dynamics
    Eigen::Quaternionf qq = q;
    q.w() -= 0.5*( qq.x()*imu_frame[i].ang_vel.x + qq.y()*imu_frame[i].ang_vel.y + qq.z()*imu_frame[i].ang_vel.z ) * dt;
    q.x() += 0.5*( qq.w()*imu_frame[i].ang_vel.x - qq.z()*imu_frame[i].ang_vel.y + qq.y()*imu_frame[i].ang_vel.z ) * dt;
    q.y() += 0.5*( qq.z()*imu_frame[i].ang_vel.x + qq.w()*imu_frame[i].ang_vel.y - qq.x()*imu_frame[i].ang_vel.z ) * dt;
    q.z() += 0.5*( qq.x()*imu_frame[i].ang_vel.y - qq.y()*imu_frame[i].ang_vel.x + qq.w()*imu_frame[i].ang_vel.z ) * dt;

  }

  // Normalize quaternion
  double norm = sqrt(q.w()*q.w() + q.x()*q.x() + q.y()*q.y() + q.z()*q.z());
  q.w() /= norm; q.x() /= norm; q.y() /= norm; q.z() /= norm;

  // Store IMU guess (translation left at zero)
  this->imu_SE3 = Eigen::Matrix4f::Identity();
  this->imu_SE3.block(0, 0, 3, 3) = q.toRotationMatrix();

}

Getting the submap keyframes: getSubmapKeyframes
Steps
- Clear the container of keyframe indices used to build the submap.
- Compute the Euclidean (translation) distance from every keyframe to the current frame.
- Push the qualifying top-K nearest frames of each kind into the submap index list: the top-k nearest keyframes by distance, the top-k nearest keyframes on the convex hull, and the top-k nearest keyframes on the concave hull.
- Sort the submap keyframe indices and remove duplicate ids (std::sort, then std::unique).
- If the current submap index set equals the previous one, finish; otherwise regenerate the submap cloud and normals and update the previous index set.
void dlo::OdomNode::getSubmapKeyframes() {

  // clear vector of keyframe indices to use for submap
  this->submap_kf_idx_curr.clear();

  //
  // TOP K NEAREST NEIGHBORS FROM ALL KEYFRAMES
  //

  // calculate distance between current pose and poses in keyframe set
  std::vector<float> ds;
  std::vector<int> keyframe_nn; int i = 0;
  Eigen::Vector3f curr_pose = this->T_s2s.block(0,3,3,1);

  for (const auto& k : this->keyframes) {
    // Euclidean distance between the pose translations
    float d = sqrt( pow(curr_pose[0] - k.first.first[0], 2) + pow(curr_pose[1] - k.first.first[1], 2) + pow(curr_pose[2] - k.first.first[2], 2) );
    ds.push_back(d);
    keyframe_nn.push_back(i); i++;
  }

  // get indices for top K nearest neighbor keyframe poses
  this->pushSubmapIndices(ds, this->submap_knn_, keyframe_nn);

  //
  // TOP K NEAREST NEIGHBORS FROM CONVEX HULL
  //

  // get convex hull indices
  this->computeConvexHull();

  // get distances for each keyframe on convex hull
  std::vector<float> convex_ds;
  for (const auto& c : this->keyframe_convex) {
    convex_ds.push_back(ds[c]);
  }

  // get indices for top kNN for convex hull
  this->pushSubmapIndices(convex_ds, this->submap_kcv_, this->keyframe_convex);

  //
  // TOP K NEAREST NEIGHBORS FROM CONCAVE HULL
  //

  // get concave hull indices
  this->computeConcaveHull();

  // get distances for each keyframe on concave hull
  std::vector<float> concave_ds;
  for (const auto& c : this->keyframe_concave) {
    concave_ds.push_back(ds[c]);
  }

  // get indices for top kNN for concave hull
  this->pushSubmapIndices(concave_ds, this->submap_kcc_, this->keyframe_concave);

  //
  // BUILD SUBMAP
  //

  // concatenate all submap clouds and normals:
  // sort the submap keyframe indices and remove duplicate ids
  std::sort(this->submap_kf_idx_curr.begin(), this->submap_kf_idx_curr.end());
  auto last = std::unique(this->submap_kf_idx_curr.begin(), this->submap_kf_idx_curr.end());
  this->submap_kf_idx_curr.erase(last, this->submap_kf_idx_curr.end());

  // sort current and previous submap kf list of indices
  std::sort(this->submap_kf_idx_curr.begin(), this->submap_kf_idx_curr.end());
  std::sort(this->submap_kf_idx_prev.begin(), this->submap_kf_idx_prev.end());

  // check if submap has changed from previous iteration
  if (this->submap_kf_idx_curr == this->submap_kf_idx_prev){
    this->submap_hasChanged = false;
  } else {
    this->submap_hasChanged = true;

    // reinitialize submap cloud and normals
    pcl::PointCloud<PointType>::Ptr submap_cloud_ (boost::make_shared<pcl::PointCloud<PointType>>());
    this->submap_normals.clear();

    for (auto k : this->submap_kf_idx_curr) {

      // create current submap cloud
      *submap_cloud_ += *this->keyframes[k].second;

      // grab corresponding submap cloud's normals
      this->submap_normals.insert( std::end(this->submap_normals), std::begin(this->keyframe_normals[k]), std::end(this->keyframe_normals[k]) );
    }

    this->submap_cloud = submap_cloud_;
    this->submap_kf_idx_prev = this->submap_kf_idx_curr;
  }

}

Selecting the k nearest frame indices: pushSubmapIndices
Idea
Use a priority queue (max-heap of size k) to find the k-th smallest distance, then iterate over the distances and push every frame whose distance is less than or equal to that value into submap_kf_idx_curr.
void dlo::OdomNode::pushSubmapIndices(std::vector<float> dists, int k, std::vector<int> frames) {

  // maintain max heap of at most k elements
  std::priority_queue<float> pq;

  // a priority_queue is a max-heap by default (top() is the largest),
  // so pushing and popping keeps only the k smallest values
  for (auto d : dists) {
    if (pq.size() >= k && pq.top() > d) {
      pq.push(d);
      pq.pop();
    } else if (pq.size() < k) {
      pq.push(d);
    }
  }

  // get the kth smallest element, which should be at the top of the heap
  float kth_element = pq.top();

  // get all elements smaller or equal to the kth smallest element
  for (int i = 0; i < dists.size(); ++i) {
    if (dists[i] <= kth_element)
      this->submap_kf_idx_curr.push_back(frames[i]);
  }

}

Keyframe convex hull: computeConvexHull
Idea
Treat the keyframe poses as a point cloud and index them via the convex hull; the concave hull works the same way.
Steps
- Push the keyframe positions into a point cloud.
- Compute the convex hull of that cloud.
- Get the indices of the keyframes on the convex hull.
- Store them as the keyframe convex-hull indices.
void dlo::OdomNode::computeConvexHull() {

  // at least 4 keyframes are needed for a convex hull
  if (this->num_keyframes < 4) {
    return;
  }

  // create a pointcloud with points at keyframe positions
  pcl::PointCloud<PointType>::Ptr cloud = pcl::PointCloud<PointType>::Ptr (new pcl::PointCloud<PointType>);

  for (const auto& k : this->keyframes) {
    PointType pt;
    pt.x = k.first.first[0];
    pt.y = k.first.first[1];
    pt.z = k.first.first[2];
    cloud->push_back(pt);
  }

  // calculate the convex hull of the point cloud
  this->convex_hull.setInputCloud(cloud);

  // get the indices of the keyframes on the convex hull
  pcl::PointCloud<PointType>::Ptr convex_points = pcl::PointCloud<PointType>::Ptr (new pcl::PointCloud<PointType>);
  this->convex_hull.reconstruct(*convex_points);

  pcl::PointIndices::Ptr convex_hull_point_idx = pcl::PointIndices::Ptr (new pcl::PointIndices);
  this->convex_hull.getHullPointIndices(*convex_hull_point_idx);

  this->keyframe_convex.clear();
  for (int i = 0; i < convex_hull_point_idx->indices.size(); ++i) {
    this->keyframe_convex.push_back(convex_hull_point_idx->indices[i]);
  }

}

Updating keyframes: updateKeyframes
Steps
- Transform the current scan into the map frame using the global pose.
- Iterate over the keyframes, count how many are near the current pose, and record the closest distance and id.
- The distance between the current pose and each keyframe pose is compared against 1.5x the keyframe threshold to decide whether a keyframe counts as nearby.
void dlo::OdomNode::updateKeyframes() {

  // transform point cloud (into the map frame using the global pose)
  this->transformCurrentScan();

  // calculate difference in pose and rotation to all poses in trajectory
  float closest_d = std::numeric_limits<float>::infinity();
  int closest_idx = 0;
  int keyframes_idx = 0;

  int num_nearby = 0;

  for (const auto& k : this->keyframes) {

    // calculate distance between current pose and pose in keyframes
    float delta_d = sqrt( pow(this->pose[0] - k.first.first[0], 2) + pow(this->pose[1] - k.first.first[1], 2) + pow(this->pose[2] - k.first.first[2], 2) );

    // count the number of keyframes near the current pose
    if (delta_d <= this->keyframe_thresh_dist_ * 1.5){
      ++num_nearby;
    }

    // store the closest keyframe
    if (delta_d < closest_d) {
      closest_d = delta_d;
      closest_idx = keyframes_idx;
    }

    keyframes_idx++;
  }

  // (the rest of the function, which decides whether to add a new keyframe, is omitted in this excerpt)
}

IMU callback: imuCB
Functionality
What this callback mainly does: stay static for a few seconds at startup to estimate the biases, then subtract the biases from the incoming measurements and push them into a buffer.
Steps
- Read the IMU measurements (angular velocity and linear acceleration).
- If not yet calibrated, estimate the biases: collect three seconds of stationary gyro and accelerometer data and use the mean as the bias.
- Subtract the bias from the measurements to get the corrected values.
- Push the corrected measurements into the buffer.
void dlo::OdomNode::imuCB(const sensor_msgs::Imu::ConstPtr& imu) {

  // return immediately if the IMU is not used
  if (!this->imu_use_) {
    return;
  }

  double ang_vel[3], lin_accel[3];

  // Get IMU samples (angular velocity, linear acceleration)
  ang_vel[0] = imu->angular_velocity.x;
  ang_vel[1] = imu->angular_velocity.y;
  ang_vel[2] = imu->angular_velocity.z;

  lin_accel[0] = imu->linear_acceleration.x;
  lin_accel[1] = imu->linear_acceleration.y;
  lin_accel[2] = imu->linear_acceleration.z;

  if (this->first_imu_time == 0.) {
    this->first_imu_time = imu->header.stamp.toSec();
  }

  // IMU calibration procedure - do for three seconds
  if (!this->imu_calibrated) {  // IMU not yet calibrated

    static int num_samples = 0;
    static bool print = true;

    if ((imu->header.stamp.toSec() - this->first_imu_time) < this->imu_calib_time_) {

      num_samples++;

      this->imu_bias.gyro.x += ang_vel[0];
      this->imu_bias.gyro.y += ang_vel[1];
      this->imu_bias.gyro.z += ang_vel[2];

      this->imu_bias.accel.x += lin_accel[0];
      this->imu_bias.accel.y += lin_accel[1];
      this->imu_bias.accel.z += lin_accel[2];

      // print the calibration message once
      if(print) {
        std::cout << "Calibrating IMU for " << this->imu_calib_time_ << " seconds... "; std::cout.flush();
        print = false;
      }

    } else {

      // average the gyro and accelerometer biases; calibration done
      this->imu_bias.gyro.x /= num_samples;
      this->imu_bias.gyro.y /= num_samples;
      this->imu_bias.gyro.z /= num_samples;

      this->imu_bias.accel.x /= num_samples;
      this->imu_bias.accel.y /= num_samples;
      this->imu_bias.accel.z /= num_samples;

      this->imu_calibrated = true;

      std::cout << "done" << std::endl;
      std::cout << "  Gyro biases [xyz]: " << this->imu_bias.gyro.x << ", " << this->imu_bias.gyro.y << ", " << this->imu_bias.gyro.z << std::endl << std::endl;

    }

  } else {

    // Apply the calibrated bias to the new IMU measurements
    this->imu_meas.stamp = imu->header.stamp.toSec();

    // subtract the bias and push the corrected measurement into the buffer
    this->imu_meas.ang_vel.x = ang_vel[0] - this->imu_bias.gyro.x;
    this->imu_meas.ang_vel.y = ang_vel[1] - this->imu_bias.gyro.y;
    this->imu_meas.ang_vel.z = ang_vel[2] - this->imu_bias.gyro.z;

    this->imu_meas.lin_accel.x = lin_accel[0];
    this->imu_meas.lin_accel.y = lin_accel[1];
    this->imu_meas.lin_accel.z = lin_accel[2];

    // Store into circular buffer
    this->mtx_imu.lock();
    this->imu_buffer.push_front(this->imu_meas);
    this->mtx_imu.unlock();
  }

}

MapNode
Main function
This main function simply constructs a MapNode and then waits until ROS shuts down.
#include "dlo/map.h"

void controlC(int sig) {
  dlo::MapNode::abort();
}

int main(int argc, char** argv) {
  ros::init(argc, argv, "dlo_map_node");
  ros::NodeHandle nh("~");

  signal(SIGTERM, controlC);
  sleep(0.5);

  dlo::MapNode node(nh);
  ros::AsyncSpinner spinner(1);
  spinner.start();
  node.start();

  ros::waitForShutdown();
  return 0;
}

MapNode core members
class dlo::MapNode {

public:
  MapNode(ros::NodeHandle node_handle);
  ~MapNode();

  static void abort() {
    abort_ = true;
  }

  void start();
  void stop();

private:
  void abortTimerCB(const ros::TimerEvent& e);
  void publishTimerCB(const ros::TimerEvent& e);
  void keyframeCB(const sensor_msgs::PointCloud2ConstPtr& keyframe);
  bool savePcd(direct_lidar_odometry::save_pcd::Request& req,
               direct_lidar_odometry::save_pcd::Response& res);
  void getParams();

  ros::NodeHandle nh;
  ros::Timer abort_timer;
  ros::Timer publish_timer;

  ros::Subscriber keyframe_sub;
  ros::Publisher map_pub;
  ros::ServiceServer save_pcd_srv;

  pcl::PointCloud<PointType>::Ptr dlo_map;
  pcl::VoxelGrid<PointType> voxelgrid;

  ros::Time map_stamp;
  std::string odom_frame;

  bool publish_full_map_;
  double publish_freq_;
  double leaf_size_;

  static std::atomic<bool> abort_;

};
Constructor: MapNode
Steps
- Read the parameters: std::string odom_frame; bool publish_full_map_; double publish_freq_; double leaf_size_.
- Get the node namespace, strip the leading character, and prepend it to the frame name: odom_frame = ns + "/" + this->odom_frame.
- Create the abort_timer, which continuously checks whether the program should stop and shuts ROS down if so.
- If the full map is published, create a publish timer that periodically publishes the point cloud map as a PointCloud2.
dlo::MapNode::MapNode(ros::NodeHandle node_handle) : nh(node_handle) {

  // read parameters
  this->getParams();

  this->abort_timer = this->nh.createTimer(ros::Duration(0.01), &dlo::MapNode::abortTimerCB, this);

  if (this->publish_full_map_){
    this->publish_timer = this->nh.createTimer(ros::Duration(this->publish_freq_), &dlo::MapNode::publishTimerCB, this);
  }

  // subscribe to keyframes, publish the point cloud map
  this->keyframe_sub = this->nh.subscribe("keyframes", 1, &dlo::MapNode::keyframeCB, this);
  this->map_pub = this->nh.advertise<sensor_msgs::PointCloud2>("map", 1);

  // service for saving the map to a pcd file
  this->save_pcd_srv = this->nh.advertiseService("save_pcd", &dlo::MapNode::savePcd, this);

  // initialize map
  this->dlo_map = pcl::PointCloud<PointType>::Ptr (new pcl::PointCloud<PointType>);

  ROS_INFO("DLO Map Node Initialized");

}

Keyframe callback: keyframeCB
Steps
- Convert the keyframe from the ROS message format to PCL.
- Downsample the keyframe cloud with a voxel filter.
- Add the keyframe to the global map.
- Publish the map (or just the keyframe when the full map is not published).
void dlo::MapNode::keyframeCB(const sensor_msgs::PointCloud2ConstPtr& keyframe) {

  // convert scan to pcl format (ROS type to PCL)
  pcl::PointCloud<PointType>::Ptr keyframe_pcl = pcl::PointCloud<PointType>::Ptr (new pcl::PointCloud<PointType>);
  pcl::fromROSMsg(*keyframe, *keyframe_pcl);

  // voxel filter: downsample this keyframe
  this->voxelgrid.setLeafSize(this->leaf_size_, this->leaf_size_, this->leaf_size_);
  this->voxelgrid.setInputCloud(keyframe_pcl);
  this->voxelgrid.filter(*keyframe_pcl);

  // save keyframe to map
  this->map_stamp = keyframe->header.stamp;
  *this->dlo_map += *keyframe_pcl;

  if (!this->publish_full_map_) {
    // publish just this keyframe
    if (keyframe_pcl->points.size() == keyframe_pcl->width * keyframe_pcl->height) {
      sensor_msgs::PointCloud2 map_ros;
      pcl::toROSMsg(*keyframe_pcl, map_ros);
      map_ros.header.stamp = ros::Time::now();
      map_ros.header.frame_id = this->odom_frame;
      this->map_pub.publish(map_ros);
    }
  }

}

Saving the map: savePcd
Steps
Voxel-filter the map and then save it to disk.
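Before the handler itself, here is a minimal sketch of a client node that could call this service from C++. The fully qualified service name ("/dlo_map_node/save_pcd") and the generated header path are assumptions that depend on how the map node is named and namespaced in your launch file, so adjust them to your setup; only the request fields leaf_size and save_path and the response field success are taken from the handler below.

// Minimal sketch of a save_pcd service client (service name is an assumption).
#include <ros/ros.h>
#include <direct_lidar_odometry/save_pcd.h>

int main(int argc, char** argv) {
  ros::init(argc, argv, "save_map_client");
  ros::NodeHandle nh;

  // assumed fully qualified service name; depends on the node name/namespace in the launch file
  ros::ServiceClient client = nh.serviceClient<direct_lidar_odometry::save_pcd>("/dlo_map_node/save_pcd");

  direct_lidar_odometry::save_pcd srv;
  srv.request.leaf_size = 0.25;    // voxel leaf size applied before saving
  srv.request.save_path = "/tmp";  // dlo_map.pcd is written into this directory

  if (client.call(srv) && srv.response.success) {
    ROS_INFO("Map saved to %s/dlo_map.pcd", srv.request.save_path.c_str());
  } else {
    ROS_WARN("Failed to save map");
  }
  return 0;
}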
bool dlo::MapNode::savePcd(direct_lidar_odometry::save_pcd::Request& req,
                           direct_lidar_odometry::save_pcd::Response& res) {

  // copy the map
  pcl::PointCloud<PointType>::Ptr m = pcl::PointCloud<PointType>::Ptr (boost::make_shared<pcl::PointCloud<PointType>>(*this->dlo_map));

  float leaf_size = req.leaf_size;
  std::string p = req.save_path;

  std::cout << std::setprecision(2) << "Saving map to " << p + "/dlo_map.pcd" << "... "; std::cout.flush();

  // voxelize map
  pcl::VoxelGrid<PointType> vg;
  vg.setLeafSize(leaf_size, leaf_size, leaf_size);
  vg.setInputCloud(m);
  vg.filter(*m);

  // save map
  int ret = pcl::io::savePCDFileBinary(p + "/dlo_map.pcd", *m);
  res.success = ret == 0;

  if (res.success) {
    std::cout << "done" << std::endl;
  } else {
    std::cout << "failed" << std::endl;
  }

  return res.success;

}

Related
Point cloud surface reconstruction (Zhihu article: here). The surface module in PCL is used to reconstruct surfaces from the raw point clouds obtained by 3D scanning; it contains the basic algorithms and data structures for point cloud reconstruction, including ConvexHull (convex hull) and ConcaveHull (concave hull).
ConvexHull (convex hull): class pcl::ConvexHull<PointInT>. The ConvexHull class implements the algorithm for creating a convex polygon; it is essentially a wrapper around the qhull library and supports both 2D and 3D point sets. #include <pcl/surface/convex_hull.h>
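Before the method-by-method reference, a minimal, self-contained usage sketch of the same calls used in computeConvexHull above; the input points are made-up example data standing in for keyframe positions.

// Minimal pcl::ConvexHull usage sketch (example data only).
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/PointIndices.h>
#include <pcl/surface/convex_hull.h>
#include <iostream>
#include <vector>

int main() {
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
  cloud->push_back(pcl::PointXYZ(0.f, 0.f, 0.f));
  cloud->push_back(pcl::PointXYZ(5.f, 0.f, 0.f));
  cloud->push_back(pcl::PointXYZ(0.f, 5.f, 0.f));
  cloud->push_back(pcl::PointXYZ(0.f, 0.f, 5.f));
  cloud->push_back(pcl::PointXYZ(1.f, 1.f, 1.f));   // interior point, not on the hull

  pcl::ConvexHull<pcl::PointXYZ> convex_hull;
  convex_hull.setInputCloud(cloud);
  convex_hull.setComputeAreaVolume(true);           // enables getTotalArea()/getTotalVolume()

  pcl::PointCloud<pcl::PointXYZ> hull_points;
  std::vector<pcl::Vertices> polygons;
  convex_hull.reconstruct(hull_points, polygons);   // hull vertices + polygon faces

  pcl::PointIndices hull_point_indices;
  convex_hull.getHullPointIndices(hull_point_indices); // indices back into the input cloud

  std::cout << "hull vertices: " << hull_points.size()
            << ", total area: " << convex_hull.getTotalArea() << std::endl;
  return 0;
}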
ConvexHull ()
// Empty constructor.
virtual ~ConvexHull ()
// Empty destructor.
void reconstruct (PointCloud &points, std::vector<pcl::Vertices> &polygons)
// Compute the convex polygon of all input points. points stores the vertices of the resulting hull; polygons stores a series of vertex sets, each forming one polygon. The Vertices structure holds a set of point indices referring to points in points.
void reconstruct (PointCloud &points)
// Compute the convex polygon of all input points; the output dimension (2D or 3D) depends on the input, and the hull vertices are stored in points.
void setComputeAreaVolume (bool value)
// Whether to compute the area and volume of the convex polygon. If value is true, qhull is called to compute the total area and volume. To save computation, value defaults to false.
double getTotalArea () const
// Get the total area of the convex hull; only valid if setComputeAreaVolume was set to true.
double getTotalVolume () const
// Get the total volume of the convex hull; only valid if setComputeAreaVolume was set to true.
void setDimension (int dimension)
// Set the dimension of the input data (2D or 3D); if not set, it is deduced automatically from the input cloud.
int getDimension () const
// Return the dimension (2D or 3D) of the computed hull.
void getHullPointIndices (pcl::PointIndices &hull_point_indices) const
// Retrieve the indices of the input points that lie on the convex hull.

ConcaveHull (concave hull): class pcl::ConcaveHull<PointInT>. The ConcaveHull class implements the algorithm for creating a concave polygon; it is essentially a wrapper around the qhull library and supports both 2D and 3D point sets. #include <pcl/surface/concave_hull.h>
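A corresponding minimal sketch for pcl::ConcaveHull, again on made-up example data; alpha plays the same role here as the keyframe threshold that setAdaptiveParams passes to setAlpha above (smaller alpha, more detailed hull).

// Minimal pcl::ConcaveHull usage sketch (example data only).
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/PointIndices.h>
#include <pcl/surface/concave_hull.h>
#include <iostream>

int main() {
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
  // a flat 5 m x 5 m grid of points in the XY plane
  for (int i = 0; i <= 10; ++i) {
    for (int j = 0; j <= 10; ++j) {
      cloud->push_back(pcl::PointXYZ(i * 0.5f, j * 0.5f, 0.f));
    }
  }

  pcl::ConcaveHull<pcl::PointXYZ> concave_hull;
  concave_hull.setInputCloud(cloud);
  concave_hull.setAlpha(0.5);             // limits the edge length of the resulting hull polygons
  concave_hull.setKeepInformation(true);  // keep extra fields (RGB, normals, ...) on hull points

  pcl::PointCloud<pcl::PointXYZ> hull_points;
  concave_hull.reconstruct(hull_points);

  pcl::PointIndices hull_point_indices;
  concave_hull.getHullPointIndices(hull_point_indices); // indices back into the input cloud

  std::cout << "concave hull vertices: " << hull_points.size()
            << " (dimension " << concave_hull.getDimension() << ")" << std::endl;
  return 0;
}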
ConcaveHull ()
// Empty constructor.
virtual ~ConcaveHull ()
// Empty destructor.
void setSearchMethod (const KdTreePtr &tree)
// Set the search method; tree points to the search object to use, e.g. a kd-tree or octree.
virtual void setInputCloud (const PointCloudConstPtr &cloud)
// Set the input cloud; cloud is a shared pointer to the input point cloud.
virtual void setIndices (const IndicesPtr &indices)
// Provide a pointer to a vector of point indices of the input cloud.
virtual void setIndices (const IndicesConstPtr &indices)
// Provide a pointer to a constant vector of point indices of the input cloud (cannot be modified).
virtual void setIndices (const PointIndicesConstPtr &indices)
// Provide a pointer to a constant PointIndices structure for the input cloud.
virtual void setIndices (size_t row_start, size_t col_start, size_t nb_rows, size_t nb_cols)
// Provide indices for the part of the input cloud to use; only valid for organized clouds. row_start is the row offset, col_start the column offset, nb_rows the number of rows, and nb_cols the number of columns.
void reconstruct (PointCloud &points, std::vector<pcl::Vertices> &polygons)
// Compute the concave polygon of all input points. points stores the vertices of the resulting hull; polygons stores a series of vertex sets, each forming one polygon. The Vertices structure holds a set of point indices referring to points in points.
void reconstruct (PointCloud &output)
// Compute the concave polygon of all input points; the output dimension (2D or 3D) depends on the input, and the hull vertices are stored in output.
void setAlpha (double alpha)
// Set the alpha value, which limits the edge length of the polygons in the resulting hull (the smaller the alpha, the more detailed the hull). alpha is a positive, non-zero value defining the maximum distance from a vertex to the center in the Voronoi diagram.
double getAlpha ()
// Get the alpha value.
void setVoronoiCenters (PointCloudPtr voronoi_centers)
// If set, the centers of the Voronoi cells corresponding to the resulting concave hull are stored in voronoi_centers.
void setKeepInformation (bool value)
// If keep_information is true, the hull vertices keep their other fields, such as RGB values and normals.
int getDim () const
// Return the dimension (2D or 3D) of the computed hull.
int getDimension () const
// Return the dimension (2D or 3D) of the computed hull.
void setDimension (int dimension)
// Set the dimension of the input data (2D or 3D); if not set, it is deduced automatically from the input cloud.
void getHullPointIndices (pcl::PointIndices &hull_point_indices) const
// Retrieve the indices of the input points that lie on the hull.

End